Dataset schema (one row per example):
query            string  (length 9 to 3.4k)
document         string  (length 9 to 87.4k)
metadata         dict
negatives        list    (4 to 101 items)
negative_scores  list    (4 to 101 items)
document_score   string  (length 3 to 10)
document_rank    string  (102 classes)
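For orientation, a minimal sketch (Python, values abbreviated) of what a single row under this schema looks like; the literal field values are the ones printed in full in the row below, and everything here is merely truncated for readability.

row = {
    "query": "Update parameters using one step of gradient descent",
    "document": "def update_parameters_with_gd(parameters, grads, learning_rate): ...",
    "metadata": {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
    "negatives": ["def _update_parameters(self, loss): ...", "..."],      # 4 to 101 mined negative code snippets
    "negative_scores": ["0.7269349", "..."],                              # similarity scores aligned with negatives
    "document_score": "0.0",
    "document_rank": "-1",
}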
Update parameters using one step of gradient descent
def update_parameters_with_gd(parameters, grads, learning_rate):
    L = len(parameters) // 2  # number of layers in the neural network

    # Update rule for each parameter
    for l in range(L):
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads['dW' + str(l+1)]
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads['db' + str(l+1)]

    return parameters
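A minimal usage sketch of the document above, assuming NumPy arrays and a two-layer network; the shapes, values, and learning rate are illustrative assumptions, not part of the dataset.

import numpy as np

# Hypothetical two-layer parameters and matching gradients (shapes chosen for illustration).
parameters = {"W1": np.ones((3, 2)), "b1": np.zeros((3, 1)),
              "W2": np.ones((1, 3)), "b2": np.zeros((1, 1))}
grads = {"dW1": 0.1 * np.ones((3, 2)), "db1": 0.1 * np.ones((3, 1)),
         "dW2": 0.1 * np.ones((1, 3)), "db2": 0.1 * np.ones((1, 1))}

parameters = update_parameters_with_gd(parameters, grads, learning_rate=0.01)
print(parameters["W1"][0, 0])  # 1.0 - 0.01 * 0.1 = 0.999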
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_parameters(self, loss):\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def updateParams(self,gradients):\n for i in xrange(len(self.params)):\n self.params[i].set_value(self.params[i].get_value()-gradients[i]/(1/self.learning_rate+self.iterations))", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def gradientDescent(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n m = y.size # number of training examples\n\n for i in range(num_iters):\n # ====================== YOUR CODE HERE ======================\n # Instructions: Perform a single gradient step on the parameter vector\n # theta.\n #\n # Hint: While debugging, it can be useful to print out the values\n # of the cost function (computeCost) and gradient here.\n #\n # Calculate the gradient step according to the equation for theta1:\n g_step1 = (alpha / m * np.sum( (np.dot(X,theta) - y) * X[:,1]) )\n # Gradient step for theta knot:\n g_step0 = (alpha / m * np.sum( (np.dot(X,theta) - y) ) )\n \n #update theta\n theta[0] = (theta[0] - g_step0)\n theta[1] = (theta[1] - g_step1)\n \n #print([theta , g_step1, g_step0])\n\n # ============================================================\n\n # Save the cost J in every iteration\n J_history.append(computeCost(X, y, theta))\n\n return theta, J_history", "def update_param(self, lr):\n\n\n self.W=self.W-lr*self.W_grad\n self.b = self.b - lr*self.b_grad", "def add_grad_updates(self):\n \n gradients = T.grad(self.cost, self.theta)\n \n for target_param, grad in zip(self.theta, gradients):\n \n if target_param.name ==\"W\" and self.num_hidden ==0\\\n and self.zero_diag:\n \n grad = grad - T.diag(T.diag(grad)) # no x i - xi connections\n # for all i = 1, ..., D\n ##############################################################\n if target_param.name ==\"b\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n elif target_param.name ==\"bhid\" and self.learn_biases == False:\n print(\"Will not learn bias terms\")\n pass\n \n else:\n \n if self.use_momentum:\n \n # alternative definition (mostly seen):\n #g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n #T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n #self.updates[target_param] = target_param + g_tilda\n \n g_tilda = self.momentum*self.grad_vec[target_param.name] - \\\n (1-self.momentum)*grad\n \n self.updates[target_param] = target_param +\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*g_tilda\n \n # store g_tilda for next iteration:\n self.updates[self.grad_vec[target_param.name]] = g_tilda\n \n else:\n \n self.updates[target_param] = target_param -\\\n T.cast(self.learning_rate, dtype = theano.config.floatX)*grad\n \n if (\"PCD\" in self.algorithm) and self.num_hidden > 0:\n \n self.updates[self.persistent_gibbs] = self.hid_samples", "def _update_parameters(self, curr_state, reward, next_state):\n phi = self._features.vector(curr_state)\n phi_dash = self._features.vector(next_state)\n\n self._A += np.outer(phi, (phi - self._gamma * phi_dash))\n self._b += reward * phi", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = 
calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter in range(max_iters):\n grad = compute_grad(y, tx, w)\n loss = compute_loss(y, tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n \n return w, loss", "def update(params: hk.Params, opt_state: OptState, batch, labels, xent_weight=self.weights, l1_coeff=self.l1_coef, l2_coeff=self.l2_coef) -> Tuple[hk.Params, OptState]:\n grads = jax.grad(loss)(params, batch, labels, xent_weight, l1_coeff, l2_coeff)\n updates, opt_state = opt.update(grads, opt_state)\n new_params = optax.apply_updates(params, updates)", "def update_parameters(parameters, grads, learning_rate):\n pass", "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n # random choose the samples\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def gradient_descent(self, X ,eta, tol,iter):\n gd=[]\n gd_x=[X]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n \n prev_x=X\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n #print(\"prev_x = \",prev_x,\" Next x = \",new_x)\n for i 
in range(iter):\n prev_x=new_x\n #print(prev_x)\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n # print(\"x = \",new_x,\"Gradient =\",learn_rate*self.func(prev_x))\n if abs(self.func(new_x)) <= self.func(tol) :\n break\n iteration=iteration+1\n #print(\"Best at GD x= \",new_x)\n gd.append(gd_x)\n gd.append(new_x)\n gd.append(iteration)\n\n return gd", "def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()", "def optimize_parameters(self):\n self.loss_total.backward() # calculate gradients\n self.optimizer.step()\n self.optimizer.zero_grad()\n torch.cuda.empty_cache()", "def step(self):\n if self.defaults['max_grad_norm'] > 0:\n device = self.param_groups[0]['params'][0].device\n global_grad_norm = torch.zeros(1, device=device)\n\n max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is not None:\n grad = p.grad\n global_grad_norm.add_(grad.pow(2).sum())\n\n global_grad_norm = torch.sqrt(global_grad_norm)\n\n clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + group['eps']), max=1.0)\n else:\n clip_global_grad_norm = 1.0\n\n for group in self.param_groups:\n beta1, beta2, beta3 = group['betas']\n # assume same step across group now to simplify things\n # per parameter step can be easily support by making it tensor, or pass list into kernel\n if 'step' in group:\n group['step'] += 1\n else:\n group['step'] = 1\n\n bias_correction1 = 1.0 - beta1 ** group['step']\n\n bias_correction2 = 1.0 - beta2 ** group['step']\n\n bias_correction3 = 1.0 - beta3 ** group['step']\n\n for p in group['params']:\n if p.grad is None:\n continue\n\n state = self.state[p]\n if len(state) == 0:\n state['exp_avg'] = torch.zeros_like(p)\n state['exp_avg_sq'] = torch.zeros_like(p)\n state['exp_avg_diff'] = torch.zeros_like(p)\n\n grad = p.grad.mul_(clip_global_grad_norm)\n if 'pre_grad' not in state or group['step'] == 1:\n state['pre_grad'] = grad\n\n copy_grad = grad.clone()\n\n exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']\n diff = grad - state['pre_grad']\n\n update = grad + beta2 * diff\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t\n exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t\n exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t\n\n denom = ((exp_avg_sq).sqrt() / math.sqrt(bias_correction3)).add_(group['eps'])\n update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)\n\n if group['no_prox']:\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n p.add_(update, alpha=-group['lr'])\n else:\n p.add_(update, alpha=-group['lr'])\n p.data.div_(1 + group['lr'] * group['weight_decay'])\n\n state['pre_grad'] = copy_grad", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = 
y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_estimator(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_params(self, loss, step_size=0.5, first_order=False):\n #grads = torch.autograd.grad(loss, self.parameters(),\n # create_graph=not first_order)\n self.optim.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm(self.parameters(), self.grad_clip_norm)\n self.optim.step()\n #updated_params = OrderedDict()\n #self.relation_emb.zero_grad()\n #self.entity_emb.zero_grad()\n #for (name, param), grad in zip(self.named_parameters(), grads):\n '''\n for (name, param) in self.named_parameters():\n updated_params[name] = param.clone()\n if param.grad is not None:\n updated_params[name] -= step_size * param.grad\n\n return updated_params\n '''", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n cost = compute_cost(features, values, theta)/(2.0*m)\r\n cost_history.append([cost])\r\n \r\n error = features.dot(theta) - values\r\n error = np.reshape(error,(error.shape[0], 1))\r\n errorWeighted = features*error\r\n errorSum = (np.sum(errorWeighted,0))/(m*1.0)\r\n theta = theta - alpha*errorSum \r\n \r\n return theta, pandas.Series(cost_history)", "def gradient_descent(initial_theta, X, y, niter, alpha, Lambda=0.0):\n theta_list = []\n cost_list = []\n\n theta = initial_theta\n for i in range(0, niter):\n theta -= alpha*gradient(theta, X, y, Lambda)\n theta_list.append(theta)\n cost_list.append(cost(theta, X, y, Lambda))\n\n return theta_list, cost_list", "def perform_step(self) -> None:\n self.n_it = self.n_it + 1\n self.update_learning_rate()\n observed_gradient = self.get_observed_gradient(self.theta)\n latent_gradient = self.compute_latent_gradient(observed_gradient)\n eta = np.random.normal(0.0, np.sqrt(self.epsilon), self.dim_latent)\n self.omega = self.omega + self.epsilon / 2.0 * latent_gradient + eta\n self.theta = self.gplvm_model.predict(self.omega)[0]\n return", "def __update_weights_grad_desc(self, x_train, y_train):\n\n predictions = self.__compute_prediction(x_train)\n weights_delta = np.dot(x_train.T, y_train - predictions)\n\n m = y_train.shape[0]\n self.__weights += self.__learning_rate / float(m) * weights_delta", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all 
previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def adversarial_update(model, optimiser, loss_fn, x, y, epoch, eps, step, k, norm, **kwargs):\n model.train()\n\n # Adversial perturbation\n if norm == 'inf':\n x_adv = iterated_fgsm(model, x, y, loss_fn, k=k, step=step, eps=eps, norm='inf', random=args.random_start)\n elif norm == 2:\n x_adv = pgd(model, x, y, loss_fn, k=k, step=step, eps=eps, norm=2, random=args.random_start)\n else:\n raise ValueError('Unsupported norm')\n\n optimiser.zero_grad()\n y_pred = model(x_adv)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimiser.step()\n\n return loss, y_pred", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n thetas = [theta]\n cost = np.zeros(num_iters)\n\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate, lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas", "def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def _update_params(self, gradients: dict, learning_rate: float):\n L = len(self.activations)\n\n for l in range(L):\n self.params[\"W_\" + str(l + 1)] = self.params[\"W_\" + str(l + 1)] - learning_rate * gradients[\n \"dW\" + str(l + 1)]\n\n self.params[\"b_\" + str(l + 1)] = self.params[\"b_\" + str(l + 1)] - learning_rate * gradients[\n \"db\" + str(l + 1)]", "def step_update(self, num_updates):\n if self.args['optimization']['warmup_updates'] > 0 and \\\n num_updates <= self.args['optimization']['warmup_updates']:\n self.warmup_factor = num_updates / float(self.args['optimization']['warmup_updates'])\n lr = self.warmup_factor * self.lr\n elif num_updates >= self.total_num_update:\n lr = self.end_learning_rate\n else:\n warmup = self.args['optimization']['warmup_updates']\n lr_range = self.lr - self.end_learning_rate\n pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)\n lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate\n self.optimizer.set_lr(lr)\n return self.optimizer.get_lr()", "def gradient_step(self):\n n = 10 #Granularity of line search\n grad = self.gradient()\n W = project(self.W[-1] + grad)\n A = np.linspace(0., self.alpha, n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. 
- a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def _apply_gradient_descent(self, gradients):\n updated_sd = {}\n global_model = self._get_global_model()\n \n for name, param, grad in zip(global_model.keys(), global_model.values(), gradients):\n updated_sd[name] = param - self.global_lr * grad\n \n self._load_global_model(updated_sd)", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def _batch_gradient_descent(self, X, y, lr, epochs):\n\n # Initialize the bias and weights.\n _, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n for i in range(epochs):\n # Calculate and sum the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X, y)\n\n # Show the gradient of each epoch.\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)\n\n # Update the bias and weight by gradient of current epoch\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights", "def update_step(image_batch, label_batch, model, learning_rate):\n f = model.forward(image_batch)\n gradient = model.backward(f,label_batch)\n model.w = model.w - learning_rate*gradient", "def update_parameters(parameters, grads, learning_rate=0.01):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n ### END CODE HERE ###\n\n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n dW3 = grads[\"dW3\"]\n db3 = grads[\"db3\"]\n ## END CODE HERE ###\n\n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - (learning_rate * dW1)\n b1 = b1 - (learning_rate * db1)\n W2 = W2 - (learning_rate * dW2)\n b2 = b2 - (learning_rate * db2)\n W3 = W3 - (learning_rate * dW3)\n b3 = b3 - (learning_rate * db3)\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters", "def gradient_descent(x_data, starting_b, starting_w, learning_rate, num_iterations):\n\n b = starting_b\n w = starting_w\n\n for i in range(num_iterations):\n b, w = step_gradient(b, w, x_data, learning_rate)\n b_history.append(b) # stores bias approximations to plot\n w_history.append(w) # stores weight approximations to plot\n err = error(b, w, x_data)\n if err <= 
.6: # if the error is acceptable exit iterations loop\n print('error = % f' % err)\n break\n return [b, w]", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def step_update(self, num_updates):\n if num_updates < self.cfg.warmup_updates:\n self.lr = self.warmup_init_lr + num_updates * self.warmup_lr_step\n else:\n curr_updates = num_updates - self.cfg.warmup_updates\n lr_mult = self.lr_decay ** (curr_updates // self.lr_deacy_period)\n self.lr = max(self.max_lr * lr_mult, self.min_lr)\n\n self.optimizer.set_lr(self.lr)\n return self.lr", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def perform_step(self) -> None:\n self.n_it = self.n_it + 1\n self.update_learning_rate()\n observed_gradient = self.get_observed_gradient(self.theta)\n latent_gradient = self.compute_latent_gradient(observed_gradient)\n g_omega = self.gplvm_model.predict_wishart_embedding(self.omega)[0]\n g_inv_omega = np.linalg.inv(g_omega)\n mu = np.dot(g_inv_omega, latent_gradient[0, :])\n epsilon_derivative = 1e-4\n for k in range(self.dim_latent):\n increment = np.copy(self.omega)\n increment[0, k] = increment[0, k] + epsilon_derivative\n g_derivative =\\\n (self.gplvm_model.predict_wishart_embedding(increment)[0] -\n g_omega) / epsilon_derivative\n tmp_mu = np.dot(g_inv_omega, np.dot(g_derivative, g_inv_omega))\n mu = mu - 2.0 * tmp_mu[:, k]\n mu = mu + g_inv_omega[:, k] * np.trace(np.dot(g_inv_omega,\n 
g_derivative))\n g_inv_sqrt_omega = sqrtm(g_inv_omega)\n eta = np.random.normal(0.0, np.sqrt(self.epsilon), self.dim_latent)\n self.omega = self.omega + self.epsilon / 2.0 * mu +\\\n np.dot(g_inv_sqrt_omega, eta)\n self.theta = self.gplvm_model.predict(self.omega)[0]\n return", "def update_generate_params(self,inps,trgs,preds):\n batch_size = np.shape(trgs)[0]\n\n self.delta_weight_h_to_v = self.learning_rate / batch_size * np.transpose(trgs) @ (inps - preds)\n self.delta_bias_v = self.learning_rate * np.mean(inps - preds)\n \n self.weight_h_to_v += self.delta_weight_h_to_v\n self.bias_v += self.delta_bias_v \n \n return", "def update(self, X, y):\n proba = self.predict_proba(X)\n top_loss = proba - y\n bias_gradient = np.sum(top_loss)\n weight_gradient = (top_loss).T.dot(X)\n\n # the gradient update\n self.b = self.b - self.lrate * bias_gradient\n self.W = self.W - self.lrate * weight_gradient", "def _update_parameters(self, delta):\n if delta is not None:\n self.SGD.update_with_L1_regularization(self.variables, delta, self.L1)", "def test_gradient_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n gv = objax.GradValues(gp_model.energy, gp_model.vars())\n gv_markov = objax.GradValues(markovgp_model.energy, markovgp_model.vars())\n\n lr_adam = 0.1\n lr_newton = 1.\n opt = objax.optimizer.Adam(gp_model.vars())\n opt_markov = objax.optimizer.Adam(markovgp_model.vars())\n\n gp_model.update_posterior()\n gp_grads, gp_value = gv()\n gp_loss_ = gp_value[0]\n opt(lr_adam, gp_grads)\n gp_hypers = np.array([gp_model.kernel.lengthscale, gp_model.kernel.variance, gp_model.likelihood.variance])\n print(gp_hypers)\n print(gp_grads)\n\n markovgp_model.update_posterior()\n markovgp_grads, markovgp_value = gv_markov()\n markovgp_loss_ = markovgp_value[0]\n opt_markov(lr_adam, markovgp_grads)\n markovgp_hypers = np.array([markovgp_model.kernel.lengthscale, markovgp_model.kernel.variance,\n markovgp_model.likelihood.variance])\n print(markovgp_hypers)\n print(markovgp_grads)\n\n np.testing.assert_allclose(gp_grads[0], markovgp_grads[0], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[1], markovgp_grads[1], rtol=1e-4)\n np.testing.assert_allclose(gp_grads[2], markovgp_grads[2], rtol=1e-4)", "def update_params(self, loss, step_size=0.5, first_order=False):\n grads = torch.autograd.grad(loss, self.parameters(),\n create_graph=not first_order)\n updated_params = OrderedDict()\n for (name, param), grad in zip(self.named_parameters(), grads):\n updated_params[name] = param - step_size * grad\n\n return updated_params", "def perform_step(self) -> None:\n self.update_learning_rate()\n observed_gradient = self.objective.get_gradient(self.theta)\n eta = np.random.normal(0.0, np.sqrt(self.epsilon), self.dim_observed)\n self.theta = self.theta + self.epsilon / 2.0 * observed_gradient + eta\n return", "def step(self):\n\n with torch.no_grad():\n for group in self.param_groups:\n lr = group[\"lr\"]\n for p in group[\"params\"]:\n\n if p.grad is None:\n continue\n\n lambda_square = self.mf.conf_factor(p, keepdim=True) ** 2\n p.data.copy_(self.mf.exp(p, -lr * p.grad.data / lambda_square))", "def gradientDescentMulti(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n theta, J_history = gradientDescent(X, y, theta, alpha, num_iters)\n return theta, J_history", "def test_update_parameters(model):\n train_inputs = torch.tensor([[1., 2., 3.]])\n 
train_loss = 0.5 * (model(train_inputs) ** 2)\n\n params = gradient_update_parameters(model,\n train_loss,\n params=None,\n step_size=0.5,\n first_order=False)\n\n assert train_loss.item() == 264.5\n assert list(params.keys()) == ['weight']\n assert torch.all(params['weight'].data == torch.tensor([[-9.5, -20., -29.5]]))\n\n \"\"\"\n The new loss function (still with respect to the weights of the model w) is\n defined as:\n g(w) = 0.5 * (4 * w'_1 + 5 * w'_2 + 6 * w'_3) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * df / dw_1)\n + 5 * (w_2 - 0.5 * df / dw_2)\n + 6 * (w_3 - 0.5 * df / dw_3)) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * 1 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 5 * (w_2 - 0.5 * 2 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 6 * (w_3 - 0.5 * 3 * (1 * w_1 + 2 * w_2 + 3 * w_3))) ** 2\n = 0.5 * ((4 - 4 * 0.5 - 5 * 1.0 - 6 * 1.5) * w_1\n + (5 - 4 * 1.0 - 5 * 2.0 - 6 * 3.0) * w_2\n + (6 - 4 * 1.5 - 5 * 3.0 - 6 * 4.5) * w_3) ** 2\n = 0.5 * (-12 * w_1 - 27 * w_2 - 42 * w_3) ** 2\n\n Therefore the gradient of the function g with respect to w (and evaluated\n at w = [2, 3, 5]) is:\n dg / dw_1 = -12 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 3780\n dg / dw_2 = -27 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 8505\n dg / dw_3 = -42 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 13230\n \"\"\"\n test_inputs = torch.tensor([[4., 5., 6.]])\n test_loss = 0.5 * (model(test_inputs, params=params) ** 2)\n\n grads = torch.autograd.grad(test_loss, model.parameters())\n\n assert test_loss.item() == 49612.5\n assert len(grads) == 1\n assert torch.all(grads[0].data == torch.tensor([[3780., 8505., 13230.]]))", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def update(self,parameters, grads):\n \n L = len(parameters) // 2 # number of layers in the neural network\n #print(L)\n\n # Update rule for each parameter. Use a for loop.\n for l in range(L):\n \n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - self.alpha * grads[\"dW\" + str(l+1)]\n \n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - self.alpha * grads[\"db\" + str(l+1)]\n \n parameters[\"W\" + str(l+1)][np.isnan(parameters[\"W\" + str(l+1)])] = 0\n parameters[\"b\" + str(l+1)][np.isnan(parameters[\"b\" + str(l+1)])] = 0\n \n return parameters", "def test_multiple_update_parameters(model):\n train_inputs = torch.tensor([[1., 2., 3.]])\n\n train_loss_1 = 0.5 * (model(train_inputs) ** 2)\n params_1 = gradient_update_parameters(model,\n train_loss_1,\n params=None,\n step_size=1.,\n first_order=False)\n\n assert train_loss_1.item() == 264.5\n assert list(params_1.keys()) == ['weight']\n assert torch.all(params_1['weight'].data == torch.tensor([[-21., -43., -64.]]))\n\n \"\"\"\n The new loss function is defined as\n g(w') = 0.5 * (1 * w'_1 + 2 * w'_2 + 3 * w'_3) ** 2\n with w' = [-21, -43, -64].\n\n The gradient of g with respect to w' is:\n dg / dw'_1 = 1 * (1 * w'_1 + 2 * w'_2 + 3 * w'_3) = -299\n dg / dw'_2 = 2 * (1 * w'_1 + 2 * w'_2 + 3 * w'_3) = -598\n dg / dw'_3 = 3 * (1 * w'_1 + 2 * w'_2 + 3 * w'_3) = -897\n\n The updated parameters are given by:\n w''_1 = w'_1 - 1. * dg / dw'_1 = -21 - 1. * -299 = 278\n w''_2 = w'_2 - 1. * dg / dw'_2 = -43 - 1. * -598 = 555\n w''_3 = w'_3 - 1. * dg / dw'_3 = -64 - 1. 
* -897 = 833\n \"\"\"\n train_loss_2 = 0.5 * (model(train_inputs, params=params_1) ** 2)\n params_2 = gradient_update_parameters(model,\n train_loss_2,\n params=params_1,\n step_size=1.,\n first_order=False)\n\n assert train_loss_2.item() == 44700.5\n assert list(params_2.keys()) == ['weight']\n assert torch.all(params_2['weight'].data == torch.tensor([[278., 555., 833.]]))\n\n \"\"\"\n The new loss function is defined as\n h(w'') = 0.5 * (1 * w''_1 + 2 * w''_2 + 3 * w''_3) ** 2\n with w'' = [278, 555, 833].\n\n The gradient of h with respect to w'' is:\n dh / dw''_1 = 1 * (1 * w''_1 + 2 * w''_2 + 3 * w''_3) = 3887\n dh / dw''_2 = 2 * (1 * w''_1 + 2 * w''_2 + 3 * w''_3) = 7774\n dh / dw''_3 = 3 * (1 * w''_1 + 2 * w''_2 + 3 * w''_3) = 11661\n\n The updated parameters are given by:\n w'''_1 = w''_1 - 1. * dh / dw''_1 = 278 - 1. * 3887 = -3609\n w'''_2 = w''_2 - 1. * dh / dw''_2 = 555 - 1. * 7774 = -7219\n w'''_3 = w''_3 - 1. * dh / dw''_3 = 833 - 1. * 11661 = -10828\n \"\"\"\n train_loss_3 = 0.5 * (model(train_inputs, params=params_2) ** 2)\n params_3 = gradient_update_parameters(model,\n train_loss_3,\n params=params_2,\n step_size=1.,\n first_order=False)\n\n assert train_loss_3.item() == 7554384.5\n assert list(params_3.keys()) == ['weight']\n assert torch.all(params_3['weight'].data == torch.tensor([[-3609., -7219., -10828.]]))\n\n \"\"\"\n The new loss function is defined as\n l(w) = 4 * w'''_1 + 5 * w'''_2 + 6 * w'''_3\n with w = [2, 3, 5] and w''' = [-3609, -7219, -10828].\n\n The gradient of l with respect to w is:\n dl / dw_1 = 4 * dw'''_1 / dw_1 + 5 * dw'''_2 / dw_1 + 6 * dw'''_3 / dw_1\n = ... = -5020\n dl / dw_2 = 4 * dw'''_1 / dw_2 + 5 * dw'''_2 / dw_2 + 6 * dw'''_3 / dw_2\n = ... = -10043\n dl / dw_3 = 4 * dw'''_1 / dw_3 + 5 * dw'''_2 / dw_3 + 6 * dw'''_3 / dw_3\n = ... 
= -15066\n \"\"\"\n test_inputs = torch.tensor([[4., 5., 6.]])\n test_loss = model(test_inputs, params=params_3)\n grads = torch.autograd.grad(test_loss, model.parameters())\n\n assert test_loss.item() == -115499.\n assert len(grads) == 1\n assert torch.all(grads[0].data == torch.tensor([[-5020., -10043., -15066.]]))", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def gradient_model (self, x, initial_weights = None, \\\n step_size = 5.0e-6, tol = 2.5e+7, n_iters = 501, l2 = 0):\n # setup initial intercept, slope, iter number and rss\n if initial_weights is None:\n weights = self.initial_weight\n else:\n weights = initial_weights\n # Compute indicator value for (y_i = +1)\n indicators = np.array([int (i) for i in (self.train_output_y==1)])\n for itr in range(n_iters):\n # Predict P(y_i = +1|x_1,w) using your predict_probability() function\n _, pred_probs = self.predict_probability(self.train_feature_x, weights)\n \n # Compute the errors as indicator - predictions\n errors = indicators - pred_probs\n\n #Update the weights:\n derivative = self.feature_derivative(errors, weights, l2)\n weights = weights + derivative * (step_size) \n \n #check if converged\n #todo\n \"\"\"\n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = self.compute_log_likelihood(indicators,weights)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(n_iters))), itr, lp)\n \"\"\"\n \n #check weights\n #print \"\\n\"\n #print \"The weights for features: \", weights\n #final prediction\n preds = self.prediction(x, weights)\n return preds, weights", "def step(self):\n\n self.compute_lr()\n\n self.optimizer.param_groups[self.param_group]['lr'] = self.lr\n self.optimizer.param_groups[self.param_group]['momentum'] = self.momentum", "def gradient_descent_step(self, x, y, learning_rate):\n # compute derivative of loss wrt Z\n dZ = self.derivative_loss(y, self.predict(x))\n dW = np.dot(dZ, x)\n # subtract average derivative from weights\n self.w -= learning_rate * 1.0/dW.shape[0] * dW\n if self.fit_b:\n self.b -= learning_rate * (1.0/x.shape[0] * np.sum(dZ))", "def step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n beta1, beta2 = group['betas']\n base_lr = group['lr']\n max_norm = group['max_norm']\n fixed_norm = group['fixed_norm']\n zero_mean = group['zero_mean']\n normalizing_grad_by_norm = group['normalizing_grad_by_norm']\n eps = group['eps']\n weight_decay = group['weight_decay']\n l2_regularization = group['l2_regularization']\n bias_correction1 = 1\n for p in group['params']:\n if p.grad is None:\n continue\n pmax_norm, pfixed_norm, pzero_mean = _get_opt_args(\n p, max_norm, fixed_norm, 
zero_mean)\n lr = get_opt_arg(p, 'lr', base_lr)\n lr_scale = get_opt_arg(p, 'lr_scale', 1)\n state = self.state[p]\n state['step'] += 1\n bias_correction2 = 1 - beta2**state['step']\n grad = p.grad\n pweight_decay = get_opt_arg(p, 'weight_decay', weight_decay)\n pl2_regularization = get_opt_arg(p, 'l2_regularization',\n l2_regularization)\n if pl2_regularization != 0:\n grad = grad.add(p, alpha=pl2_regularization)\n if beta1 > 0:\n bias_correction1 = 1 - beta1**state['step']\n exp_avg = state['exp_avg']\n exp_avg.lerp_(grad, 1 - beta1)\n else:\n exp_avg = grad\n if normalizing_grad_by_norm:\n sq = _norm(grad)**2\n else:\n sq = grad**2\n state['exp_avg_sq'].lerp_(sq, 1 - beta2)\n denom = state['exp_avg_sq'].sqrt().add_(eps)\n if pweight_decay > 0:\n p.mul_(1 - lr_scale * lr * pweight_decay)\n # the exponential moving average of exp_avg and exp_avg_sq are not\n # unbiased estimate of the mean. Correct them using bas_correction1\n # and bias_correct2 as suggest by the original Adam paper.\n step_size = lr_scale * lr * math.sqrt(\n bias_correction2) / bias_correction1\n # p <- p - step_size * exp_avg / denom\n p.addcdiv_(exp_avg, denom, value=-step_size)\n _normalize(p, pmax_norm, pfixed_norm, pzero_mean)\n\n return loss", "def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):\n n = len_data\n # WE NEED TO transpose data_x into (p+1) *n ,theta is 1*(p+1)\n prod = np.dot(theta, data_x.transpose())\n\n prod -= data_y\n print(\"pro: data_x\", prod.shape, data_x.shape)\n #prod represent the loss of the hypothesis and true label\n sum_grad = np.dot(prod, data_x)\n print(\"总梯度的值:\",sum_grad.shape)\n\n # batch-gradient descent\n theta = theta -(alpha / n) * sum_grad\n return theta", "def logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma):\n\tw = initial_w\n\n\tfor iter in range(max_iters):\n\t\tw = learning_by_gradient_descent(y, tx, w, gamma)\n\n\treturn w", "def __init__(self, initial_step_size=0.01 * units.angstroms):\n\n timestep = 1.0 * units.femtoseconds\n super(GradientDescentMinimizationIntegrator, self).__init__(timestep)\n\n self.addGlobalVariable(\"step_size\", initial_step_size / units.nanometers)\n self.addGlobalVariable(\"energy_old\", 0)\n self.addGlobalVariable(\"energy_new\", 0)\n self.addGlobalVariable(\"delta_energy\", 0)\n self.addGlobalVariable(\"accept\", 0)\n self.addGlobalVariable(\"fnorm2\", 0)\n self.addPerDofVariable(\"x_old\", 0)\n\n # Update context state.\n self.addUpdateContextState()\n\n # Constrain positions.\n self.addConstrainPositions()\n\n # Store old energy and positions.\n self.addComputeGlobal(\"energy_old\", \"energy\")\n self.addComputePerDof(\"x_old\", \"x\")\n\n # Compute sum of squared norm.\n self.addComputeSum(\"fnorm2\", \"f^2\")\n\n # Take step.\n self.addComputePerDof(\"x\", \"x+step_size*f/sqrt(fnorm2 + delta(fnorm2))\")\n self.addConstrainPositions()\n\n # Ensure we only keep steps that go downhill in energy.\n self.addComputeGlobal(\"energy_new\", \"energy\")\n self.addComputeGlobal(\"delta_energy\", \"energy_new-energy_old\")\n # Accept also checks for NaN\n self.addComputeGlobal(\"accept\", \"step(-delta_energy) * delta(energy - energy_new)\")\n\n self.addComputePerDof(\"x\", \"accept*x + (1-accept)*x_old\")\n\n # Update step size.\n self.addComputeGlobal(\"step_size\", \"step_size * (2.0*accept + 0.5*(1-accept))\")", "def learning_by_gradient_descent(y, tx, w, gamma):\n\tgrad = calculate_gradient(y, tx, w)\n\n\tw = w - gamma * grad\n\treturn w", "def _fit(self, init_step_size, y):\n step_size = 
np.zeros(self.max_iter + 1)\n beta = np.zeros((self.max_iter + 1, self.d))\n theta = np.zeros((self.max_iter + 1, self.d))\n beta_mask = np.zeros((self.max_iter + 1, self.n))\n theta_mask = np.zeros((self.max_iter + 1, self.n))\n grad_beta = np.zeros((self.max_iter + 1, self.d))\n grad_theta = np.zeros((self.max_iter + 1, self.d))\n norm_grad_beta = np.zeros(self.max_iter + 1)\n norm_grad_theta = np.zeros(self.max_iter + 1)\n\n step_size[0] = init_step_size\n grad_beta[0, :], beta_mask[0, :] = self._gradient(beta[0, :], y)\n grad_theta[0, :], theta_mask[0, :] = self._gradient(theta[0, :], y)\n norm_grad_beta[0] = np.linalg.norm(grad_beta[0, :])\n norm_grad_theta[0] = np.linalg.norm(grad_theta[0, :])\n for t in range(self.max_iter):\n if self.verbose:\n print(\"ITERATION {}\".format(t))\n\n step_size[t + 1], beta_mask[t + 1, :] = self._backtracking(step_size[t], theta[t], theta_mask[t, :], grad_theta[t, :], norm_grad_theta[t], y)\n beta[t + 1, :] = theta[t, :] - step_size[t + 1]*grad_theta[t, :]\n theta[t + 1, :] = beta[t + 1, :] + t/(t + 3)*(beta[t + 1, :] - beta[t, :])\n\n grad_beta[t + 1, :], _ = self._gradient(beta[t + 1, :], y, mask=beta_mask[t + 1, :])\n grad_theta[t + 1, :], theta_mask[t + 1, :] = self._gradient(theta[t + 1, :], y)\n norm_grad_beta[t + 1] = np.linalg.norm(grad_beta[t + 1, :])\n norm_grad_theta[t + 1] = np.linalg.norm(grad_theta[t + 1, :])\n\n if norm_grad_beta[t + 1] <= self.epsilon:\n break\n\n if self.verbose and t + 1 == self.max_iter:\n print(\"Maximum iterations reached\")\n\n return beta[t + 1, :]", "def _UpdateGradient(self):\n self.mol.GetGradient('analytic')", "def step(self):\n loss = None\n for group in self.param_groups:\n for p in group['params']:\n grad = p.grad.data\n state = self.state[p]\n\n if len(state) == 0:\n t = 0\n m = torch.zeros_like(p.data)\n v = torch.zeros_like(p.data)\n # v_hat = torch.zeros_like(p.data)\n else:\n t = state['t']\n m = state['m']\n v = state['v']\n\n b1 = group['beta1']\n b2 = group['beta2']\n t += 1\n\n m = torch.mul(m, b1) + (1-b1) * grad\n v = torch.mul(v, b2) + (1-b2) * grad**2\n\n m_unbias = 1 / (1 - b1**t)\n v_unbias = 1 / (1 - b2**t)\n\n p.data -= (group['lr'] * m_unbias / math.sqrt(v_unbias)) * \\\n m / (math.sqrt(v_unbias) + group['eps'])\n\n # v_hat = torch.max(v_hat, v)\n # p.data -= group['lr'] / m_unbias * m * v_hat / (v_unbias.sqrt() + group['eps'])\n state['t'] = t\n state['m'] = m\n state['v'] = v\n\n return loss", "def _update_parameters(self, delta):\n if delta is not None:\n VectorRegression._update_parameters(self, delta)", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def update(self, state, y):\n y_pred = self.model(torch.Tensor(state))\n loss = self.criterion(y_pred, Variable(torch.Tensor(y)))\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def update_parameters(self, learning_rate):\n for i in range(self.L - 1):\n self.W[i] -= learning_rate * self.dW[i]\n self.b[i] -= learning_rate * self.db[i]", "def optimize(w, b, X, Y, num_iterations,learning_rate,print_cost = False):\n costs = []\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads,cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the 
cost every 100 training examples\n if print_cost and i%100==0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n\n params = {\n \"w\":w,\n \"b\":b\n }\n grads = {\n \"dw\":dw,\n \"db\":db\n }\n return params,grads,costs", "def sgd_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n \n train_step = tf.train.GradientDescentOptimizer(learning_rate)\n #train_step = tf.train.GradientDescentOptimizer(learning_rate)\n return train_step", "def update_parameters(self, parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural network\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n self.t += 1\n\n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n self.v[\"dW\" + str(l + 1)] = self.beta1 * self.v[\"dW\" + str(l + 1)] + (1 - self.beta1) * grads['dW' + str(l + 1)]\n self.v[\"db\" + str(l + 1)] = self.beta1 * self.v[\"db\" + str(l + 1)] + (1 - self.beta1) * grads['db' + str(l + 1)]\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n v_corrected[\"dW\" + str(l + 1)] = self.v[\"dW\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n v_corrected[\"db\" + str(l + 1)] = self.v[\"db\" + str(l + 1)] / (1 - np.power(self.beta1, self.t))\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n self.s[\"dW\" + str(l + 1)] = self.beta2 * self.s[\"dW\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['dW' + str(l + 1)], 2)\n self.s[\"db\" + str(l + 1)] = self.beta2 * self.s[\"db\" + str(l + 1)] + (1 - self.beta2) * np.power(grads['db' + str(l + 1)], 2)\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n s_corrected[\"dW\" + str(l + 1)] = self.s[\"dW\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n s_corrected[\"db\" + str(l + 1)] = self.s[\"db\" + str(l + 1)] / (1 - np.power(self.beta2, self.t))\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". 
Output: \"parameters\".\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * v_corrected[\"dW\" + str(l + 1)] / np.sqrt(self.s[\"dW\" + str(l + 1)] + self.epsilon)\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * v_corrected[\"db\" + str(l + 1)] / np.sqrt(self.s[\"db\" + str(l + 1)] + self.epsilon)\n\n return parameters", "def apply_gradient(params: torch.Tensor, grads: torch.Tensor, lr: float) -> torch.Tensor:\n params_prime = params + lr * grads\n return params_prime", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n for n_iter in range(max_iters):\n # compute gradient\n grad = compute_gradient(y, tx, w)\n # gradient w by descent update\n if n_iter % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n w -= gamma * grad\n\n return w, compute_cost(y, tx, w)", "def gradient_descent(self, alpha, batch, weight_gradients, bias_gradients):\n self._dwg = [0] * 8\n self._dbg = [0] * 8\n self._cost = 0\n\n workers = []\n for i in range(batch[0].shape[0]-1):\n p = Process(target=self.mp_gd, args=(batch, weight_gradients, bias_gradients, i))\n workers.append(p)\n p.start()\n\n\n for p in workers:\n self._cost += self._q.get()\n\n self._dwg = list(map(add, self._dwg, self._dwgq.get()))\n self._dbg = list(map(add, self._dbg, self._dbgq.get()))\n\n p.join()\n\n for j in range(len(self._dwg)):\n weight_gradients[j] = weight_gradients[j] - alpha * self._dwg[j]\n bias_gradients[j] = bias_gradients[j] - alpha * self._dbg[j]\n cost = self._cost/len(batch)\n self._cost_history.append(cost)\n\n return weight_gradients, bias_gradients", "def gradient_update(self, states, Q_target):\n params = [self.w1, self.w2, self.w3, self.b1, self.b2, self.b3]\n loss = self.get_loss(states, Q_target)\n gradients = nn.gradients(loss, params)\n self.w1.update(gradients[0], -self.learning_rate)\n self.w2.update(gradients[1], -self.learning_rate)\n self.w3.update(gradients[2], -self.learning_rate)\n self.b1.update(gradients[3], -self.learning_rate)\n self.b2.update(gradients[4], -self.learning_rate)\n self.b3.update(gradients[5], -self.learning_rate)", "def update_parameters(parameters, grads, learning_rate = 1.2):\n\t# Retrieve each parameter from the dictionary \"parameters\"\n\tW1 = parameters['W1']\n\tb1 = parameters['b1']\n\tW2 = parameters['W2']\n\tb2 = parameters['b2']\n\n\t# Retrieve each gradient from the dictionary \"grads\"\n\tdW1 = grads['dW1']\n\tdb1 = grads['db1']\n\tdW2 = grads['dW2']\n\tdb2 = grads['db2']\n\n\t# Update rule for each parameter\n\tW1 = W1 - learning_rate*dW1\n\tb1 = b1 - learning_rate*db1\n\tW2 = W2 - learning_rate*dW2\n\tb2 = b2 - learning_rate*db2\n\n\tparameters = {\"W1\": W1,\n\t\t\t\t\t\"b1\": b1,\n\t\t\t\t\t\"W2\": W2,\n\t\t\t\t\t\"b2\": b2}\n\n\treturn parameters", "def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history", "def batchGD(self, x, y, epochs):\n print(\"Training using batch gradient descent\")\n epoch = 0\n #output training progress ten times in run\n outputChunk = int ( epochs / 10 )\n\n while epoch <= epochs:\n\n #output progress? 
\n if epoch % outputChunk is 0:\n J = self.costFunction(x,y)\n print(\"Epoch=\", epoch, \"J=\", J)\n\n #get analytic gradients \n partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o = \\\n self.deriv_costFunction( x, y )\n #take a GD step\n #To-do - implement variable learning rate\n self.w_ih -= partial_J_w_ih\n self.w_ho -= partial_J_w_ho\n self.b_h -= partial_J_b_h\n self.b_o -= partial_J_b_o\n \n epoch += 1", "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta", "def update(self, max_norm=1.0):\n \n theta_prime = []\n\n for i, batch in enumerate(self.tasks):\n y_hat = self.constraint(self.theta, self.f(batch)) # gather predictions to single dimension\n loss = self.criteon( y_hat, self.y )\n #compute gradients\n grad = torch.autograd.grad(loss, self.theta)\n #update params\n theta_prime.append( self.theta - self.alpha * grad[0] )\n\n del loss\n\n #perform meta-update\n m_loss = torch.tensor(0.0, requires_grad=True)\n for i in range(len(self.tasks)):\n theta = theta_prime[i]\n batch = self.tasks[i]\n y_hat = self.constraint(theta, self.f(batch)) # gather predictions to single dimension\n m_loss = m_loss + self.criteon( y_hat, self.y ) # updating meta-loss\n \n #zero gradient before running backward pass\n self.meta_optim.zero_grad()\n\n #backward pass\n m_loss.backward(retain_graph=True)\n\n #clip gradients\n nn.utils.clip_grad_norm_([self.theta], max_norm)\n \n #one-step gradient descent\n self.meta_optim.step()", "def update_lr(epoch, optimizer, args):\n gamma = 0\n for step in args.step:\n if epoch + 1.0 > int(step):\n gamma += 1\n lr = args.lr * math.pow(0.1, gamma)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def sgd(params, grads, lr, batch_size): #@save\n for param, grad in zip(params, grads):\n param.assign_sub(lr*grad/batch_size)", "def update_parameters(parameters, grads, learning_rate):\n L = len(parameters) // 2\n\n for i in range(L):\n parameters[\"W\"+str(i+1)] = parameters[\"W\"+str(i+1)] - learning_rate * grads[\"dW\"+str(i+1)]\n parameters[\"b\"+str(i+1)] = parameters[\"b\"+str(i+1)] - learning_rate * grads[\"db\"+str(i+1)]\n\n return parameters", "def update_model_parameters(parameters, grads, learning_rate):\n L = len(parameters) /2 # number of layers in the neural network\n\n for l in range(int(L)):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n return parameters\n # raise NotImplementedError", "def RatingsGradientDescent(params, Y, R, num_users, num_movies, num_features, lbd, alpha, num_iters):\n J_history = np.zeros(num_iters)\n for i in range(num_iters):\n J_history[i], grad = cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd)\n params = params-alpha*grad\n if i % 100 == 99:\n print('Step %i, cost=%f' % (i+1, J_history[i]))\n return params, J_history", "def step(self):\n for p, grad, v, square_grad_avg, delta_x_acc in self.params:\n # Compute the running average of the squared gradients \n square_grad_avg.mul_(self.rho)\n 
square_grad_avg.addcmul_(grad, grad, value = 1 - self.rho)\n # Compute the RMS of the previous squared gradients (eps to avoid numerical issues later for division)\n std = (square_grad_avg.add_(self.eps)).sqrt_()\n # Compute the accumulated update\n delta_x = ((delta_x_acc.add_(self.eps)).sqrt_()) * grad / std\n # Accumulate the updates\n delta_x_acc.mul_(self.rho)\n delta_x_acc.addcmul_(delta_x, delta_x, value = 1 - self.rho) \n # Update the parameters\n p.add_(delta_x, alpha = - self.lr)", "def _stochastic_gradient_descent(self, X, y, lr, epochs, sample_rate):\n\n # Initialize the bias and weights.\n m, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n n_sample = int(m * sample_rate)\n for i in range(epochs):\n for idx in choice(range(m), n_sample, replace=False):\n # Calculate the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X[idx], y[idx])\n\n # Update the bias and weight by gradient of current sample\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights\n\n # Show the gradient of each epoch.\n grad_bias, grad_weights = self._get_gradient(X, y)\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)", "def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n feature_dictionary = self.featExtractor.getFeatures(state, action)\n difference = (reward + self.discount * self.computeValueFromQValues(nextState)) - self.getQValue(state, action)\n\n for feature in feature_dictionary:\n self.weights[feature] += self.alpha * difference * feature_dictionary[feature]\n\n #if self.epsilon > self.epsilon_min:\n # self.epsilon *= self.epsilon_decay", "def step(self, layers, d_weights, d_biases, lr):\n for layer, d_W, d_b in zip(layers, d_weights, d_biases):\n layer.update(-lr * d_W, -lr * d_b)", "def projected_gradient_descent(self, x, y):\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n targeted = self.y_target is not None\n num_channels = x.shape[1]\n\n if self.random:\n x_adv = random_perturbation(x_adv, self.norm, self.eps)\n\n for i in range(self.num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n\n prediction = self.model(_x_adv)\n loss = self.loss_fn(prediction, self.y_target if targeted else y)\n loss.backward()\n\n with torch.no_grad():\n # Force the gradient step to be a fixed size in a certain norm\n if self.norm == 'inf':\n gradients = _x_adv.grad.sign() * self.step_size\n else:\n # Note .view() assumes batched image data as 4D tensor\n gradients = _x_adv.grad * self.step_size / _x_adv.grad.view(\n _x_adv.shape[0], -1) \\\n .norm(self.norm, dim=-1) \\\n .view(-1, num_channels, 1, 1)\n\n if targeted:\n # Targeted: Gradient descent with on the loss of the (incorrect) target label\n # w.r.t. 
the image data\n x_adv -= gradients\n else:\n # Untargeted: Gradient ascent on the loss of the correct label w.r.t.\n # the model parameters\n x_adv += gradients\n\n # Project back into l_norm ball and correct range\n if self.norm == 'inf':\n # Workaround as PyTorch doesn't have elementwise clip\n x_adv = torch.max(torch.min(x_adv, x + self.eps), x - self.eps)\n else:\n delta = x_adv - x\n\n # Assume x and x_adv are batched tensors where the first dimension is\n # a batch dimension\n mask = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1) <= self.eps\n\n scaling_factor = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1)\n scaling_factor[mask] = self.eps\n\n # .view() assumes batched images as a 4D Tensor\n delta *= self.eps / scaling_factor.view(-1, 1, 1, 1)\n\n x_adv = x + delta\n\n x_adv = x_adv.clamp(*self.clamp)\n\n return x_adv.detach()", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr" ]
[ "0.7269349", "0.7062195", "0.7061314", "0.7021435", "0.6942396", "0.693245", "0.69282365", "0.6900024", "0.68978393", "0.68952733", "0.6880095", "0.6862448", "0.68579733", "0.6851211", "0.6844264", "0.68293726", "0.6826274", "0.67916423", "0.67859596", "0.67605203", "0.6751199", "0.67440313", "0.67440313", "0.67429173", "0.6728848", "0.67139727", "0.6698705", "0.6694347", "0.6689193", "0.66873926", "0.6667153", "0.6662345", "0.6661977", "0.6661403", "0.6655406", "0.66495883", "0.6647276", "0.6630332", "0.66235906", "0.6621541", "0.66199625", "0.6611944", "0.66075873", "0.6602738", "0.66016877", "0.6600715", "0.65990376", "0.65966433", "0.65940636", "0.65892327", "0.65871596", "0.65775144", "0.6571265", "0.6567205", "0.6566082", "0.6538002", "0.6528039", "0.65195423", "0.6514028", "0.65067756", "0.6502484", "0.6493353", "0.6487837", "0.64833313", "0.6479989", "0.6476293", "0.6467049", "0.64648235", "0.64535517", "0.64527726", "0.6447422", "0.6444302", "0.6443289", "0.6442445", "0.6442223", "0.643871", "0.64340264", "0.6433579", "0.64298284", "0.64171267", "0.64133275", "0.6409529", "0.63931924", "0.63878113", "0.63869274", "0.6375359", "0.63745624", "0.6364987", "0.63644767", "0.63597596", "0.63536435", "0.6335434", "0.63216317", "0.63194805", "0.6316585", "0.6314654", "0.6313555", "0.6309839", "0.63098156", "0.6304696", "0.63037217" ]
0.0
-1
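Several of the negative passages above take a raw gradient step with a fixed step size, and one of them even leaves a to-do for a variable learning rate. As a hedged illustration only — the function names, the inverse-time decay schedule, and the toy shapes below are assumptions, not taken from any snippet here — a plain gradient step combined with a simple decay could look like:

import numpy as np

def decayed_lr(base_lr, epoch, decay=0.01):
    # Simple inverse-time decay; the schedule itself is an illustrative choice.
    return base_lr / (1.0 + decay * epoch)

def gd_step(params, grads, lr):
    # One plain gradient-descent step on a list of NumPy arrays.
    return [p - lr * g for p, g in zip(params, grads)]

# Toy usage with made-up shapes.
params = [np.random.randn(4, 3), np.zeros((4, 1))]
grads = [np.ones((4, 3)), np.ones((4, 1))]
for epoch in range(5):
    params = gd_step(params, grads, decayed_lr(0.1, epoch))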
Creates a list of random minibatches from (X, Y)
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):

    np.random.seed(seed)            # To make your "random" minibatches the same as ours
    m = X.shape[1]                  # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1, m))

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k*mini_batch_size:(k+1)*mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k*mini_batch_size:(k+1)*mini_batch_size]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches*mini_batch_size:]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches*mini_batch_size:]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_mini_batches(X, Y, mini_batch_size = 64):\n\n\n\tm = X.shape[1]\n\tmini_batches = []\n\n\t#Shuffling around the data randomly according to the 'permutation' list\n\tpermutation = list(np.random.permutation(m))\n\tshuffled_X = X[:, permutation]\n\tshuffled_Y = Y[:, permutation].reshape((1,m))\n\n\tcomplete_minibatch_number = int(math.floor(m/mini_batch_size))\n\n\tfor k in xrange(complete_minibatch_number):\n\n\t\tmini_batch_X = shuffled_X[:, k*mini_batch_size : (k+1)*mini_batch_size]\n\t\tmini_batch_Y = shuffled_Y[:, k*mini_batch_size : (k+1)*mini_batch_size]\n\n\t\tmini_batch = (mini_batch_X, mini_batch_Y)\n\t\tmini_batches.append(mini_batch)\n\n\tif m%mini_batch_size != 0:\n\t\tmini_batch_X = shuffled_X[:, (k+1)*mini_batch_size : m]\n\t\tmini_batch_Y = shuffled_Y[:, (k+1)*mini_batch_size : m]\n\n\t\tmini_batch = (mini_batch_X, mini_batch_Y)\n\t\tmini_batches.append(mini_batch)\n\n\treturn mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \n np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n m = X.shape[1] # number of training examples\n mini_batches = []\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = int(math.floor(m/mini_batch_size)) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n ### START CODE HERE ### (approx. 2 lines)\n end = m - mini_batch_size * math.floor(m / mini_batch_size)\n mini_batch_X = shuffled_X[:,num_complete_minibatches * mini_batch_size:]\n mini_batch_Y = shuffled_Y[:,num_complete_minibatches * mini_batch_size:]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 64):\n \n m = X.shape[1] # number of training examples\n mini_batches = []\n\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). 
Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) \n # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 64):\n \n m = X.shape[0] # number of training examples.\n mini_batches = []\n \n # Step 1: Shuffle (X, Y).\n permutation = list(np.random.permutation(m))\n shuffled_X = X[permutation,:,:,:]\n shuffled_Y = Y[permutation,:]\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in partitionning.\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size).\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def generate_mines(self, number):\n mine_locations = []\n available_places = [[j, i]\n for i in xrange(0, self.x) for j in xrange(0, self.y)]\n while number > 0:\n # the chosen coordinate for a mine is appended into the list and is\n # removed from the list of choices to prevent duplicates.\n choice = random.choice(available_places)\n available_places.remove(choice)\n mine_locations.append(choice)\n number -= 1\n return mine_locations", "def iterate_minibatches(X_,y_,batchsize_,shuffle_=True):\n\tindx = [i for i in range(len(X_))]\n\trandom.shuffle(indx)\n\tindxgenerator = (i for i in indx)\n\tdel indx\n\twhile True:\n\t\tbatch_indx = list(islice(indxgenerator, batchsize_))\n\t\tif len(batch_indx)==0:\n\t\t\tbreak;\n\t\telse:\n\t\t\tyield X_[batch_indx,:], y_[batch_indx]", "def mini_batches(X, Y, mini_batch_size = 64):\n m = X.shape[1]\n mini_batches = []\n\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((10,m))\n\n num_complete_minibatches = math.floor(m/mini_batch_size)\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:,k * mini_batch_size:(k + 1) * mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n if m % mini_batch_size != 0:\n end = m - mini_batch_size * math.floor(m / mini_batch_size)\n mini_batch_X = shuffled_X[:,num_complete_minibatches * mini_batch_size:]\n mini_batch_Y = shuffled_Y[:,num_complete_minibatches * mini_batch_size:]\n 
mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n return mini_batches", "def sample_trajectories(self):\n minibatch = []\n for i in range(self.num_avg_gradient):\n trajectory = self.replay_buffer[random.randint(0, len(self.replay_buffer) - 1)]\n trajectory = trajectory[random.randint(0, len(trajectory) - 1):]\n minibatch.append(trajectory)\n return minibatch", "def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \n m = X.shape[0] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n #permutation = list(np.random.permutation(m))\n permutation = list(torch.randperm(m))\n shuffled_X = X[permutation,:,:,:]\n shuffled_Y = Y[permutation,:]\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = int(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def random_mini_batches(X, Y, mini_batch_size = 32, seed = 0):\n \n m = X.shape[1] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((Y.shape[0],m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def get_mines(self):\n\t\treturn ((x, y) for x in range(self.width)\n\t\t for y in range(self.height) if self.mines[x][y])", "def random_mini_batches(X, Y, mini_batch_size = 4, seed = 0):\n \n m = X.shape[0] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[permutation,:,:,:]\n shuffled_Y = Y[permutation,:]\n\n # Step 2: Partition (shuffled_X, shuffled_Y). 
Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]\n mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]\n mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def minibatch(x_train, y_train, batch_size, train_epochs):\n epoch = 0\n start = 0\n key = random.PRNGKey(0)\n\n while epoch < train_epochs:\n end = start + batch_size\n\n if end > x_train.shape[0]:\n key, split = random.split(key)\n permutation = random.permutation(split,\n np.arange(x_train.shape[0], dtype=np.int64))\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n epoch += 1\n start = 0\n continue\n\n yield x_train[start:end], y_train[start:end]\n start = start + batch_size", "def get_minibatches(data, minibatch_size, shuffle=True):\r\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\r\n data_size = len(data[0]) if list_data else len(data)\r\n indices = np.arange(data_size)\r\n if shuffle:\r\n np.random.shuffle(indices)\r\n for minibatch_start in np.arange(0, data_size, minibatch_size):\r\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\r\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\r\n else minibatch(data, minibatch_indices)", "def _get_bootstrap_sample(x, y, num_reps):\r\n combined = array(list(x) + list(y))\r\n total_obs = len(combined)\r\n num_x = len(x)\r\n for i in range(num_reps):\r\n # sampling with replacement\r\n indices = randint(0, total_obs, total_obs)\r\n sampled = combined.take(indices)\r\n # split into the two populations\r\n sampled_x = sampled[:num_x]\r\n sampled_y = sampled[num_x:]\r\n yield sampled_x, sampled_y", "def generator(self, random, args):\r\n locations = [i for i in range(len(self.weights))]\r\n random.shuffle(locations)\r\n return locations", "def generateMines(num_rows, num_cols, num_mines):\n arr = np.random.permutation(num_rows * num_cols)\n return arr[:num_mines]", "def get_minibatches(data, minibatch_size, shuffle=True):\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\n data_size = len(data[0]) if list_data else len(data)\n indices = np.arange(data_size)\n if shuffle:\n np.random.shuffle(indices)\n for minibatch_start in np.arange(0, data_size, minibatch_size):\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\n else minibatch(data, minibatch_indices)", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def random_mini_batches(X, mini_batch_size = 64, seed = 0):\n \n X = X.T\n \n m = X.shape[1] # number of training examples\n mini_batches = []\n np.random.seed(seed)\n \n # Step 1: Shuffle (X, Y)\n permutation = 
list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n \n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]\n mini_batch = (mini_batch_X.T)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch = (mini_batch_X.T)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def generate_small_hypermoons(m_rel: int, max_r: float, min_r: float) -> List[Tuple[List[float], float]]:\n c_big, r_big = generate_small_hypersphere(m_rel, max_r, min_r)\n\n c_small = c_big + ((np.random.rand(m_rel) * (0.4 * r_big)) * random.choice([1, -1]))\n r_small = 0.95 * r_big\n\n return [(c_big, r_big), (c_small, r_small)]", "def create_minibatch(self):\r\n if self.experience_batch.shape[0] <= self.minibatch_size:\r\n self.minibatch = self.experience_batch\r\n\r\n else:\r\n ind = np.random.randint(self.experience_batch.shape[0], size=self.minibatch_size) # same sample can be in the minibatch multiple times --> problem for algorithm ?\r\n self.minibatch = self.experience_batch[ind]", "def supervised_random_mini_batches(X, Y, mini_batch_size, seed):\n\n np.random.seed(seed)\n m = X.shape[0] #number of examples in set\n n_classes = Y.shape[1]\n mini_batches=[]\n\n permutation = list(np.random.permutation(m))\n \n shuffled_X = X[permutation,:]\n shuffled_Y = Y[permutation,:]\n #partition of (shuffled_X, shuffled_Y) except the last mini_batch\n \n num_complete_mini_batches = math.floor(m/mini_batch_size)\n for k in range(num_complete_mini_batches):\n mini_batch_X = shuffled_X[k*mini_batch_size:(k+1)*mini_batch_size,:]\n mini_batch_Y = shuffled_Y[k*mini_batch_size:(k+1)*mini_batch_size,:]\n \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # handling the case of last mini_batch < mini_batch_size \n if m % mini_batch_size !=0:\n \n mini_batch_X = shuffled_X[mini_batch_size*num_complete_mini_batches:m,:]\n mini_batch_Y = shuffled_Y[mini_batch_size*num_complete_mini_batches:m,:]\n \n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "def test_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def generate_batches(x, y, x_placeholder, y_placeholder, batch_size=20, seed=None):\n\n # Sanitize inputs\n assert(isinstance(batch_size, int)), \"generate_batches: batch size must be an integer.\"\n assert(batch_size > 0), \"generate_batches: batch size must be greater than zero.\"\n\n assert(seed is None or isinstance(seed, int)), \"generate_batches: seed must be an integer or `None`\"\n\n assert seed is None or (0 <= seed <= 2 ** 32 - 1)\n\n assert(y.shape[0] == x.shape[0]), \"Not exactly one label per datapoint!\"\n\n n_examples = x.shape[0]\n\n if seed is None:\n seed = np.random.randint(1, 100000)\n\n rng = np.random.RandomState()\n rng.seed(seed)\n\n # Check if we have enough data points to form a minibatch\n # otherwise set the batchsize equal to the number of input points\n 
initial_batch_size = batch_size\n # print(batch_size)\n batch_size = min(initial_batch_size, n_examples)\n # print(batch_size)\n\n if initial_batch_size != batch_size:\n logging.error(\"Not enough datapoints to form a minibatch. \"\n \"Batchsize was set to %s\", batch_size)\n\n while True:\n # `np.random.randint` is end-exclusive => for n_examples == batch_size, start == 0 holds\n start = rng.randint(0, (n_examples - batch_size + 1))\n\n minibatch_x = x[start:start + batch_size]\n minibatch_y = y[start:start + batch_size, None]\n\n feed_dict = {\n x_placeholder: minibatch_x,\n y_placeholder: minibatch_y.reshape(-1, 1)\n }\n yield feed_dict", "def sample(self):\n # For each row: round(random .* (max - min) + min, 0)\n random_array = prng.np_random.rand(self.num_discrete_space)\n return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]", "def random_positions(mini, maxi):\n x_cord = (maxi - mini)*np.random.random(SIZE) + mini\n y_cord = (maxi - mini)*np.random.random(SIZE) + mini\n return np.column_stack([x_cord, y_cord])", "def get_random_point(self):\n\t\tx = np.random.uniform(self.xmin, self.xmax)\n\t\ty = np.random.uniform(self.ymin, self.ymax)\n\t\treturn [x, y, 0.0]", "def process_minibatch(\n self,\n x: torch.Tensor,\n y: torch.Tensor,\n train_context: TrainContext,\n ) -> List[torch.Tensor]:\n output = train_context.model(x)\n if isinstance(output, torch.Tensor):\n return [output]\n return output", "def individual(min_val, max_val):\n value_list = [i for i in range(min_val, max_val+1)] #generate a list of 1 to 10\n random.shuffle(value_list) #shuffle the list\n return value_list", "def _generate_mines(self):\r\n mines_left = self.mines\r\n while mines_left > 0:\r\n gen_row = random.randint(0, self.rows-1)\r\n gen_col = random.randint(0, self.cols-1)\r\n\r\n if not self.fields[gen_row][gen_col].mine:\r\n self.fields[gen_row][gen_col].mine = True\r\n self._increment_fields_values(gen_row, gen_col)\r\n self.mines_cords.append((gen_row, gen_col))\r\n mines_left -= 1", "def sampling_algorithm(self, X, y):\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed\")\r\n\r\n X_min = X[y == self.min_label]\r\n X_maj = X[y == self.maj_label]\r\n\r\n # fitting nearest neighbors model to find closest majority points to\r\n # minority samples\r\n nn_params = {**self.nn_params}\r\n nn_params['metric_tensor'] = \\\r\n self.metric_tensor_from_nn_params(nn_params, X, y)\r\n\r\n density = self.calculate_density(X_min, X_maj, nn_params)\r\n\r\n # fitting nearest neighbors model to minority samples to run\r\n # SMOTE-like sampling\r\n n_neighbors = min([len(X_min), self.n_neighbors+1])\r\n nnmt = NearestNeighborsWithMetricTensor(n_neighbors=n_neighbors,\r\n n_jobs=self.n_jobs,\r\n **nn_params)\r\n nnmt.fit(X_min)\r\n ind = nnmt.kneighbors(X_min, return_distance=False)\r\n\r\n samples = self.sample_simplex(X=X_min,\r\n indices=ind,\r\n n_to_sample=n_to_sample,\r\n base_weights=density)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.choice(np.arange(len(density)), p=density)\r\n # random_neighbor_idx = self.random_state.choice(ind[idx][1:])\r\n # X_a = X_min[idx]\r\n # X_b = X_min[random_neighbor_idx]\r\n # samples.append(self.sample_between_points(X_a, X_b))\r\n\r\n return (np.vstack([X, samples]),\r\n np.hstack([y, np.repeat(self.min_label, len(samples))]))", "def get_random_points(N): \n x1 = 
np.random.uniform(-1,1,N)\n x2 = np.random.uniform(-1,1,N)\n return (x1,x2)", "def create_list(self):\n\n\t\trandom_list = random.sample(range(0, 500), 10)\n\n\t\treturn random_list", "def samplePositions(self):\n samples = []\n for i in range(self.sampleIter):\n x = random.randint(-self.sampleRange, self.sampleRange)\n y = random.randint(-self.sampleRange, self.sampleRange)\n x += self.currentPosition[0]\n y += self.currentPosition[1]\n if (x, y) in self.graph.keys():\n if self.graph[(x, y)] == 0:\n samples.append((x, y))\n return samples", "def generate_random_points(\n start: Float,\n end: Float,\n limit: Integer\n) -> List[Point]:\n\n return [\n Point(x=random.uniform(start, end), y=random.uniform(start, end))\n for _ in range(limit)\n ]", "def pickInputs(self):\n return [np.random.choice([-1,1])*np.random.random() for i in range(self.N)]", "def generate_batches(X, y, batch_size):\r\n m = len(X)\r\n batches = []\r\n \r\n # Shuffle \r\n permutation = list(np.random.permutation(m))\r\n shuff_X = X[permutation,:]\r\n shuff_y = y[permutation]\r\n \r\n # Partition\r\n num_complete_batches = math.floor(m/batch_size)\r\n for k in range(0, num_complete_batches):\r\n batch_X = shuff_X[k*batch_size:(k+1)*batch_size, :]\r\n batch_y = shuff_y[k*batch_size:(k+1)*batch_size]\r\n batches.append((batch_X, batch_y))\r\n \r\n # End case (last mini-batch < mini_batch_size)\r\n if m % batch_size != 0:\r\n batch_X = shuff_X[num_complete_batches*batch_size:m]\r\n batch_y = shuff_y[num_complete_batches*batch_size:m]\r\n batches.append((batch_X, batch_y))\r\n \r\n return batches", "def list_of_positions():\n positions = []\n while len(positions) != 20:\n x = random.randrange(0, 20)\n y = random.randrange(0, 20)\n if (x, y) not in positions:\n positions.append((x, y))\n return positions", "def sample_data(self, x: list, y: list) -> Tuple[list, list, list, list]:\n y = [self.mapping[i] for i in y]\n\n x_train = list()\n x_test = list()\n y_test = list()\n y_train = list()\n\n min_label = min([y.count(i) for i in list(set(y))])\n nbr_labels = len(set(y))\n data_size = min_label * len(set(y))\n\n train_size = int(0.66 * data_size)\n test_size = data_size - train_size\n\n for elem_x, elem_y in zip(x, y):\n if y_train.count(elem_y) < int(train_size / nbr_labels):\n x_train.append([elem_x])\n y_train.append(elem_y)\n\n elif y_test.count(elem_y) < int(test_size / nbr_labels):\n x_test.append([elem_x])\n y_test.append(elem_y)\n\n return x_train, y_train, x_test, y_test", "def generator(self, random, args):\r\n if self.duplicates:\r\n max_count = [self.capacity // item[0] for item in self.items]\r\n return [random.randint(0, m) for m in max_count]\r\n else:\r\n return [random.choice([0, 1]) for _ in range(len(self.items))]", "def _sample_mini_dataset(dataset, num_classes, num_shots):\n shuffled = list(dataset)\n random.shuffle(shuffled)\n for class_idx, class_obj in enumerate(shuffled[:num_classes]):\n for sample in class_obj.sample(num_shots):\n yield (sample, class_idx)", "def pattern_generator(self, x: int, y: int) -> float:\n dis_corners = self.distance_from_corners(x, y)\n dis_center = self.distance_from_center(x, y)\n key = (dis_corners - dis_center)\n return [x / key, y / key]", "def valid_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def __sample(self):\n # xvals are \"east\" vals and yvals are 
\"north\" vals on the map\n xvals = np.random.uniform(self._xmin, self._xmax, self._num_samples)\n yvals = np.random.uniform(self._ymin, self._ymax, self._num_samples)\n if self._target_altitude is None:\n zvals = np.random.uniform(self._zmin, self._zmax, self._num_samples)\n else:\n zvals = np.full(self._num_samples, self._target_altitude, dtype=float)\n \n samples = list(zip(xvals, yvals, zvals))\n\n pts = []\n for s in samples:\n in_collision = False\n idxs = list(self._obstacles_tree.query_radius(\n np.array([s[0], s[1]]).reshape(1, -1), r=self._max_poly_xy)[0])\n \n if len(idxs) > 0:\n for ind in idxs: \n p = self._polygons[int(ind)]\n if p.contains(s) and p.height >= s[2]:\n in_collision = True\n\n if not in_collision:\n pts.append(s)\n \n return pts", "def _place_nodes(self, i, j, step, max_nodes):\n points = []\n for k in range(max_nodes):\n while(True):\n t = Point(random.randint(i,i+step), random.randint(j,j+step)) \n if all([point.get_distance(t) > self.min_distance for point in points]):\n points.append(t)\n break\n \n for point in points:\n n=Node(self.counter, point)\n self.nodes.append(n)\n self.counter+=1", "def generateRandomList(minval, maxval, size):\n return [random.randint(minval, maxval) for _ in range(size)]", "def generator_fit(x, y, batch_size=128):\n while True:\n indices = np.random.randint(x.shape[0], size=batch_size)\n yield x[indices], y[indices]", "def generate_small_hypercube(m_rel: int, max_r: float, min_r: float) -> Tuple[List[float], float]:\n r = (random.random() * (max_r - min_r)) + min_r\n max_c = 1 - r\n min_c = - max_c\n\n c = list(np.random.rand(m_rel) * (max_c - min_c) + min_c)\n\n return c, r", "def bingo_numbers():\n def random_five(min_x, max_x):\n \"\"\"Return sample of 5 random numbers in the range specified.\"\"\"\n return random.sample(xrange(min_x, max_x), 5)\n\n tuple_array = zip(*[random_five(i, i + 15) for i in xrange(1, 76, 15)])\n return map(list, tuple_array)", "def random_batch(self):\n x = self._random_x()\n y = self._random_x()\n\n return x, y, self.f(x, y)", "def _random2min_max(points):\n x_max = max([x for x, y in points])\n x_min = min([x for x, y in points])\n y_max = max([y for x, y in points])\n y_min = min([y for x, y in points])\n return np.array([x_min, y_min, x_max, y_max])", "def jitter(x, y): \r\n dx = max(x) - min(x)\r\n dy = max(y) - min(y)\r\n assert(dx > 0)\r\n assert(dy > 0)\r\n d = math.sqrt(float(dx**2 + dy**2))\r\n print 'dx=%d,dy=%d,d=%f' % (dx,dy,d)\r\n assert(d > 0)\r\n F = 1.0/15.0\r\n fx = F*dx/d\r\n fy = F*dy/d\r\n \r\n def j(xx, yy):\r\n r = random.random() * d\r\n t = random.random() * 2.0 * math.pi\r\n return xx+r*math.cos(t)*fx, yy+r*math.sin(t)*fy\r\n \r\n xy = [j(x[i],y[i]) for i in range(len(x))] \r\n return zip(*xy)", "def set_min_dist(S1, S2):\n ret =[]\n if len(S2)>len(S1):\n tmp = S1\n S1=S2\n S2=tmp\n \n for x in S1:\n min_x=((x[0]-S2[0][0])**2+(x[1]-S2[0][1])**2)**0.5\n for y in S2:\n d = ((x[0]-y[0])**2+(x[1]-y[1])**2)**0.5\n if d<min_x:\n min_x = d\n ret.append(min_x)\n\n return ret", "def individual(length, min, max):\r\n return [ randint(min, max) for x in range(length) ]", "def generate_small_hypersphere(m_rel: int, max_r: float, min_r: float) -> Tuple[List[float], float]:\n c = [0] * m_rel\n r = (random.random() * (max_r - min_r)) + min_r\n\n for j in np.random.permutation(m_rel):\n bound = ((1 - r) ** 2) - np.sum(np.square(c))\n bound = math.sqrt(bound) if bound > 0 else 0\n max_c = bound\n min_c = -bound\n\n c[j] = (random.random() * (max_c - min_c)) + min_c\n\n return c, r", "def 
get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def pickup_samples(pts1, pts2):\n\n #\n # Your code here\n #\n\n x = min(len(pts1), len(pts2))\n return np.random.choice(range(x), min_num_pairs(), replace=False)", "def __init__(self, boardDimensions, shipsAfloat): \r\n ShotSelector.__init__(self, boardDimensions, shipsAfloat)\r\n self.remainingCoordinates = [Coordinates(i, j) for i in range(self.boardDimensions) for j in range(self.boardDimensions)]\r\n random.shuffle(self.remainingCoordinates)", "def get_mines(self):\n mines = []\n for i in range(self.rows):\n for j in range(self.cols):\n if self.board[i][j].category == Tiles.mine:\n mines.append((i, j))\n return mines", "def mini_batches(training_data: TrainingDataLoader, mini_batche_size: int):\n\n X = []\n Y = []\n\n for x, y in training_data:\n\n X.append(x)\n Y.append(y)\n\n if len(X) == mini_batche_size:\n yield (np.column_stack((X)), np.column_stack((Y)))\n X = []\n Y = []", "def get_Xc_starts(self):\r\n start_list = [np.random.randint(low=0, high=self.total_windows-self.buffer_needed) for _ in range(self.total_points)]\r\n return np.array(start_list)", "def generate_testdata(N: int, min_value: int, max_value: int) -> list:\r\n numbers = set([])\r\n while len(numbers) < N:\r\n random = randint(min_value, max_value)\r\n numbers.add(random)\r\n return list(numbers)", "def generatoze(b):\r\n l = []\r\n for i in range(b):\r\n k = random.randint(0, 100)\r\n l.append(k)\r\n return l", "def rand_custom(x: np.ndarray, y: np.ndarray, \n shape: tuple=(1,), interp_type: str='linear') -> np.ndarray :\n \n assert (y >= 0).any(), \"y shouldn't contain negative numbers\"\n \n size = 1\n for n in shape:\n size *= n\n \n y_norm = y/y.max()\n func = interp1d(x, y_norm, kind=interp_type)\n integr_ratio = (x.max() - x.min())/trapz(y_norm, x)\n \n full = np.array([])\n \n while full.shape[0] < size:\n size_all = int(np.round(integr_ratio*(size - full.shape[0])))\n \n a = np.random.uniform(size=size_all)\n b = np.random.uniform(x.min(), x.max(), size=size_all)\n \n full = np.hstack([full, b[np.where(a < func(b))[0]]])\n \n return full[:size].reshape(shape).copy()", "def minibatches(dataset: List[T],\n batch_size: int,\n shuffle: bool = True) -> Iterator[List[T]]:\n # start indexes 0, batch_size, 2 * batch_size, ...\n batch_starts = [start for start in range(0, len(dataset),batch_size)]\n\n if shuffle: random.shuffle(batch_starts) #shuffle the butches\n\n for start in batch_starts:\n end = start + batch_size\n yield dataset[start: end]", "def random_ints(count=20, min=1, max=50):\n import random\n return [random.randint(min, max) for _ in range(count)]", "def generate(random, lower, upper, count=1):\n if count > 1:\n points = []\n\n for x in range(lower.x, upper.x):\n for y in range(lower.y, upper.y):\n points.append(Point(x, y)) # REFACTOR: Not very efficient\n\n return random.sample(points, count)\n else:\n return Point(random.randrange(lower.x, upper.x), random.randrange(lower.y, upper.y))", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def simulate_x_values(self, minimum = -10, maximum = 10, length = 100):\n return np.sort(np.random.uniform(minimum, maximum, length) )", "def random_sample(grid_size):\r\n g = grid_size\r\n x_range = g[1] - g[0]\r\n\r\n y_range 
= g[3] - g[2]\r\n\r\n x_off = g[0]\r\n y_off = g[2]\r\n (x,y) = (x_range*np.random.ranf()+x_off,y_range*np.random.ranf()+y_off) \r\n return (x,y)", "def minibatch(l, bs):\n for i in xrange(0, len(l), bs):\n yield l[i:i+bs]", "def choose_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n sample=random.sample(li,n_items) # Should it be sorted?\n return sample", "def pointListForGrass(x, y):\n\n\treturn []\t# No points are made for grass", "def generate_random_scatter(x_range, w, b, k):\n\tx_1 = []\n\ty_1 = []\n\tx_2 = []\n\ty_2 = []\n\tfor i in range(k):\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_1.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b + amplitude\n\t\ty_1.append(yy)\n\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_2.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b - amplitude\n\t\ty_2.append(yy)\n\treturn x_1, y_1, x_2, y_2", "def __init__minefield__(self):\n # Creates random locations of mines according to the size of the game board.\n mines = random.sample(range(0, self.rows * self.cols), self.mines)\n \n # Uses a helper method to initialize tile categories: mine or zero.\n return [[Tiles(i, j, self.create_tile(mines, i, j)) for j in range(self.cols)] for i in range(self.rows)]", "def sampling_algorithm(self, X, y):\r\n\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed.\")\r\n\r\n # standardization is needed to make the range of the propensity scores\r\n # similar to that of the features\r\n mms = MinMaxScaler()\r\n X_trans = mms.fit_transform(X) # pylint: disable=invalid-name\r\n\r\n X_min = X_trans[y == self.min_label]\r\n\r\n # adding propensity scores as a new feature\r\n X_new = np.column_stack([X_trans, self.propensity_scores(X_trans, y)])\r\n X_min_new = X_new[y == self.min_label] # pylint: disable=invalid-name\r\n\r\n # finding nearest neighbors of minority samples\r\n n_neighbors = min([len(X_new), self.n_neighbors+1])\r\n\r\n ind = self.neighborhood_structure(X_new, y, n_neighbors, X_min_new)\r\n\r\n # noise removal\r\n t_hat = np.sum(y[ind[:, 1:]] == self.min_label, axis=1)\r\n to_remove = np.where(t_hat < self.t * n_neighbors)[0]\r\n\r\n if len(to_remove) >= len(X_min) - 1:\r\n return self.return_copies(X, y,\r\n \"most minority samples indentified as noise\")\r\n\r\n n_to_sample = n_to_sample + to_remove.shape[0]\r\n\r\n samples = self.generate_samples(X_min=X_min,\r\n to_remove=to_remove,\r\n X_trans=X_trans,\r\n y=y,\r\n ind=ind,\r\n n_to_sample=n_to_sample)\r\n\r\n X_min = np.delete(X_min, to_remove, axis=0)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.randint(len(X_min))\r\n # # finding the number of minority neighbors\r\n # t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\r\n # if t_hat < self.t*n_neighbors:\r\n # # removing the minority point if the number of minority\r\n # # neighbors is less then the threshold\r\n # # to_remove indexes X_min\r\n # if idx not in to_remove:\r\n # to_remove.append(idx)\r\n # # compensating the removal of the minority point\r\n # n_to_sample = n_to_sample + 1\r\n #\r\n # if len(to_remove) == len(X_min):\r\n # _logger.warning(self.__class__.__name__ + \": \" +\r\n # \"all minority samples identified as noise\")\r\n # return X.copy(), y.copy()\r\n # else:\r\n # # otherwise do the sampling\r\n # X_b = 
X_trans[self.random_state.choice(ind[idx][1:])]\r\n # samples.append(self.sample_between_points(X_min[idx], X_b))\r\n\r\n return (mms.inverse_transform(np.vstack([X_trans[y == self.maj_label],\r\n X_min,\r\n samples])),\r\n np.hstack([np.repeat(self.maj_label,\r\n np.sum(y == self.maj_label)),\r\n np.repeat(self.min_label, len(X_min)),\r\n np.repeat(self.min_label, len(samples))]))", "def rand_inside(x1, y1, x2, y2):\n\n rx = map_between(random.random(), x1, x2)\n ry = map_between(random.random(), y1, y2)\n\n return rx, ry", "def cell_sample(mask, samplingPoints):\n maskedArea = np.array(np.where(mask)).T\n maskedAreaLength = len(maskedArea)\n randomIndex = sp.random.randint(0, maskedAreaLength, samplingPoints)\n coordsRandom = maskedArea[randomIndex] + sp.rand(samplingPoints, 2)\n return(coordsRandom)", "def rnd_pset(self):\n\t\treturn [rnd() * 10, rnd() * 10, rnd() * 12 * 15, rnd() * 12 * 15]", "def choose_ordered_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n indices = list(range(len(li)))\n sample=random.sample(indices,n_items) # Should it be sorted?\n return [li[i] for i in sorted(sample)]", "def get_random_patches(images, n_patches, patch_x, patch_y):\n n_images = images.shape[0]\n img_c = images.shape[1]\n img_r = images.shape[2]\n r_images = rng.randint(n_images, size = n_patches)\n r_x = rng.randint(img_c-patch_x+1, size = n_patches)\n r_y = rng.randint(img_r-patch_y+1, size = n_patches)\n patches_list = []\n for image_i, x_i, y_i in zip(r_images, r_x, r_y):\n patch_i = images[image_i, x_i:(x_i + patch_x), y_i:(y_i + patch_y)]\n patches_list.append(patch_i)\n \n patches_np = np.asarray(patches_list, dtype=images.dtype) \n return patches_np", "def _create_random_offsets(self, block_locations):\n\n min_x, max_x, min_y, _ = self._find_min_and_max_coords(block_locations)\n x_offset = randrange(10 - (max_x - min_x)) - min_x\n y_offset = 0 - min_y\n return [x_offset, y_offset]", "def samplingWithReplacement(m):\n return [ random.randrange(m) for i in range(m) ]", "def sampleset():\n pos = [(0, i) for i in range(50)]\n neg = [(1, i) for i in range(10)]\n return pos + neg", "def gen_data(min_coord, max_coord, size):\r\n data = np.random.randint(min_coord, max_coord, size)\r\n return data", "def neighbor(self, start):\n x = start[0] + random.uniform(-20, 20)\n y = start[1] + random.uniform(-20, 20)\n x = max(min(x, xbounds[1]), xbounds[0])\n y = max(min(y, ybounds[1]), ybounds[0])\n return [x,y]", "def mutate(w, h, mines, p):\r\n for i in range(w*h):\r\n if uniform(0, 1) <= p:\r\n #mutate:\r\n if i in mines:\r\n mines.remove(i)\r\n else:\r\n mines.append(i)\r\n return mines", "def _random_under_sampling(self, X_maj, y_maj, X_min, y_min):\n np.random.seed(self._random_state)\n idx = np.random.choice(len(X_maj), len(X_min), replace=False)\n X_train = np.concatenate([X_maj.iloc[idx], X_min])\n y_train = np.concatenate([y_maj.iloc[idx], y_min])\n return X_train, y_train", "def make_locations(x_width, y_height, count, x_offset):\n bottom = set()\n while len(bottom) < count:\n loc = random_location(x_offset, x_offset + x_width, 0, y_height)\n bottom.add(loc)\n return bottom", "def list_random_sample_numbers(min: int, max: int, length: int) -> List:\r\n result = random.sample(range(min, max), length)\r\n return result", "def minibatcher(inputs, targets, batchsize, shuffle=False):", "def get_sample(self):\n # initialize with a seed point\n self.__sample__(rnd() * self.width, rnd() * self.height)\n while len(self.queue) > 0:\n idx = int(rnd() * 
len(self.queue))\n p = self.queue[idx]\n new_inserted = False\n for j in xrange(self.k):\n theta = 2 * np.pi * rnd()\n # radius <= r <= 2 * radius\n r = np.sqrt(3 * rnd() * self.radius**2 + self.radius**2)\n x = p[0] + r * np.cos(theta)\n y = p[1] + r * np.sin(theta)\n if (0 <= x < self.width) and (0 <= y < self.height) and self.__far__(x,y):\n self.__sample__(x,y)\n new_inserted = True\n break\n # remove point from active list\n if not new_inserted:\n self.queue = self.queue[:idx] + self.queue[idx+1:]\n self.samples.append(p)\n\n return self.samples", "def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def random_noise_sample(\n size: int,\n cxs: List[int],\n cys: List[int],\n angle: int,\n diameter: int,\n tmp: OpenSimplex,\n minimum: int = -1,\n maximum: int = 1\n) -> List[float]:\n assert len(cxs) == size\n assert len(cys) == size\n\n sample = []\n for i in range(size):\n sample.append(generate_noise(cxs[i], cys[i], angle, diameter, tmp, minimum, maximum))\n \n return sample", "def create_points(N, M):\n arr = numpy.random.randint(1, N+1, size=(M, 2))\n idx = 0\n coords = []\n points = []\n \n for ele in arr:\n if (ele[0], ele[1]) not in coords:\n idx += 1\n coords.append((ele[0], ele[1]))\n \n while idx < M:\n missed = numpy.random.randint(1, N+1, size=(M-idx, 2))\n for ele in missed:\n if (ele[0], ele[1]) not in coords:\n idx += 1\n coords.append((ele[0], ele[1]))\n\n # creates real points in the plane\n idx = 0\n for coord in coords:\n idx += 1\n points.append(Point(id=idx, x=coord[0], y=coord[1]))\n\n return points", "def compute_bootstrapped_sample(X_table, y_table):\n n = len(X_table)\n X_sample = []\n y_sample = []\n for _ in range(n):\n rand_index = random.randrange(0, n)\n X_sample.append(X_table[rand_index])\n y_sample.append(y_table[rand_index])\n return X_sample, y_sample", "def get_grid_coords(self, count, boundry_x, boundry_y, grid_size):\n\n coords = []\n\n boundry_x = int(boundry_x/10)\n boundry_y = int(boundry_y/10)\n\n while len(coords) < count:\n seed()\n\n\n x = randint(-boundry_x, boundry_x)\n y = randint(-boundry_y, boundry_y)\n\n if len(coords) == 0:\n coords.append((x*grid_size, y*grid_size))\n else:\n for coord in coords:\n if (x not in range(coord[0]-buffer*grid_size, coord[0]+buffer*grid_size)) and (y not in range(coord[1]-buffer, coord[1]+buffer)):\n pass\n else:\n break", "def shuffle_pair(x,y):\n xy = list(zip(x,y))\n np.random.shuffle(xy)\n x, y = zip(*xy)\n x = np.array(x)\n y = np.array(y)" ]
[ "0.65385675", "0.6229042", "0.6218775", "0.62119395", "0.62032425", "0.61461645", "0.6137679", "0.60998034", "0.6097758", "0.60959524", "0.60723096", "0.60708946", "0.6014316", "0.5990937", "0.5940285", "0.5884059", "0.5871586", "0.5844728", "0.58423215", "0.5820612", "0.5779203", "0.5763866", "0.5759305", "0.5740081", "0.5703614", "0.5698038", "0.568929", "0.5668056", "0.5660645", "0.5634374", "0.5603447", "0.5593081", "0.5578581", "0.5573625", "0.55659944", "0.5562254", "0.5560358", "0.5530504", "0.5526789", "0.5525799", "0.5525373", "0.5504122", "0.5499967", "0.5499364", "0.54905516", "0.5477895", "0.5475025", "0.5473076", "0.54721284", "0.5463054", "0.54507095", "0.5440666", "0.5433749", "0.54276145", "0.54260874", "0.5418769", "0.54171944", "0.5417", "0.54153544", "0.5413637", "0.54126525", "0.5403884", "0.54026717", "0.539679", "0.5395919", "0.53952956", "0.53943205", "0.53941745", "0.53918576", "0.53860706", "0.53849155", "0.5369469", "0.5361591", "0.5360365", "0.53554696", "0.5355225", "0.53323984", "0.5326909", "0.5325844", "0.5303969", "0.530269", "0.5302453", "0.5302426", "0.5300055", "0.52882165", "0.5285285", "0.52823526", "0.52791005", "0.52786946", "0.5276615", "0.5273881", "0.52719605", "0.526925", "0.5261929", "0.5259231", "0.5251937", "0.5250429", "0.52427447", "0.52425647", "0.5239002" ]
0.6306558
1
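A minimal usage sketch for the random_mini_batches helper documented above — the data shapes are made up, and numpy/math are assumed to be imported wherever the helper is defined:

import math
import numpy as np

# Toy data: 5 features, 147 examples (deliberately not a multiple of 64),
# so the final mini-batch exercises the "end case" branch.
X = np.random.randn(5, 147)
Y = (np.random.rand(1, 147) > 0.5).astype(int)

for mini_batch_X, mini_batch_Y in random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    assert mini_batch_X.shape[1] == mini_batch_Y.shape[1]
    # ... run one optimization step on (mini_batch_X, mini_batch_Y) ...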
Yield successive n-sized chunks from l.
def chunks(l, n):
    for i in range(0, len(l), n):
        yield l[i:i+n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def get_chunks(self, l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def chunks(self, l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def chunks(cls, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def chunks(self, l, n):\n yield l[:n-1]\n for i in range(n-1, len(l), n):\n yield l[i:i+n]", "def chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(l: List, n: int):\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n): # noqa: E741\n for i in range(0, len(l), n):\n yield l[i : i + n] # noqa: E203", "def chunks(_class, l, n):\n\t\t# CITE: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python\n\t for i in xrange(0, len(l), n):\n\t yield l[i:i+n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\n\tfor i in xrange(0, len(l), n):\n\t\tyield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def chunk(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def _chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i : i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i : i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, 
n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i: i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield 
l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i:i+n]", "def chunks(l, n):\n if n:\n for i in xrange(0, len(l), n):\n yield l[i:i + n]" ]
[ "0.8038813", "0.79248375", "0.7923423", "0.7885103", "0.78773195", "0.7815877", "0.77655786", "0.77556044", "0.77441615", "0.7731815", "0.77288336", "0.772473", "0.77028215", "0.76889825", "0.76889825", "0.7664208", "0.76570904", "0.7655856", "0.7655856", "0.7635831", "0.7547221", "0.7537881", "0.7537881", "0.7537881", "0.7537881", "0.7537881", "0.7533697", "0.75302094", "0.7480959", "0.7473358", "0.7473328", "0.7473328", "0.7453012", "0.7453012", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.7448925", "0.74381757", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74379355", "0.74221736", "0.738925" ]
0.76458347
21
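For completeness, a tiny usage sketch of the chunks generator documented above (the sample data is illustrative):

data = list(range(10))

# Consume the generator eagerly to inspect the chunks.
print(list(chunks(data, 3)))   # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]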
Tests that Predictor instances are not serializable.
def test_serialization():
    # Class is serializable.
    ray.put(DummyPredictor)

    # Instance is not serializable.
    predictor = DummyPredictor()
    with pytest.raises(PredictorNotSerializableException):
        ray.put(predictor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def test_valid_serialization_unfit_model(self):\n instance = GammaUnivariate()\n result = GammaUnivariate.from_dict(instance.to_dict())\n assert instance.to_dict() == result.to_dict()", "def test_repr(self, cls):\n inst = cls()\n # Exact values aren't a concern so long as neither direction\n # raises an exception.\n pkl = cloudpickle.dumps(inst)\n cloudpickle.loads(pkl)", "def test__pickle_unpickle(self):\n pass", "def test_serialize_object(self):\n test_obj = self.TestObject(prop1='x', prop2=1234)\n\n with self.assertRaises(TypeError):\n serialize(test_obj)", "def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def test_serialize_no_metadata(self):\n pass # pragma: no cover", "def test_predict_transient_smoke_new(self):\n self.check_predict_transient_smoke()", "def test_instmap_picklable(self):\n instmap = FakeAthens().defaults().instruction_schedule_map\n\n ser_obj = pickle.dumps(instmap)\n deser_instmap = pickle.loads(ser_obj)\n\n self.assertEqual(instmap, deser_instmap)", "def test_predict_transient_smoke_old(self):\n self.check_predict_transient_smoke()", "def is_serializable(instance_or_class: Any) -> bool:\n return hasattr(instance_or_class, SERDE_SCOPE)", "def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def assertSerializeDeserialize(self, obj, version=None):\n tested_versions = [version] if version is not None else Version.supported\n for version in tested_versions:\n constructor = obj.__class__.from_json\n json_obj = obj.to_json(version)\n clone = constructor(json_obj)\n\n self.assertEqual(obj.__class__, clone.__class__)\n\n if isinstance(obj, dict):\n orig_dict = obj\n clone_dict = clone\n else:\n orig_dict = obj.__dict__\n clone_dict = clone.__dict__\n\n self.assertEqual(orig_dict, clone_dict)", "def test_no_fit_predict() -> None:\n mapie = MapieClassifier(estimator=DummyClassifier())\n with pytest.raises(NotFittedError, match=r\".*not fitted.*\"):\n mapie.predict(X_toy)", "def IgnorePersistedDecision(self) -> bool:", "def pickle_fake_estimator():\n estimator = FakeEstimator()\n data = FakeEstimatorData()\n\n estimator.fit(*data.train_data)\n estimator.dump(filepath=\"src/tests/fixtures/fake_estimator.pkl\")", "def test_fit_without_saving(self) -> type(None):\n X, y = get_dataset_for_regression()\n rgr = StackingRegressor(keep_meta_X=False)\n rgr.fit(X, y)\n self.assertFalse(hasattr(rgr, 'meta_X_'))\n self.assertTrue(hasattr(rgr, 'meta_estimator_'))", "def test_data_object_vaporise(self):\n pass", "def test_serialize_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n data = recommendation.serialize()\n self.assertNotEqual(data, None)\n self.assertIn(\"product_id\", data)\n self.assertEqual(data[\"product_id\"], recommendation.product_id)\n self.assertIn(\"recommendation_product_id\", data)\n self.assertEqual(data[\"recommendation_product_id\"], recommendation.recommendation_product_id)\n self.assertIn(\"relationship\", data)\n self.assertEqual(data[\"relationship\"], recommendation.relationship.name)", "def 
test_pickle(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n plane = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n \n p2 = pickle.loads(pickle.dumps(plane))\n np.testing.assert_almost_equal(plane.origin, p2.origin)\n np.testing.assert_almost_equal(plane.normal, p2.normal)\n np.testing.assert_almost_equal(plane.basis_u, p2.basis_u)\n np.testing.assert_almost_equal(plane.basis_v, p2.basis_v)", "def test_default(self):\n self.assertEqual(self.model.frozen(), False)", "def is_picklable(obj):\n try:\n pickle.dumps(obj)\n\n except pickle.PicklingError:\n return False\n return True", "def test_save_load(self):\n features = np.array([[0, 0], [0.1, 0.1], [0.4, 0.4], [1, 1]])\n labels = np.array([0, 0.1, 0.4, 1])\n num_inputs = 2\n qnn = TwoLayerQNN(\n num_inputs,\n feature_map=ZZFeatureMap(num_inputs),\n ansatz=RealAmplitudes(num_inputs),\n observable=PauliSumOp.from_list([(\"Z\" * num_inputs, 1)]),\n quantum_instance=self.qasm_quantum_instance,\n )\n regressor = NeuralNetworkRegressor(qnn, optimizer=COBYLA())\n regressor.fit(features, labels)\n\n # predicted labels from the newly trained model\n test_features = np.array([[0.5, 0.5]])\n original_predicts = regressor.predict(test_features)\n\n # save/load, change the quantum instance and check if predicted values are the same\n with tempfile.TemporaryDirectory() as dir_name:\n file_name = os.path.join(dir_name, \"regressor.model\")\n regressor.save(file_name)\n\n regressor_load = NeuralNetworkRegressor.load(file_name)\n loaded_model_predicts = regressor_load.predict(test_features)\n\n np.testing.assert_array_almost_equal(original_predicts, loaded_model_predicts)\n\n # test loading warning\n class FakeModel(SerializableModelMixin):\n \"\"\"Fake model class for test purposes.\"\"\"\n\n pass\n\n with self.assertRaises(TypeError):\n FakeModel.load(file_name)", "def test_invalid_prefit_estimator(estimator: ClassifierMixin) -> None:\n mapie = MapieClassifier(estimator=estimator, cv=\"prefit\")\n with pytest.raises(NotFittedError):\n mapie.fit(X_toy, y_toy)", "def test_no_fit_predict() -> None:\n mapie = MapieRegressor(estimator=DummyRegressor())\n with pytest.raises(NotFittedError, match=r\".*not fitted.*\"):\n mapie.predict(X_toy)", "def test_serialize_and_deserialize_returns_unchanged_collection(\n self\n ) -> None:\n self.assertEqual(\n self.collection.to_dict(),\n collection_domain.Collection.deserialize(\n self.collection.serialize()).to_dict())", "def testPredict_AsIterableFalse(self):\n sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(\n 'language', hash_bucket_size=20)\n feature_columns = [\n tf.contrib.layers.embedding_column(sparse_column, dimension=1)\n ]\n\n classifier = tf.contrib.learn.DNNClassifier(\n n_classes=3,\n feature_columns=feature_columns,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=3))\n\n classifier.fit(input_fn=_input_fn, steps=100)\n\n scores = classifier.evaluate(input_fn=_input_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n self.assertLess(scores['loss'], 0.3)\n predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)\n self.assertListEqual(list(predictions), [1, 0, 0])\n predictions = classifier.predict_proba(input_fn=_input_fn,\n as_iterable=False)\n self.assertAllClose(\n predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.1)", "def test_valid_serialization_fit_model(self):\n instance = GammaUnivariate()\n instance.fit(np.array([1, 2, 3, 2, 1]))\n result = 
GammaUnivariate.from_dict(instance.to_dict())\n assert instance.to_dict() == result.to_dict()", "def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False", "def test_invalid_prefit_estimator(estimator: RegressorMixin) -> None:\n mapie = MapieRegressor(estimator=estimator, cv=\"prefit\")\n with pytest.raises(NotFittedError):\n mapie.fit(X_toy, y_toy)", "def test_valid_prediction(alpha: Any) -> None:\n model = LogisticRegression(multi_class=\"multinomial\")\n model.fit(X_toy, y_toy)\n mapie = MapieClassifier(estimator=model, cv=\"prefit\")\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy, alpha=alpha)", "def test_pickling_error():\n client, server = make_queue_pairs('localhost')\n\n # Attempt to push a non-JSON-able object to the queue\n with pytest.raises(TypeError):\n client.send_inputs(Test())", "def test_deserialize_with_no_data(self):\n pet = Pet(0)\n self.assertRaises(DataValidationError, pet.deserialize, None)", "def test_basedict(self):\n tester = BaseModel()\n self.assertTrue(dict, type(tester.to_dict()))", "def test_predict():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.predict(testing_features)\n assert False # Should be unreachable\n except ValueError:\n pass", "def test_score_without_fitted_estimator(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n assert_not_fitted(oz)\n\n with pytest.raises(NotFitted):\n oz.score(self.binary.X.test, self.binary.y.test)\n assert_not_fitted(oz)", "def test_serialization(self):\n serialized = self.Gs.as_dict()\n unserialized = BayesianNetwork.from_dict(serialized)\n\n self.assertDictEqual(serialized, unserialized.as_dict())", "def test_dict_to_instance(self):\n r = Review()\n r_dictionary = r.to_dict()\n r2 = Review(**r_dictionary)\n self.assertEqual(type(r), type(r2))", "def test_raise_not_fitted_error_if_not_fitted(estimator, build_dataset,\n with_preprocessor):\n input_data, labels, preprocessor, _ = build_dataset(with_preprocessor)\n estimator = clone(estimator)\n estimator.set_params(preprocessor=preprocessor)\n set_random_state(estimator)\n with pytest.raises(NotFittedError):\n estimator.predict(input_data)", "def can_be_pickled(x):\n try:\n s = BytesIO() \n pickle.dump(x, s) \n return True\n except:\n return False", "def test_valid_prefit_estimator(estimator: RegressorMixin) -> None:\n estimator.fit(X_toy, y_toy)\n mapie = MapieRegressor(estimator=estimator, cv=\"prefit\")\n mapie.fit(X_toy, y_toy)\n if isinstance(estimator, Pipeline):\n check_is_fitted(mapie.single_estimator_[-1])\n else:\n check_is_fitted(mapie.single_estimator_)\n check_is_fitted(\n mapie,\n [\n \"n_features_in_\",\n \"single_estimator_\",\n \"estimators_\",\n \"k_\",\n \"residuals_\"\n ]\n )\n assert mapie.n_features_in_ == 1", "def __validate__(self):\n if self.train:\n assert self.random is not None", "def testDoNotEncodeStrangeObjects(self):\n class BogusObject(object):\n\n def check_initialized(self):\n pass\n\n self.assertRaises(TypeError,\n protojson.encode_message,\n BogusObject())", "def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)", "def test_no_extra_fields():\n t_task = Task()\n t_dict = t_task._asdict()\n assert len(t_dict) <= 4", "def test_none_estimator() -> None:\n mapie = MapieClassifier(estimator=None)\n mapie.fit(X_toy, y_toy)\n assert isinstance(mapie.single_estimator_, LogisticRegression)", "def test_invalid_method():\n atom = ATOMClassifier(X_bin, y_bin, 
random_state=1)\n atom.run(\"SGD\")\n pytest.raises(AttributeError, atom.sgd.predict_proba, X_bin)", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy", "def assert_is_not_instance(self, obj, cls, msg=\"\"):\r\n assert not isinstance(obj, cls)", "def assertDeserializeNonString(self):\r\n self.assertDeserializeEqual(None, None)\r\n self.assertDeserializeEqual(3.14, 3.14)\r\n self.assertDeserializeEqual(True, True)\r\n self.assertDeserializeEqual([10], [10])\r\n self.assertDeserializeEqual({}, {})\r\n self.assertDeserializeEqual([], [])\r\n self.assertDeserializeEqual(None, 'null')", "def test_object_is_not_created_without_required_fields(self):\n data1 = self.data.copy()\n del data1[\"title\"]\n\n serializer = ProductSerializer(data=data1)\n\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"title\")[0], self.error_message)\n\n data2 = self.data.copy()\n del data2[\"description\"]\n\n serializer = ProductSerializer(data=data2)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"description\")[0], self.error_message)\n\n data3 = self.data.copy()\n del data3[\"price\"]\n\n serializer = ProductSerializer(data=data3)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors.get(\"price\")[0], self.error_message)", "def test_none_alpha_results() -> None:\n estimator = LogisticRegression()\n estimator.fit(X, y)\n y_pred_est = estimator.predict(X)\n mapie = MapieClassifier(estimator=estimator, cv=\"prefit\")\n mapie.fit(X, y)\n y_pred_mapie = mapie.predict(X)\n np.testing.assert_allclose(y_pred_est, y_pred_mapie)", "def test_serialize_nofmt():\n inst = AsciiTableSerialize.AsciiTableSerialize()\n test_msg = np.zeros((5, 5))\n nt.assert_raises(RuntimeError, inst.serialize, test_msg)\n nt.assert_equal(inst.table_info, None)", "def test_before_todict(self):\n b1 = BaseModel()\n b1_dict = b1.__dict__\n self.assertEqual(type(b1).__name__, \"BaseModel\")\n self.assertTrue(hasattr(b1, '__class__'))\n self.assertEqual(str(b1.__class__),\n \"<class 
'models.base_model.BaseModel'>\")\n self.assertTrue(type(b1_dict['created_at']), 'datetime.datetime')\n self.assertTrue(type(b1_dict['updated_at']), 'datetime.datetime')\n self.assertTrue(type(b1_dict['id']), 'str')", "def from_serializable(self, _):\n\n assert False, \"not implemented\"", "def test_constructor(self):\r\n self.assertTrue(isinstance(self.estimator1,\r\n ObservationRichnessEstimator))", "def from_serializable(self, _):\n\n assert False, \"Not implemented\"", "def test_pickling_tensors(free_alg):\n\n dr = free_alg\n p = dr.names\n x = IndexedBase('x')\n v = Vec('v')\n b = Vec('b')\n\n tensor = dr.einst(x[p.i] * v[p.i])\n def_ = dr.define(b, tensor)\n serialized = pickle.dumps([tensor, def_])\n\n with pytest.raises(ValueError):\n pickle.loads(serialized)\n\n with dr.pickle_env():\n res = pickle.loads(serialized)\n\n assert res[0] == tensor\n assert res[1] == def_", "def sanitize_serializable(instance_data):\n output = {}\n\n # we can't serialize all values, so just grab the ones we can\n for k, v in instance_data.iteritems():\n can_ser = can_serialize_json(k, v)\n if not can_ser:\n continue\n\n output[k] = v\n\n return output", "def test_valid_prefit_estimator(estimator: ClassifierMixin) -> None:\n estimator.fit(X_toy, y_toy)\n mapie = MapieClassifier(estimator=estimator, cv=\"prefit\")\n mapie.fit(X_toy, y_toy)\n if isinstance(estimator, Pipeline):\n check_is_fitted(mapie.single_estimator_[-1])\n else:\n check_is_fitted(mapie.single_estimator_)\n check_is_fitted(\n mapie,\n [\n \"single_estimator_\",\n \"n_features_in_\",\n \"n_samples_val_\"\n ]\n )\n assert mapie.n_features_in_ == 1", "def test_to_dict_checker():\n msg = \"It looks like an object has changed. Please be sure to update to_dict before updating this test to pass.\"\n assert len(DisplayInfo.__slots__) == 5, msg\n assert len(Event.__slots__) == 13, msg\n assert len(SessionInfo.__slots__) == 5, msg\n assert len(SlotInfo.__slots__) == 4, msg\n assert len(TimestampInfo.__slots__) == 3, msg\n assert len(VisitorInfo.__slots__) == 3, msg", "def test_check_estimator_does_not_raise(estimator_class):\n estimator_instance = estimator_class.create_test_instance()\n\n check_estimator(estimator_class, raise_exceptions=True, verbose=False)\n\n check_estimator(estimator_instance, raise_exceptions=True, verbose=False)", "def test_fit_predict_proba_with_false_in_keep_meta_X(self) -> type(None):\n X, y = get_dataset_for_classification()\n clf = StackingClassifier(keep_meta_X=False)\n _ = clf.fit_predict_proba(X, y)\n self.assertFalse(clf.keep_meta_X)\n self.assertTrue(clf.meta_X_ is None)", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def is_trainable(self):\n return False", "def test_none_estimator() -> None:\n mapie = MapieRegressor(estimator=None)\n mapie.fit(X_toy, y_toy)\n assert isinstance(mapie.single_estimator_, LinearRegression)", "def test_only_ml_model_instances_allowed_to_be_stored(self):\n # arrange\n model_manager = ModelManager()\n\n # act\n exception_raised = False\n exception_message = \"\"\n try:\n model_manager.load_model(\"tests.mocks.SomeClass\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"ModelManager instance can only hold references to objects of type MLModel.\")", "def test_persistence(self):\n model = PoincareModel(self.data, burn_in=0, negative=3)\n model.train(epochs=1)\n 
model.save(testfile())\n loaded = PoincareModel.load(testfile())\n self.models_equal(model, loaded)", "def test_no_fit(self, example_dataset):\n\n transformer = PreprocessFeatures()\n\n with pytest.raises(NotFittedError):\n transformer.transform(example_dataset)", "def test_can_instantiate(self):\n\n exc_thrown = False\n\n try:\n self.klass(*self.instantiate_args)\n except Exception:\n exc_thrown = True\n\n self.assertFalse(exc_thrown)", "def test_to_plain_python_obj_error():\n\n class FailingObject:\n pass\n\n output = r.to_plain_python_obj(FailingObject())\n with pytest.raises(TypeError):\n json.dumps(output)", "def test_instantiation(self):\n classifier = WidgetClassifier()\n classifier.load_model()\n assert(classifier.encoder is not None)\n assert(classifier.cluster is not None)\n assert(classifier.telemetry_keys is not None)", "def test_all_prediction_properties():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"LR\", \"SGD\"])\n assert isinstance(atom.lr.predict_train, np.ndarray)\n assert isinstance(atom.lr.predict_test, np.ndarray)\n assert isinstance(atom.lr.predict_proba_train, np.ndarray)\n assert isinstance(atom.lr.predict_proba_test, np.ndarray)\n assert isinstance(atom.lr.predict_log_proba_train, np.ndarray)\n assert isinstance(atom.lr.predict_log_proba_test, np.ndarray)\n assert isinstance(atom.lr.decision_function_train, np.ndarray)\n assert isinstance(atom.lr.decision_function_test, np.ndarray)\n assert isinstance(atom.lr.score_train, np.float64)\n assert isinstance(atom.lr.score_test, np.float64)", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def test_api_predictors_get(self):\n pass", "def test_attribute_serialization(self):\n\n # Construct a json representation of a Attribute model\n attribute_model_json = {}\n attribute_model_json['name'] = 'testString'\n attribute_model_json['value'] = 'testString'\n\n # Construct a model instance of Attribute by calling from_dict on the json representation\n attribute_model = Attribute.from_dict(attribute_model_json)\n assert attribute_model != False\n\n # Construct a model instance of Attribute by calling from_dict on the json representation\n attribute_model_dict = Attribute.from_dict(attribute_model_json).__dict__\n attribute_model2 = Attribute(**attribute_model_dict)\n\n # Verify the model instances are equivalent\n assert attribute_model == attribute_model2\n\n # Convert model instance back to dict and verify no loss of data\n attribute_model_json2 = attribute_model.to_dict()\n assert attribute_model_json2 == attribute_model_json", "def test_instance(self):\n b = Review()\n self.assertIsInstance(b, Review)\n self.assertTrue(issubclass(type(b), BaseModel))", "def test_none_alpha_results() -> None:\n estimator = LinearRegression()\n estimator.fit(X, y)\n y_pred_est = estimator.predict(X)\n mapie = MapieRegressor(estimator=estimator, cv=\"prefit\")\n mapie.fit(X, y)\n y_pred_mapie = mapie.predict(X)\n np.testing.assert_allclose(y_pred_est, y_pred_mapie)", "def test_save_load(self):\n tmpfile = os.path.join(os.path.dirname(__file__), \"tmp.pkl\")\n self.p.save(tmpfile)\n _ = Parameters.load(tmpfile)\n os.remove(tmpfile)\n with self.assertRaises(OSError):\n self.p.load(\"__________\")\n\n with open(tmpfile, \"wb\") as f:\n pickle.dump({}, f)\n with self.assertRaises(AttributeError):\n self.p.load(tmpfile)\n os.remove(tmpfile)", "def test_positive_time_period_dict_in_serializer() -> None:\n assert cv.custom_serializer(cv.positive_time_period_dict) == 
{\n \"type\": \"positive_time_period_dict\",\n }", "def test_pickle(\n config: Config\n) -> None:\n assert isinstance(config, Config)\n other: Config = pickle.loads(pickle.dumps(config))\n assert config == other", "def test_predict_proba_nonnegative():\n\n def check_for_negative_prob(proba):\n for p in np.ravel(proba):\n assert np.round(p,7) >= 0\n\n clf = mord.LogisticAT(alpha=0.)\n clf.fit(X, y)\n check_for_negative_prob(clf.predict_proba(X))\n\n clf2 = mord.LogisticIT(alpha=0.)\n clf2.fit(X, y)\n check_for_negative_prob(clf2.predict_proba(X))\n\n clf3 = mord.LogisticSE(alpha=0.)\n clf3.fit(X, y)\n check_for_negative_prob(clf3.predict_proba(X))", "def test_writer_serializer_no_company(self):\n transaction = TransactionFactory.build()\n self.assertEqual(Transaction.objects.count(), 0)\n\n data = WritableTransactionSerializer(transaction).data\n serializer = WritableTransactionSerializer(data=data)\n self.assertFalse(serializer.is_valid())\n\n expected_key = \"estabelecimento\"\n expected_text = \"object does not exist.\"\n self.assertIn(expected_key, serializer.errors)\n self.assertIn(expected_text, str(serializer.errors[expected_key]))\n self.assertEqual(Transaction.objects.count(), 0)", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def test_serialization(self):\n serialized_establishment = serializers.EstablishmentSerializer(\n self.establishment\n )\n self.assertSetEqual(\n set(serialized_establishment.fields.keys()),\n ESTABLISHMENT_EXPECTED_KEYS\n )", "def test_bert_embedder_frozen_params(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": { # default embedder_output_pooling_type for bert is \"first\"\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"distilbert-base-uncased\",\n \"update_embeddings\": False\n },\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n # fit the model\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n\n # assert only some weights are trainable\n clf = model._clf\n n_requires_grad, n_total = get_num_weights_of_model(clf)\n assert n_requires_grad < n_total, print(n_requires_grad, n_total)\n\n # check if dumping and loading partial state dict logs required messages & throws no errors\n os.makedirs(GENERATED_TMP_FOLDER, exist_ok=True)\n clf.dump(GENERATED_TMP_FOLDER)\n new_clf = clf.load(GENERATED_TMP_FOLDER)\n shutil.rmtree(GENERATED_TMP_FOLDER)\n\n # do predictions with loaded model\n model._clf = new_clf\n model_predictions_assertions(model)", "def test_serialize_map_fails_with_missing_values():\n Paint = Map(\n MapEntrySpec(2, \"colour\", String)\n )\n\n with pytest.raises(ValueError):\n bytes(Paint.to_bytes({}))", "def test_valid_estimator(strategy: str) -> None:\n mapie = MapieRegressor(estimator=DummyRegressor(), **STRATEGIES[strategy])\n mapie.fit(X_toy, y_toy)\n assert isinstance(mapie.single_estimator_, DummyRegressor)\n for estimator in mapie.estimators_:\n assert isinstance(estimator, DummyRegressor)", "def objDictConsistency(self, cls, hard, dis=False):\n orig = cls(\"OK\")\n # the hard way\n if hard:\n p = self.dumpWithPreobjects(None, orig.__dict__, orig, dis=dis)\n d, obj = self.pickler.loads(p)[-1]\n else:\n p = self.dumpWithPreobjects(None, orig, orig.__dict__, dis=dis)\n 
obj, d = self.pickler.loads(p)[-1]\n self.assertTrue(type(obj)is type(orig))\n self.assertTrue(type(obj.__dict__) is type(orig.__dict__)) # @IgnorePep8\n self.assertEquals(set(obj.__dict__.keys()), set(orig.__dict__.keys()))\n self.assertTrue(obj.__dict__ is d)\n self.assertTrue(obj.isOk() is True)", "def test_jurisdictions_instances(self):\n obj = Jurisdiction()\n self.assertIsInstance(obj, Jurisdiction)", "def test_is_trainable(estimator_fn, machine_settings):\n # Setup\n batch_size = 128 # Must be divisible by number of replicas (8 for TPU v2)\n crop_size = 24\n eval_count = 1024\n eval_steps = int(eval_count / batch_size)\n assert eval_steps * batch_size == eval_count\n estimator = estimator_fn(\n micronet.cifar.linear_model.create_model, batch_size, batch_size)\n\n # Replace with lambda?\n def input_fn(params):\n # Only the TPUEstimator needs to pass batch_size to the input_fn.\n if 'batch_size' in params:\n assert params['batch_size'] == batch_size\n del params\n mini_ds = cifar_ds.train_dataset(\n cloud_storage=machine_settings.is_cloud)\n mini_ds = mini_ds.map(\n cifar_ds.preprocess_fn(augment=False, crop_to=crop_size))\n # Take a small amount and repeat so that the test can show training\n # in a smaller amount of steps (so the test runs quickly).\n mini_ds.take(500).repeat()\n return mini_ds.batch(batch_size, drop_remainder=True)\n\n # Test\n # 1. Check that the untrained model predicts randomly.\n #\n # I want the test to pass 99% of the time.\n # For a 1000 trial experiment with success probability of 1% (100 classes),\n # CDF_inverse(0.01) ~= 3\n # CDF_inverse(0.99) ~= 19\n # (from binomial dist calculator:\n # https://www.di-mgt.com.au/binomial-calculator.html)\n # TODO: is it valid to assume a random output from the untrained model?\n results = estimator.evaluate(input_fn, steps=eval_steps)\n assert 3/eval_count < results[micronet.estimator.TOP_1_ACCURACY_KEY] \\\n <= 19/eval_count\n\n # 2. Check that the model can be trained.\n # Using the eval_steps as the max training steps. Could use something else.\n estimator.train(input_fn, max_steps=eval_steps)\n\n # 3. Check that the training has increased the model's accuracy.\n # Results is a dict containing the metrics defined by the model_fn.\n # FIXME 4: I should encapsulate/separate the metric creation so that it\n # is easy to assume that certain metrics are present.\n results = estimator.evaluate(input_fn, steps=eval_steps)\n # We should expect some improvement over the random case, 1/100. 
Running\n # it a few times gave ~4.5%, so using a value a little lower to make sure\n # the test reliably passes (while still being useful).\n assert results[micronet.estimator.TOP_1_ACCURACY_KEY] >= 0.040", "def test_differentClassesInequality(self):\n self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))", "def test_invalid_estimator(estimator: Any) -> None:\n mapie = MapieClassifier(estimator=estimator)\n with pytest.raises(ValueError, match=r\".*Invalid estimator.*\"):\n mapie.fit(X_toy, y_toy)", "def _test_unfitted_internals(self,\n knn_clf,\n init_k=3,\n init_is_classifier=True):\n # pylint: disable=no-self-use\n assert knn_clf._k == init_k\n assert knn_clf._is_classifier is init_is_classifier\n\n assert knn_clf._is_fitted is False\n assert np.equal(knn_clf._X, np.ndarray((0, 0))).all()\n assert np.equal(knn_clf._y, np.ndarray((0, ))).all()\n assert knn_clf._X_n == int()\n assert np.equal(knn_clf._unique_y, np.ndarray((0, ))).all()\n assert np.equal(knn_clf._unique_y_counts, np.ndarray((0, ))).all()\n assert np.equal(knn_clf._unique_y_probabilities,\n np.ndarray((0, ))).all() # yapf: disable\n assert knn_clf._majority_label is None\n assert knn_clf._is_structured is False\n assert np.equal(knn_clf._categorical_indices, np.ndarray((0, ))).all()\n assert np.equal(knn_clf._numerical_indices, np.ndarray((0, ))).all()", "def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object", "def test_thatallattributesareindict(self):\n b1 = BaseModel()\n dictionary = b1.to_dict()\n self.assertEqual('__class__' in dictionary, True)\n self.assertEqual('id' in dictionary, True)\n self.assertEqual('created_at' in dictionary, True)\n self.assertEqual('updated_at' in dictionary, True)", "def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])", "def test_raises_when_accessing_none_implementation(self):\n\n class APIObj(\n platform.APIObject,\n collections.namedtuple(\"APIObj\", \"implementation\"),\n ):\n def __new__(cls):\n return super().__new__(cls, implementation=None)\n\n obj = APIObj()\n\n with pytest.raises(AttributeError) as exc_info:\n obj.implementation # pylint: disable=pointless-statement\n\n assert \"invalid access to 'implementation': not initialized\" in str(\n exc_info.value\n )" ]
[ "0.58206624", "0.58120334", "0.5762754", "0.57272524", "0.56799954", "0.56695384", "0.56055886", "0.5602577", "0.5546996", "0.5540443", "0.5539814", "0.55207306", "0.54866004", "0.5472597", "0.5461503", "0.5412684", "0.53859967", "0.537395", "0.535977", "0.53534746", "0.53437835", "0.5326557", "0.53215855", "0.53127384", "0.5302242", "0.5302011", "0.52975065", "0.5284721", "0.5273268", "0.5271901", "0.5260257", "0.5257693", "0.52556545", "0.52536184", "0.52512306", "0.5242738", "0.52346206", "0.52237046", "0.52226096", "0.52201164", "0.5217636", "0.5210803", "0.52080995", "0.5201721", "0.5200111", "0.52000433", "0.5196979", "0.51925886", "0.5187355", "0.5187355", "0.51853615", "0.51758516", "0.51752746", "0.51643026", "0.5157236", "0.5155737", "0.51516795", "0.51472104", "0.5140084", "0.51392394", "0.51388603", "0.51372385", "0.51242757", "0.51140946", "0.51071346", "0.510641", "0.5102838", "0.5095098", "0.5073206", "0.50678855", "0.5064028", "0.5063322", "0.5054568", "0.5046306", "0.50388104", "0.50376093", "0.50349754", "0.50289404", "0.50276905", "0.5018283", "0.5017791", "0.5004805", "0.49951357", "0.49899232", "0.4989685", "0.49842077", "0.4983363", "0.49622586", "0.49621838", "0.49605912", "0.49605143", "0.49568594", "0.49487722", "0.49474838", "0.49471307", "0.49469236", "0.4941709", "0.49381182", "0.49361107", "0.49348763" ]
0.7470269
0
Fit model to current data batch + previous data batch
def fit(self, x, y, logger): history = self.model1.fit(x=x, y=y, batch_size=self.batch_size, epochs=self.epochs) logger.log({'ValFuncLoss': history.history['loss'][-1]})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, data):\n\n \"\"\"YOUR CODE HERE \"\"\"\n # unormalized data\n un_st = np.concatenate([datum[\"observations\"] for datum in data])\n un_stp1 = np.concatenate([datum[\"next_observations\"] for datum in data])\n un_at = np.concatenate([datum[\"actions\"] for datum in data])\n \n # normalize data\n n_st = (un_st-self.mean_obs)/(self.std_obs+self.epsilon)\n n_at = (un_at-self.mean_action)/(self.std_action+self.epsilon)\n n_stat = np.concatenate([n_st,n_at],axis=1)\n \n n_delta = ((un_stp1-un_st)-self.mean_deltas)/(self.std_deltas+self.epsilon)\n\n # make a shuffle row of whole data to be used\n N = n_delta.shape[0]\n train_indicies = np.arange(N)\n np.random.shuffle(train_indicies)\n # train over the whole data set for the number of iterations\n for i in range(self.iterations):\n for i in range(int(math.ceil(N/self.batch_size))):\n # index for the batch points from a random row\n start_idx = i*self.batch_size%N\n idx = train_indicies[start_idx:start_idx+self.batch_size]\n # choose the batch\n feed_dict = {self.st_at : n_stat[idx,:], self.delta_ : n_delta[idx,:]}\n # train the data\n self.sess.run(self.update_op, feed_dict=feed_dict)", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def fit(self,X_train,y_train):\r\n \r\n self.X_train_data=X_train.reset_index(drop=True)\r\n self.y_train_data=y_train.reset_index(drop=True)\r\n \r\n temp_fitted_model=[]\r\n for each_model in self.model_list:\r\n each_model.fit(self.X_train_data,self.y_train_data)\r\n temp_fitted_model.append(each_model)\r\n \r\n self.fitted_model=temp_fitted_model", "def fit_batch(self, data, labels, mask, weights_init, settings, test_data=None,test_labels=None):\n if self.early_stopping:\n stop_patience = settings['patience']\n patience=0\n best_acc=0.0\n current_epoch = 0\n n = np.size(data,axis=0)\n n_batch = self.batch_size\n acc_history = []\n x_train, y_train, x_val, y_val = self.shuffle_in_unison(data,labels, settings['split'])\n\n if not settings['use_random_init']:\n current_weights = weights_init\n else:\n current_weights = self.get_weights()\n for e in range(0, settings['n_epochs']):\n x_train, y_train, _,_ = self.shuffle_in_unison(x_train,y_train,0.0)\n current_epoch=e\n print(\"Epoch \" + str(e+1) + \"/\" + str(settings['n_epochs']))\n for j in tqdm(range(int(len(x_train) / n_batch))):\n masked_weights = self.mask_weights(mask, current_weights)\n self.model.set_weights(masked_weights)\n j_start = j*n_batch\n j_end = (j+1)*n_batch\n Xbatch = x_train[j_start:j_end,:,:]\n Ybatch = y_train[j_start:j_end]\n self.model.train_on_batch(Xbatch,Ybatch)\n current_weights = self.get_weights()\n if self.early_stopping:\n _, val_acc = self.evaluate_model(x_val,y_val)\n if val_acc <= best_acc:\n patience=patience+1\n else:\n best_acc=val_acc\n if settings['eval_test']:\n _, test_acc = self.evaluate_model(test_data,test_labels)\n acc_history.append(test_acc)\n if self.early_stopping:\n if patience>=stop_patience:\n break\n \n new_weights = self.mask_weights(mask, current_weights)\n self.model.set_weights(new_weights)\n return acc_history, current_epoch", "def fit(self):\n \n print(\"Training model...\")\n center = self.center\n model = self.model\n n_epochs = self.config.epochs\n best_val_loss = np.inf\n for epoch in range(n_epochs):\n model.train()\n loop = tqdm(self.train_dataloader)\n for xb, _ in loop:\n loss = self.loss_batch(xb)\n loop.set_description(\"Epoch [{}/{}] \".format(epoch, n_epochs))\n loop.set_postfix({\"loss\":loss.item()})\n\n model.eval()\n with torch.no_grad():\n losses = 
[torch.cdist(model(xb), center.view(1, -1))\n for xb, yb in self.val_dataloader]\n losses = [x.item() for xb in losses for x in xb]\n val_loss = np.mean(losses) + self.get_regularizer_term()\n print(\"val_loss={:.6f}\".format(val_loss))\n\n if val_loss < best_val_loss:\n best_model_state = copy.deepcopy(model.state_dict())\n best_val_loss = val_loss\n self.save_model(self.config.mnist_cnn_weights, best_model_state)", "def fit(self, X):", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def fit(self, data):\n batch_count = data.shape[0]\n batch_mu = np.mean(data, axis=0, keepdims=True)\n batch_var = np.var(data, axis=0, keepdims=True)\n new_mean, new_var, new_count = self.running_mean_var_from_batch(batch_mu, batch_var, batch_count)\n #sigma[sigma < 1e-8] = 1.0\n self.mu.load(new_mean)\n self.var.load(new_var)\n self.count.load(new_count)\n self.fitted = True\n self.cache()", "def fit(self, data):\n #paths=data\n obs = data['observations']\n delta = data[\"delta\"]\n acs = data[\"actions\"]\n \n \n ### normalize\n obs = normalize(obs,self.normalization['observations'][0],self.normalization['observations'][1])\n delta = normalize(delta,self.normalization['delta'][0],self.normalization['delta'][1])\n acs = normalize(acs,self.normalization['actions'][0],self.normalization['actions'][1])\n \n \n train_count = len(obs) \n N_EPOCHS=50\n for i in range(1, N_EPOCHS + 1): # tf.data /tf.train.batch /tf.train.shuffle_batch -- later\n print(\"epoch: \",i)\n done=False\n start=0;end=0\n while(not done):\n start=end\n end=min(start+self.batch_size,train_count)\n #print(start+self.batch_size,train_count)\n #print(end)\n if(end==train_count):\n done=True\n 
self.sess.run(self.dyna_update_op, feed_dict={self.sy_ob:obs[start:end], self.sy_ac:acs[start:end], self.delta:delta[start:end] })", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def fit(self):\n converge = False\n while not converge:\n converge = True\n for xi, yi in zip(self.X, self.y):\n yhat = self.classify(xi)\n if yhat != yi:\n converge = False\n # update model\n self.W += self.lr * yi * xi\n self.b += self.lr * yi * 1", "def fit(self):\n # Iterate and train.\n step_file = self.checkpointer.get_step_file()\n start_step = Pickle.load(open(step_file, 'rb'))\n for step in xrange(start_step, self.train_size // self.train_batch_size):\n print 'Step No.:', step\n # Checkpoint tensorflow variables for recovery\n if step % self.checkpointer.get_checkpoint_steps() == 0:\n print 'Checkpointing: Saving Tensorflow variables'\n self.saver.save(self.sess, self.checkpointer.get_save_address())\n Pickle.dump(step + 1, open(step_file, 'wb'))\n print 'Checkpointing Complete. Deleting historical checkpoints....'\n self.checkpointer.delete_previous_checkpoints(num_previous=2)\n print 'Deleted.. Moving forward...'\n\n offset = (step * self.train_batch_size) % self.train_size\n batch_data_fwd = self.X_trn_fwd[offset:(offset + self.train_batch_size), :].T\n batch_data_bwd = self.X_trn_bwd[offset:(offset + self.train_batch_size), :].T\n batch_labels = self.Y_trn[offset:(offset + self.train_batch_size), :].T\n\n loss_t_forward, loss_t_backward = self._train_batch(batch_data_fwd, batch_data_bwd, batch_labels)\n print \"Present Loss Forward:\", loss_t_forward\n print \"Present Loss Backward:\", loss_t_backward\n\n # check results on 2 tasks - Visual Validation\n print 'Train Data Validation\\n'\n self._visual_validate(self.X_trn_fwd[301, :], self.X_trn_bwd[301, :], self.Y_trn[301, :])\n print\n print\n print 'Test Data Validation\\n'\n self._visual_validate(self.X_tst_fwd[56, :], self.X_tst_bwd[56, :], self.Y_tst[56, :])\n print\n print\n\n # Store prediction after certain number of steps #############\n # This will be useful for the graph construction\n '''\n if(step % self.checkpointer.get_prediction_checkpoint_steps() == 0):\n self.predict()\n self.store_test_predictions('_' + str(step))\n '''", "def fit_batch(self, batch):\n # check if we need to switch optimizer\n # if so change the optimizer from Adam to SGD\n self.check_and_reset_optimizer()\n predictions = self.compute_forward(batch, sb.Stage.TRAIN)\n loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)\n\n # normalize the loss by gradient_accumulation step\n (loss / self.hparams.gradient_accumulation).backward()\n\n if self.step % self.hparams.gradient_accumulation == 0:\n # gradient clipping & early stop if loss is not fini\n self.check_gradients(loss)\n\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # anneal lr every update\n self.hparams.noam_annealing(self.optimizer)\n\n return loss.detach()", "def fit(self, inp, targ):\n self.model.fit(inp, targ, epochs=1, verbose=0)", "def fit():\n pass", "def fit(self):\n\t\tfor i in range(self.n_iter):\n\t\t\tself.weights = self.weights - (self.learning_rate / self.n_samples) \\\n\t\t\t\t* self.x_data.T @ (self.x_data @ self.weights - self.y_data)\n\n\t\tself.intercept_ = self.weights[0]\n\t\tself.coef_ = self.weights[1:]\n\n\t\treturn self", "def fit_one_epoch(self):\n preds, labels = [], []\n for batch_idx, data in tqdm(enumerate(self.primary_dataloader)):\n losses_report, train_preds, train_labels = 
self.forward_one_batch(\n data)\n preds.append(train_preds)\n labels.append(train_labels)\n\n self._optimize(losses_report)\n self._update_losses(losses_report, train=True)\n\n self.iter += 1\n\n # log/check point\n with torch.no_grad():\n if self.iter % self.log_iter == 0:\n # TODO: track train\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=True)\n preds, labels = [], []\n\n if self.valid_dataloader:\n self.validate()\n\n self.log_meters()\n self.save_checkpoint()\n self.reset_meters()", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, X, y=...):\n ...", "def fit(self, data):\n for v in self.features + self.targets:\n v._fit(data)", "def fit(self, X, Y):\n np.random.seed(40)\n self.num_samples=X.shape[0]\n self.layers_sizes.insert(0,X.shape[1])\n self.initialize_parameters()\n variable=self.num_epochs//5\n\n # loop for epochs\n for vv in range(self.num_epochs):\n # creating batches of dataset of specified batch size\n X,Y=shuffle(X,Y,random_state=vv)\n num_batches=X.shape[0]//self.batch_size\n train_x=np.vsplit(X,num_batches)\n train_y=np.vsplit(Y,num_batches)\n train_cost=0\n \n for i in range(num_batches):\n # iterating over batches and applying forward and backward propagation\n # and determining training cost (cross entropy loss) for every batch\n # and averaging them to give a generalised loss\n A,d_collection=self.forward(train_x[i])\n train_cost+=(-np.mean(train_y[i]*np.log(np.transpose(A))))/num_batches\n derivatives=self.backward(train_x[i],train_y[i],d_collection)\n\n self.weight_update(derivatives)\n \n if vv%variable==0:\n print(\"Accuracy score:\",self.score(X,Y))\n \n # adding both training and testing losses in a list to plot in further ques\n self.training_loss_values.append(train_cost)\n test_cost=-np.mean(self.YTEST*np.log(np.transpose(self.predict_proba(self.XTEST))))\n self.testing_loss_values.append(test_cost)\n return self", "def partial_fit(self, X, y=...):\n ...", "def partial_fit(self, X, y=...):\n ...", "def fit(self, X, y):\n y = np.array(y)\n rows, columns = X.shape\n num_batches = int(np.ceil(rows / self.batch_size))\n batches = np.arange(num_batches + 1) * self.batch_size\n indxs = np.arange(rows)\n\n self.W = np.zeros(columns)\n self.b = rng.random(1)\n\n # stochastic gradient descent logic\n for _ in range(self.num_epochs):\n rng.shuffle(indxs)\n\n for i, j in zip(batches[0:-1], batches[1:]):\n batch_indxs = indxs[i:j]\n x_batch = X[batch_indxs]\n y_batch = y[batch_indxs]\n self.update(x_batch, y_batch)\n\n # track loss history during training\n self.loss_history.append(self.loss(self.predict_proba(X), y))\n self.accuracies.append(self.accuracy(X, y))", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def fit(self, X, Y):\n ...", "def fit(self, train_x, train_y):\n history = self.model.fit(train_x, train_y, epochs=self.EPOCH_SIZE, batch_size=self.BATCH_SIZE, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', patience=1, min_delta=0.0001)])\n self.history = history\n return history", "def fit_batch(self, batch):\n\n predictions = self.compute_forward(batch, 
sb.Stage.TRAIN)\n loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)\n\n # normalize the loss by gradient_accumulation step\n (loss / self.hparams.gradient_accumulation).backward()\n\n if self.step % self.hparams.gradient_accumulation == 0:\n # gradient clipping & early stop if loss is not finite\n self.check_gradients(loss)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n return loss.detach()", "def update_fit(self, X, y):\n pass", "def fit(self, X, y):\n self.model_x = X\n self.model_y = y", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, data):\n raise NotImplementedError(\"To be implemented in sub classes\")", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def fit(self) -> None:\n start_time = time.time()\n # ---- START -----\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n for label, model in zip(self.label_list, self.models):\n model.fit(train_features, train_df[label])\n # ---- END -----\n end_time = time.time()\n print(\"Finished fitting : elasped time : \" + str(end_time - start_time))", "def batch_fit(self, data_loader: torch.utils.data.DataLoader, epochs: int):\n pass", "def partial_fit(self, X, y=None):\n # update model on a minibatch\n self.logger.info(self.__name__ +\n ' is updated on dataset with {:d} samples and {:d} features.'.\n format(X.shape[0], X.shape[1]))\n pass", "def fit(self):\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n\r\n while not self.converged() or self.training_iteration == 0:\r\n print('Iteration: ', self.training_iteration)\r\n\r\n self.compute_gradient_for_all()\r\n self.upd_theta()\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n self.training_iteration += 1", "def fit(self, x):\n pass", "def prepare_next_batch(self) -> None:\n if not (\n self._forward_succesful and self._clip_succesful and self._noise_succesful\n ):\n raise RuntimeError(\n \"An error occured during model training. 
The model.prepare_next_batch() \"\n \" method must be called after model.forward(), model.clip_and_accumulate() \"\n \" and model.noise_gradient().\"\n )\n for model in self.models:\n for target_param, source_param in zip(\n model.parameters(), self.wrapped_model.parameters()\n ):\n target_param.data = source_param.data\n self._steps_taken += 1\n self._forward_succesful = self._clip_succesful = self._noise_succesful = False\n if self.watchdog:\n self.watchdog.inform(self._steps_taken)", "def predict_batch(self, model, context, data=None):\n pass", "def fit(self, X, Y, y, params):\n Ndim, Npts = X.shape\n Nout, _ = Y.shape\n epochs = params.epochs\n n_batch = params.n_batch\n eta = params.eta\n lmd = params.lmd\n\n n_layers = len(self.hidden_units)\n\n # Initialize weights and biases\n if n_layers == 0:\n print('Add Hidden Layers with .Dense(N_units)')\n\n else:\n\n for i in range(n_layers + 1):\n\n if i == 0:\n n_units = self.hidden_units[i]\n\n W_curr = np.random.normal(0, 1/np.sqrt(Ndim),\n size = (n_units, Ndim))\n\n b_curr = np.zeros((n_units, 1))\n\n elif i == n_layers:\n n_units_previous = self.hidden_units[i - 1]\n\n W_curr = np.random.normal(0, 1/np.sqrt(n_units_previous),\n size = (Nout, n_units_previous))\n\n b_curr = np.zeros((Nout, 1))\n\n else:\n n_units = self.hidden_units[i]\n n_units_previous = self.hidden_units[i - 1]\n\n W_curr = np.random.normal(0, 1/np.sqrt(n_units_previous),\n size = (n_units, n_units_previous))\n\n b_curr = np.zeros((n_units, 1))\n\n\n self.W.append(W_curr)\n self.b.append(b_curr)\n\n print('=-=- Training starting -=-= \\n')\n for i in range(epochs):\n\n for j in range(round(Npts/n_batch)):\n\n #Create mini_batch\n ind = self.BatchCreator(j, n_batch)\n XBatch = X[:, ind]\n YBatch = Y[:, ind]\n\n #Training\n P, H = self.forward(XBatch)\n gradients_W, gradients_b = self.backward(XBatch, YBatch, P, H, lmd)\n self.update(gradients_W, gradients_b, eta)\n\n print('=-=- Epoch: ', i, ' -=-=')\n\n print('\\n =-=- Training completed! 
-=-=')", "def __fit_model(self):\n\n labels = self.labeled_labels\n features = self.labeled_features\n\n pred = np.array(cross_val_predict(self.clf,\n features,\n labels,\n cv=self.cv))\n\n stats = self.__get_statistics(labels, pred)\n self.statistics.append(stats)\n\n self.clf.fit(features, labels)\n\n return self", "def fit(self, X, Y, epochs, batch_size=32, learning_rate=0.01):\n n = 0\n n_samples = X.shape[1]\n for i in range(epochs):\n while n * batch_size < n_samples:\n X_cur = X[:, n * batch_size : min((n + 1) * batch_size, n_samples - 1)]\n Y_cur = Y[:, n * batch_size : min((n + 1) * batch_size, n_samples - 1)]\n Y_hat = self.__feedforward(X_cur)\n cost = self.__get_cost_value(Y_hat, Y_cur)\n total_cost = T.sum(cost).eval()\n if self.best_loss == None or total_cost < self.best_loss:\n self.best_loss = total_cost\n accuracy = self.__get_accuracy_value(Y_hat, Y_cur).eval()\n print('Epoch '+ str(i + 1) +' - ' + str(n + 1) + '/' + str((n_samples // batch_size) + 1) + ' Loss: ' + str(total_cost) + ' Accuracy: ' + str(accuracy), end='\\r', flush=True)\n self.__backprop(X_cur, Y_hat, Y_cur)\n self.__update(learning_rate)\n pickle.dump(self, open('model.p', 'wb'))\n n += 1", "def fit(self):\n raise NotImplementedError", "def _pre_fit(self):\n pass", "def fit(self, X,y):\n pass", "def fit(self,X, y, lr=0.1, iters=100, recompute=True):\r\n # INSERT YOUR CODE HERE\r\n # Appending a column of 1's to the train set X\r\n X = np.append(X, np.array([[1]]*X.shape[0]), axis=1)\r\n self.w =self.initialize_weights(X.shape[1])\r\n\r\n if(recompute):\r\n #Reinitialize the model weights\r\n self.w = self.initialize_weights(X.shape[1])\r\n\r\n for _ in range(iters): \r\n # Calculate gradient ascent and Update weights\r\n # w = w + lr * g / size of X\r\n self.w += lr*(1 / X.shape[0])*self.gradient_ascent(self.w,X,y,lr)", "def fit_recurrent(self, x, y):\n # print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma(self.training_hit_probability)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, hm_y)\n\n # hm_subtypes = []\n # proba_subtypes = []\n\n # while np.mean(y_) > 0.01:\n # for label in np.unique(y):\n\n hm_1hot = []\n hm_1hot.append(self._one_hot(self.training_hit_probability, y)[0])\n y_ = y.copy()\n\n self.recurrent_base = []\n self.recurrent_hpc = []\n for ii in range(self.recurrent_modes):\n print('Stage 1 iter: ' + str(ii))\n #self.recurrent_base.append(BaseSvc())\n\n if np.sum(y_) > 2:\n self.basemodel = BaseSvc()\n hm_y, proba_tmp = self._fit_mode(x_, y_)\n hm_candidate = self._one_hot(proba_tmp, y_)[1]\n else:\n hm_candidate = np.zeros_like(y_)\n\n self.recurrent_base.append(self.basemodel)\n\n #if np.sum(hm_candidate) >= 2:\n hm_1hot.append(hm_candidate)\n\n # remove the selected subgroup from the target list\n y_[hm_1hot[-1] == 1] = 0\n\n # make the default base model the first\n self.basemodel = self.recurrent_base[0]\n\n print('Stage 2')\n # Stage 2\n # hm_1hot = hm_subtypes\n # train stage2\n self.confidencemodel.fit(x_, hm_1hot)", "def fit(self, data, labels, n_epochs=20):\n self.model.fit(x=data, y=labels, batch_size=self.batch_size, \n validation_split=0.1 if self.early_stopping else None, epochs=n_epochs,\n callbacks=[self.es] if self.early_stopping else None)", "def fit(self, 
X_train, y_train, validation_split=None,\n chkpt_model_every=None, model_save_dir=None, nb_epoch=1, batch_size=32,\n max_seq_len=128, learning_rate=3e-5, adam_epsilon=1e-8,\n warmup_steps=0, gradient_accumulation_steps=1):\n \n if chkpt_model_every:\n assert type(chkpt_model_every), \"@Param: 'chkpt_model_every' must be an integer\"\n assert type(chkpt_model_every), \"@Param: 'chkpt_model_every' must be greater than zero\"\n assert os.path.isdir(model_save_dir), (f\"@Param: 'model_save_dir' == {model_save_dir}.\"\n \" Directory does not exist\"\n \" Must supply an existing directory if @Param: \"\n \"'chkpt_model_every' is used\")\n\n validation_dataloader=None\n if validation_split:\n split_percent = int(len(X_train) * validation_split)\n X_train = X_train[:split_percent]\n y_train = y_train[:split_percent]\n X_val = X_train[split_percent:]\n y_val = y_train[split_percent:]\n\n num_train_optim_steps = int(len(X_train) / batch_size) * nb_epoch\n optimizer, scheduler = setup_optim(learning_rate, adam_epsilon, warmup_steps, num_train_optim_steps)\n train_dataloader = setup_dataloader(X_train, y_train, max_seq_len, batch_size, shuffle=True)\n\n\n self.model.zero_grad()\n self.model.train()\n for i in range(nb_epoch):\n step = 0\n train_accuracy = 0\n for batch in tqdm(train_dataloader, desc=\"Iteration\"):\n batch = {k: t.to(self.device) for k, t in batch.items()}\n outputs = self.model(**batch)\n loss, logits = outputs[:2]\n loss.backward()\n train_accuracy += calculate_accuracy(logits, batch[\"labels\"])\n\n if (step + 1) % gradient_accumulation_steps == 0:\n optimizer.step()\n scheduler.step()\n self.model.zero_grad()\n step += 1\n\n batch = {k: t.detach().cpu() for k, t in batch.items()}\n del batch\n torch.cuda.empty_cache()\n \n validation_accuracy = None\n if validation_dataloader:\n validation_accuracy = self.evaluate(X_val, y_val, batch_size, max_seq_len)\n \n if chkpt_model_every:\n results = {\n \"train accuracy\": train_accuracy / len(train_dataloader),\n \"validation_accuracy\": validation_accuracy\n }\n chkpt_name = \"chkpt epochs={0}\".format(i + 1)\n self.save(model_save_dir, chkpt_name, results)", "def fit_predict_single_fold(\n self, train: TabularDataset, valid: TabularDataset\n ) -> Tuple[LinearEstimator, np.ndarray]:\n if type(train) is PandasDataset:\n train = train.to_numpy()\n valid = valid.to_numpy()\n\n _model, cs, l1_ratios, early_stopping = self._infer_params()\n\n train_target, train_weight = self.task.losses[\"sklearn\"].fw_func(train.target, train.weights)\n valid_target, valid_weight = self.task.losses[\"sklearn\"].fw_func(valid.target, valid.weights)\n\n model = deepcopy(_model)\n\n best_score = -np.inf\n best_pred = None\n best_model = None\n\n metric = self.task.losses[\"sklearn\"].metric_func\n\n for l1_ratio in sorted(l1_ratios, reverse=True):\n\n try:\n model.set_params(**{\"l1_ratio\": l1_ratio})\n except ValueError:\n pass\n\n model = deepcopy(_model)\n\n c_best_score = -np.inf\n c_best_pred = None\n c_best_model = None\n es = 0\n\n for n, c in enumerate(cs):\n\n try:\n model.set_params(**{\"C\": c})\n except ValueError:\n model.set_params(**{\"alpha\": c})\n\n model.fit(train.data, train_target, train_weight)\n\n if np.allclose(model.coef_, 0):\n if n == (len(cs) - 1):\n logger.info2(\n \"All model coefs are 0. 
Model with l1_ratio {0} is dummy\".format(l1_ratio),\n UserWarning,\n )\n else:\n logger.debug(\"C = {0} all model coefs are 0\".format(c))\n continue\n\n pred = self._predict_w_model_type(model, valid.data)\n score = metric(valid_target, pred, valid_weight)\n\n logger.debug(\"C = {0}, l1_ratio = {1}, score = {2}\".format(c, 1, score))\n\n # TODO: check about greater and equal\n if score >= c_best_score:\n c_best_score = score\n c_best_pred = deepcopy(pred)\n es = 0\n c_best_model = deepcopy(model)\n else:\n es += 1\n\n if es >= early_stopping:\n logger.debug(\"Early stopping..\")\n break\n\n if self.timer.time_limit_exceeded():\n logger.info3(\"Time limit exceeded\")\n break\n\n # TODO: Think about is it ok to check time inside train loop?\n if (model.coef_ != 0).all():\n logger.debug(\"All coefs are nonzero\")\n break\n\n if c_best_score >= best_score:\n best_score = c_best_score\n best_pred = deepcopy(c_best_pred)\n best_model = deepcopy(c_best_model)\n\n if self.timer.time_limit_exceeded():\n logger.info3(\"Time limit exceeded\")\n break\n\n val_pred = self.task.losses[\"sklearn\"].bw_func(best_pred)\n\n return best_model, val_pred", "def fit(self, data):\n if not self._transformers:\n return\n\n transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n final_step[1].fit(transformed_data)", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def _fit(self, x_train, y_train, x_valid, y_valid, regressor_callback=None):", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def fit(self, data_loader, epochs):\n self.to(self.model_device)\n for epoch in range(epochs):\n epoch_loss = self.epoch_train(self, data_loader)\n if \"cuda\" in self.model_device.type:\n torch.cuda.empty_cache()\n self.train_loss.append(epoch_loss)\n print(f\"epoch {epoch + 1}/{epochs} : mean loss = {self.train_loss[-1]:.6f}\")", "def fit(self, data: np.array, labels: np.array):\n self.model.fit(squeeze_keep_batch(data), squeeze_keep_batch(labels))", "def train_model(self):\n early_stopping = EarlyStopping(self, self.hyper.early_stopping_enabled, self.hyper.early_stopping_limit)\n loss_history_train = []\n loss_metric_train = tf.keras.metrics.Mean()\n\n x_train, next_values_train = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train,\n self.dataset.next_values_train])\n\n x_train_val, next_values_train_val = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train_val,\n self.dataset.next_values_train_val])\n\n for epoch in range(self.hyper.epochs):\n print(\"Epoch %d\" % (epoch,))\n\n for step, (x_batch_train, next_values_batch_train) in enumerate(zip(x_train, next_values_train)):\n self.train_step(x_batch_train, next_values_batch_train, loss_metric_train)\n\n if step % 50 == 0:\n print(\"\\tStep %d: mean loss = %.4f\" % (step, loss_metric_train.result()))\n\n loss_train_batch = loss_metric_train.result()\n loss_history_train.append(loss_train_batch)\n loss_metric_train.reset_states()\n\n self.model.save_weights(self.checkpoint_path.format(epoch=epoch))\n\n # Check early stopping criterion --> Has the loss on the validation set not decreased?\n best_epoch = early_stopping.execute(epoch, x_train_val, next_values_train_val)\n self.clean_up(early_stopping, epoch)\n\n if best_epoch > 0:\n print('Model from epoch %d was selected by early stopping.' 
% best_epoch)\n print('Training process will be stopped now.')\n\n self.save_model(best_epoch)\n\n return\n\n self.save_model(epoch=self.hyper.epochs - 1)", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def fit(self, X):\n raise NotImplementedError", "def fit_model(self):\n model = self.make_model()\n self.history = model.fit(x=self.xt_train, y=self.yt_train,\n epochs=self.n_epochs, verbose=0,\n validation_split=self.v_split, shuffle=True)\n self.eval_model(model)\n self.save_model(model)\n return model", "def fit(self, X, y=None):\n # Check that X and y have correct shape\n X, y = check_X_y(X, y)\n # Store the classes seen during fit\n self.classes_ = unique_labels(y)\n \n if self.shuffle:\n X, y = shuffle(X, y)\n\n self.X_ = X\n self.y_ = y\n \n self._initialize_map()\n self._create_location_vectors()\n self._initialize_sigma()\n \n for i in range(self.num_epoch):\n j= 0\n print(f\"Epoch:{i}\")\n while(j < self.X_.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < self.X_.shape[0] else self.X_.shape[0]\n current_batch = self.X_[j:current_batch_end]\n self._feedforward(current_batch)\n self._backprop(j, self.X_.shape[0], current_batch)\n j = current_batch_end \n return self", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def fit(self, data):\n return self", "def minibatch_fit(self):\r\n self.init_plot(self.FEATURES)\r\n\r\n # YOUR CODE HERE\r\n max_batch_nr = self.DATAPOINTS // self.MINIBATCH_SIZE\r\n batch_nr = 1\r\n\r\n while not self.converged() or self.training_iteration == 0:\r\n print('Processing batch nr: ', batch_nr)\r\n 
self.compute_gradient_minibatch(batch_nr)\r\n self.upd_theta()\r\n self.update_plot(np.sum(np.square(self.gradient)))\r\n\r\n batch_nr += 1\r\n # start over on first batch if processed all data\r\n batch_nr = batch_nr % max_batch_nr\r\n self.training_iteration += 1", "def fit(self):\n raise NotImplementedError # pragma: no cover", "def fit(self, X, y):\n self.X_data = X\n self.y = y", "def fit(self, X, y):\n self.X_train = X\n self.y_train = y", "def fit(self):\n\n rmse_old = .0\n for epoch in range(self.epochs):\n error_final = .0\n\n for user, item, feedback in self.feedback_triples:\n pu = self.p[user] + self.y_sum_rows(user)\n\n # Calculate error\n eui = feedback - self._predict_svd_plus_plus_score(user, item, pu, False)\n error_final += (eui ** 2.0)\n\n # update bu and bi\n self.bu[user] += self.bias_learn_rate * (eui - self.delta_bias * self.bu[user])\n self.bi[item] += self.bias_learn_rate * (eui - self.delta_bias * self.bi[item])\n\n # Adjust the factors\n norm_eui = eui / self.n_u[user]\n\n i_f = self.q[item]\n\n # Compute factor updates\n delta_u = np.subtract(np.multiply(eui, i_f), np.multiply(self.delta, self.p[user]))\n self.p[user] += np.multiply(self.learn_rate, delta_u)\n\n delta_i = np.subtract(np.multiply(eui, pu), np.multiply(self.delta, i_f))\n self.q[item] += np.multiply(self.learn_rate, delta_i)\n\n # update y (implicit factor)\n common_update = norm_eui * i_f\n\n for j in self.items_id_seen_by_user[user]:\n delta_y = np.subtract(common_update, self.delta * self.y[j])\n self.y[j] += self.learn_rate * delta_y\n\n rmse_new = np.sqrt(error_final / self.train_set[\"number_interactions\"])\n\n if np.fabs(rmse_new - rmse_old) <= self.stop_criteria:\n break\n else:\n rmse_old = rmse_new", "def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)", "def train(self, X, y):\n self.model.fit(X, y)", "def fit(self, sample, expected):\n\n # shuffle the data\n shuffled_sample, shuffled_expected = m.unison_shuffle(np.array(sample), np.array(expected))\n\n # batch up\n batched_x = m.batch_(shuffled_sample, self.batch_size)\n batched_y = m.batch_(shuffled_expected, self.batch_size)\n\n # learning time\n for index in range(len(batched_x)):\n self.__train_batch(batched_x[index], batched_y[index])", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def step(self):\n fit_default_config = {\"verbose\": self.verbose}\n fit_default_config.update(self.config.get(\"fit_config\", {}))\n\n history = self.model.fit(self.train_dataset, **fit_default_config)\n if history is None:\n stats = {}\n else:\n stats = {\"train_\" + k: v[-1] for k, v in history.history.items()}\n\n self.epoch += 1\n return stats", "def train_on_batch(model,\n\t\t\t batch_of_x,\n\t\t\t batch_of_y,\n\t\t\t optimizer):\n model.zero_grad()\n\n loss = model.loss(batch_of_x, batch_of_y)\n\n loss.backward()\n\n optimizer.step()\n\n return", "def fit_model(x_train, y_train, model=None):\n if not model:\n model = create_general_lstm_model(x_train.shape[1], x_train.shape[2])\n\n history = model.fit(\n x_train,\n y_train,\n epochs=100,\n batch_size=64,\n validation_split=0.1,\n callbacks=[\n keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10, mode=\"min\")\n ],\n shuffle=False,\n )\n\n return model, history", "def train(self, batch):\n pass", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n 
self.model.fit(x, t)", "def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()", "def _train(self):\n\n batch = random.sample(self.D, min(self.batch_size, len(self.D)))\n no_state = np.zeros(self.stateCnt)\n\n states = [ o[0] for o in batch]\n states_ = [ (no_state if o[3] is None else o[3]) for o in batch ]\n\n p = []\n p_ = []\n for ii in range(len(batch)):\n p.append(self._predict(states[ii][:,:,:]))\n p_.append(self._predict(states_[ii][:,:,:]))\n\n batchLen = len(batch)\n\n x = np.zeros((batchLen, 84, 84, 1))\n y =np.zeros((batchLen, 11,11,6))\n\n for i in range(batchLen):\n o = batch[i]\n s = o[0]; a = o[1]; r = o[2]; s_ = o[3]\n\n t = p[i][0,:,:,:]\n if s_ is None:\n t[a] = r\n else:\n t[a] = r + self.gamma* np.amax(p_[i])\n x[i] = s\n y[i] = t\n\n self.model.fit(x,y,nb_epoch=1,verbose=0)", "def fit(self, X=None, y=None):\n if self.seed:\n random.seed(self.seed)\n np.random.seed(self.seed)\n tf.random.set_seed(self.seed)\n\n # pylint: disable=assignment-from-no-return\n self.model = self.create_model()\n\n if not self.model:\n raise RuntimeError(\"Model was not created.\")\n\n self.model.compile(optimizer=self.optimizer,\n loss=self.loss,\n metrics=self.metrics)\n\n self.history = self.model.fit([X[:, i] for i in range(X.shape[1])],\n y,\n epochs=self.epochs)", "def fit(self, x=None, y=None, **kwargs):\n if not isinstance(x, tf.data.Dataset) and 'input_1' not in x:\n raise TypeError('BrainModel.train must be called with tf.data.Dataset '\n 'object, not %s' % x)\n if y is not None:\n raise ValueError('Y value not needed, should be part of dataset.')\n\n if self._tensorboard_dir:\n if 'callbacks' in kwargs:\n kwargs['callbacks'].append(\n tf.keras.callbacks.TensorBoard(log_dir=self._tensorboard_dir))\n else:\n kwargs['callbacks'] = [\n tf.keras.callbacks.TensorBoard(log_dir=self._tensorboard_dir),]\n history = super(BrainModel, self).fit(x, **kwargs)\n logging.info('Training a %s model returned these metrics: %s',\n self, history)\n return history", "def train_model_with_auto_adjust_batch(model, i, data, now_is_training, label_index):\n datas = [data]\n while len(datas) != 0:\n data = datas[0]\n try:\n # Train\n tokens_tensors, segments_tensors, \\\n masks_tensors, labels = [t.to(device) for t in data]\n outputs = model(input_ids = tokens_tensors,\n attention_mask=masks_tensors,\n token_type_ids=segments_tensors)\n \n logits = outputs[0]\n\n current_label = labels.view(-1, labels.shape[-1])[:, label_index]\n current_label = current_label.view(-1)\n\n active_logits = logits.view(-1, logits.shape[-1])[masks_tensors.view(-1) == 1]\n\n actual = current_label[masks_tensors.view(-1)== 1].float().view(-1,1)\n\n loss_fct = torch.nn.BCEWithLogitsLoss()\n\n loss = loss_fct(active_logits, actual)\n loss.backward()\n datas.pop(0)\n except Exception as error:\n if \"CUDA\" not in error.args[0]:\n raise error\n # del the variables are unnecessary, Python can check automatically.\n # So I comment it in case anyone want to do the same thing here :D\n #\n # del tokens_tensors, segments_tensors, \\\n # masks_tensors, labels\n # torch.cuda.empty_cache()\n msg = f\"Failed, CUDA out of memory, dividing data from shape {np.array(datas[0][0]).shape}\"\n queue_task_log(now_is_training[\"_id\"], msg)\n length = len(datas[0][0])\n half = int(length/2)\n if length != 1:\n devided_datas = []\n for d in datas:\n tokens_tensors, segments_tensors, \\\n masks_tensors, labels = d\n \n devided_datas.append((tokens_tensors[:half], segments_tensors[:half], \\\n masks_tensors[:half], 
labels[:half]))\n \n devided_datas.append((tokens_tensors[:half], segments_tensors[:half], \\\n masks_tensors[:half], labels[:half]))\n datas = devided_datas\n finally:\n # do empty_cache every run to avoid CUDA OOM,\n # and let other programs can use GPU, too.\n torch.cuda.empty_cache()\n # When testing with Batch Size 128.\n # without clean cache, cost: 01:33 per 100 iteration.\n # with clean cache directly, cost: 01:43 per 100 iteration.\n # with threading clean cache and join, cost: 01:44 per 100 iteration.\n # with threading clean cache without join, cost: 01:44 per 100 iteration.\n ## Therefore, I clean cache directly in this case.\n return loss", "def fit(self, data):\n if data is None:\n self.train_self()\n else:\n # not needed this week\n pass", "def update(self, x_train_single, updated_h):\n x_row = x_train_single.toarray()\n for i in range(self.num_models):\n self.models[i].partial_fit(x_row, [updated_h[i]])", "def fit(self):\n\n # Initial values for geco algorithm\n if self.params[\"networks\"][\"variational\"]:\n self.langrange_multiplier = self.params[\"geco\"][\"initial_lagrange_multiplier\"]\n self.C_ma = None\n\n # TRAIN\n for ep in range(self.params[\"optimization\"][\"epochs\"]):\n print(\"Epoch %s / %s\" % (str(ep + 1), str(self.params[\"optimization\"][\"epochs\"])))\n pbar = tqdm.tqdm(self.train_data_loader)\n for batch_idx, rollout_batch in enumerate(pbar):\n # Move to device and change dtype\n rollout_batch = rollout_batch.to(self.device).type(self.dtype)\n\n # Do an optimization step\n losses, prediction = self.training_step(rollouts=rollout_batch)\n\n # Log progress\n self.training_logger.step(losses=losses,\n rollout_batch=rollout_batch,\n prediction=prediction,\n model=self.hgn)\n\n # Progress-bar msg\n msg = \", \".join([\n f\"{k}: {v:.2e}\" for k, v in losses.items() if v is not None\n ])\n pbar.set_description(msg)\n # Save model\n self.hgn.save(self.model_save_file)\n\n self.test()\n return self.hgn" ]
[ "0.7298221", "0.7222674", "0.71828973", "0.7077382", "0.7039742", "0.7018279", "0.69872963", "0.6833547", "0.68039864", "0.6773409", "0.6749072", "0.6726586", "0.6716468", "0.6715232", "0.6707798", "0.6707481", "0.6704683", "0.668996", "0.668996", "0.668996", "0.668996", "0.668996", "0.668996", "0.668996", "0.668996", "0.668996", "0.668996", "0.6688652", "0.6684313", "0.66785794", "0.66785794", "0.66778636", "0.6675245", "0.6661675", "0.66402817", "0.66355777", "0.6631921", "0.6631497", "0.66213137", "0.66213137", "0.66213137", "0.66169083", "0.6589438", "0.65839875", "0.6583223", "0.656806", "0.65530264", "0.6546962", "0.65462923", "0.6545546", "0.6544837", "0.6531474", "0.6529397", "0.6523084", "0.6512455", "0.6495903", "0.6487717", "0.6482245", "0.64790636", "0.6478439", "0.6468829", "0.6442217", "0.64409006", "0.64384174", "0.6425041", "0.6423259", "0.641962", "0.6417928", "0.6416681", "0.6415508", "0.64104736", "0.64096034", "0.64082515", "0.6404705", "0.6403638", "0.63760406", "0.6371581", "0.6366476", "0.6358882", "0.6349583", "0.6345402", "0.6342948", "0.6336272", "0.633453", "0.6332952", "0.63319665", "0.6324086", "0.6323628", "0.6323628", "0.6323628", "0.6323628", "0.6323628", "0.63193524", "0.63160026", "0.6310713", "0.6304502", "0.62944746", "0.6283983", "0.62777007", "0.6275566" ]
0.6526704
53
Adds player calendar to team_cal and returns filled teams
def match_with_player(self, name, player_cal):
    updated_team_cal = self.team_cal.copy()
    filled_team_keys = []
    for loc in player_cal.stack().index:
        current_player_count = self.team_cal.at[loc]
        if self.price_cal.at[loc] <= player_cal.at[loc]:
            if current_player_count < self.team_size * 2:
                updated_team_cal.at[loc] += 1
                self.team_dict[f'{loc[1]}-{loc[0]}'].append(name)
                if current_player_count == self.team_size * 2 - 1:
                    filled_team_keys.append(f'{loc[1]}-{loc[0]}')
            else:
                continue  # team is filled
    self.team_cal = updated_team_cal
    return filled_team_keys
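
A minimal usage sketch for the document field above (not part of the dataset row): the Scheduler class name, its constructor, and the toy calendars are assumptions added purely so the method can be exercised end to end; the match_with_player body itself is the one from the document field.

import pandas as pd

class Scheduler:
    def __init__(self, price_cal, team_size):
        self.price_cal = price_cal            # minimum offer required per (slot, day) cell
        self.team_cal = price_cal * 0         # running player count per (slot, day) cell
        self.team_size = team_size            # a cell is full at team_size * 2 players
        self.team_dict = {f'{day}-{slot}': []
                          for slot in price_cal.index
                          for day in price_cal.columns}

    def match_with_player(self, name, player_cal):
        updated_team_cal = self.team_cal.copy()
        filled_team_keys = []
        for loc in player_cal.stack().index:
            current_player_count = self.team_cal.at[loc]
            if self.price_cal.at[loc] <= player_cal.at[loc]:
                if current_player_count < self.team_size * 2:
                    updated_team_cal.at[loc] += 1
                    self.team_dict[f'{loc[1]}-{loc[0]}'].append(name)
                    if current_player_count == self.team_size * 2 - 1:
                        filled_team_keys.append(f'{loc[1]}-{loc[0]}')
                else:
                    continue  # team is filled
        self.team_cal = updated_team_cal
        return filled_team_keys


# Calendars are (slot x day) DataFrames; .stack() yields (slot, day) tuples,
# which is why team_dict keys are built as f'{loc[1]}-{loc[0]}' (day-slot).
price_cal = pd.DataFrame({'mon': [10, 10], 'tue': [12, 12]}, index=['am', 'pm'])
player_cal = pd.DataFrame({'mon': [15, 0], 'tue': [20, 5]}, index=['am', 'pm'])

scheduler = Scheduler(price_cal, team_size=1)             # cells fill at 2 players
print(scheduler.match_with_player('alice', player_cal))   # [] -- nothing filled yet
print(scheduler.match_with_player('bob', player_cal))     # ['mon-am', 'tue-am']

The team_size * 2 capacity suggests each (slot, day) cell hosts two opposing teams of team_size players; the method reports back the keys of cells whose capacity was reached by the player just added, which is what the query means by "filled teams".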
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teams():", "def _create_teams(self):\n\t\tself.teamsDict = {}\n\t\tself.teamNamesList = []\n\t\tfor team in range(self.numberOfTeams):\n\t\t\tname = 'TEAM_'+str(team+1)\n\t\t\tself.teamNamesList.append(name)\n\t\t\tself.teamsDict[name] = app.game.team.Team(sport_type=self.gameData['sportType'])", "def create_teams(a_season):\n\n team_a = Team(name=\"Team A\", season=a_season)\n team_a.save()\n for p in create_players(7, 0):\n team_a.players.add(p)\n team_a.save()\n team_b = Team(name=\"Team B\", season=a_season)\n team_b.save()\n for p in create_players(7, 16):\n team_b.players.add(p)\n team_b.save()", "def get_player_games(self, year, use_local=True):", "def add_league_teams(league_diction, team_count, host, root, password):\r\n teams_diction = scrape_teams(league_diction, team_count)\r\n create_teams(host, root, password, dict_to_read=teams_diction)\r\n\r\n return teams_diction", "def get_player_stats_from_game(team, year, week):", "def add_players_on_floor(self):\n for period in self.Periods:\n # set current players to be period starters\n current_players = period.Starters.copy()\n for pbp_event in period.Events:\n if pbp_event.is_substitution():\n coming_in = pbp_event.player2_id\n going_out = pbp_event.player_id\n team_id = pbp_event.team_id\n current_players[team_id] = [coming_in if player == going_out else player for player in current_players[team_id]]\n pbp_event.current_players = current_players.copy()", "def get_teams_and_schedule():\n start_time = timedelta(hours=19)\n time_to_add = timedelta(minutes=15)\n teams = session.query(Team).all()\n\n for team in teams:\n team.time = str(start_time)\n start_time += time_to_add\n yield team", "def collect_teams(year: int = 2005) -> None:\n\n\twith open('../resources/config.json') as config_file, open('../resources/secrets.json') as secrets_file:\n\t\tconfig_json = json.load(config_file)\n\t\tsecrets_json = json.load(secrets_file)\n\n\t\turl = '/'.join(['http:', '', config_json['base_url'], config_json['fbs_teams_endpoint']])\n\t\tapi_key = secrets_json['api_key']\n\n\theaders = {'Authorization': api_key}\n\tparams = {'year': year}\n\n\tresponse = requests.get(url, headers = headers, params = params).json()\n\n\t# dict of one array for json dump\n\tteam_names = {'teamNames': list(map(lambda r: r['school'], response))}\n\n\twith open('../resources/teams.json', 'w') as teams_file:\n\t\tjson.dump(team_names, teams_file)", "def getAllTeams(self):\n return []", "def add_all_teams_and_players_in_league(league_dict, con, host, root, password):\r\n with con.cursor() as cur:\r\n cur.execute(\"\"\"SELECT MAX(id) FROM teams\"\"\")\r\n team_counter = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM players\"\"\")\r\n player_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM injuries\"\"\")\r\n injury_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM player_season\"\"\")\r\n player_season_count = cur.fetchall()[0][0]\r\n\r\n cur.execute(\"\"\"SELECT MAX(id) FROM player_team\"\"\")\r\n player_team_count = cur.fetchall()[0][0]\r\n\r\n teams_dict = add_league_teams(league_dict, team_counter, host, root, password)\r\n\r\n add_teams_players(teams_dict, player_count, injury_count, player_season_count,\r\n player_team_count, host, root, password)", "def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if 
no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def collect_teams(year):\n\n team_list = Team.objects.filter(year=year).order_by('location')\n teams = []\n for t in team_list:\n team = {\n 'id': t.abbreviation,\n 'team': t,\n }\n teams.append(team)\n return teams", "def load_premiership_teams():\n # list of PremTeams to add\n team_list = [\n {'name': 'Arsenal', 'code': 'ARS', 'is_prem': True},\n {'name': 'Aston Villa', 'code': 'AVL', 'is_prem': True},\n {'name': 'Brighton and Hove Albion', 'code': 'BTN', 'is_prem': True},\n {'name': 'Brentford', 'code': 'BRE', 'is_prem': True},\n {'name': 'Bournemouth', 'code': 'BOU', 'is_prem': False},\n {'name': 'Burnley', 'code': 'BUR', 'is_prem': True},\n {'name': 'Cardiff City', 'code': 'CAR', 'is_prem': False},\n {'name': 'Chelsea', 'code': 'CHE', 'is_prem': True},\n {'name': 'Crystal Palace', 'code': 'CRY', 'is_prem': True},\n {'name': 'Everton', 'code': 'EVE', 'is_prem': True},\n {'name': 'Fulham', 'code': 'FUL', 'is_prem': False},\n {'name': 'Hull', 'code': 'HUL', 'is_prem': False},\n {'name': 'Huddersfield Town', 'code': 'HUD', 'is_prem': False},\n {'name': 'Leeds United', 'code': 'LEE', 'is_prem': True},\n {'name': 'Leicester City', 'code': 'LEI', 'is_prem': True},\n {'name': 'Liverpool', 'code': 'LIV', 'is_prem': True},\n {'name': 'Manchester City', 'code': 'MCY', 'is_prem': True},\n {'name': 'Manchester United', 'code': 'MUN', 'is_prem': True},\n {'name': 'Middlesbrough', 'code': 'MID', 'is_prem': False},\n {'name': 'Newcastle United', 'code': 'NEW', 'is_prem': True},\n {'name': 'Norwich City', 'code': 'NOR', 'is_prem': True},\n {'name': 'Queens 
Park Rangers', 'code': 'QPR', 'is_prem': False},\n {'name': 'Sheffield United', 'code': 'SHF', 'is_prem': False},\n {'name': 'Southampton', 'code': 'SOT', 'is_prem': True},\n {'name': 'Stoke City', 'code': 'STO', 'is_prem': False},\n {'name': 'Sunderland', 'code': 'SUN', 'is_prem': False},\n {'name': 'Swansea City', 'code': 'SWA', 'is_prem': False},\n {'name': 'Tottenham Hotspur', 'code': 'TOT', 'is_prem': True},\n {'name': 'Watford', 'code': 'WAT', 'is_prem': True},\n {'name': 'West Bromwich Albion', 'code': 'WBA', 'is_prem': False},\n {'name': 'West Ham United', 'code': 'WHM', 'is_prem': True},\n {'name': 'Wolverhampton Wanderers', 'code': 'WLV', 'is_prem': True},\n ]\n\n for team in team_list:\n print(PremTeam.objects.update_or_create(\n name=team['name'],\n code=team['code'],\n defaults={'is_prem': team['is_prem']}\n ))\n # print(pt, created)", "def add_teams_players(teams_dictionary, player_counter, injury_counter, player_season_counter,\r\n player_team_counter, host, root, password):\r\n players, injuries, players_seasons, players_teams = \\\r\n scrape_players(teams_dictionary, player_counter, injury_counter, player_season_counter,\r\n player_team_counter)\r\n if players:\r\n create_players(host, root, password, dict_to_read=players)\r\n create_injuries(host, root, password, dict_to_read=injuries)\r\n create_players_by_team(host, root, password, dict_to_read=players_teams)\r\n create_players_by_season(host, root, password, dict_to_read=players_seasons)", "def fa_finder(league_no, team_name):\n ros_proj_b_list = BatterProjection.objects.all()\n ros_proj_p_list = PitcherProjection.objects.all()\n player_comp = {}\n pitching_fa_list = yahoo_players(league_no, \"P\")\n batting_fa_list = yahoo_players(LEAGUE_NO, \"B\")\n avail_pitching_fas = rate_avail_players(pitching_fa_list, ros_proj_p_list)\n yahoo_team = get_single_yahoo_team(league_no, team_name)\n team_pitching_values = rate_team(yahoo_team, ros_proj_p_list)\n avail_batting_fas = rate_avail_players(batting_fa_list, ros_proj_b_list)\n team_batting_values = rate_team(yahoo_team, ros_proj_b_list)\n\n player_comp['Team Name'] = yahoo_team['team_name']\n player_comp['Pitching FAs'] = avail_pitching_fas\n player_comp['Pitching Team'] = team_pitching_values\n player_comp['Batting FAs'] = avail_batting_fas\n player_comp['Batting Team'] = team_batting_values\n\n return player_comp", "def teams(teamid):\n team_summary = team.TeamSummary(teamid)\n team_summary_info = team_summary.info()\n team_season_ranks = team_summary.season_ranks()\n\n team_common_roster = team.TeamCommonRoster(teamid)\n roster = team_common_roster.roster()\n coaches = team_common_roster.coaches()\n\n season = team_summary_info[0][\"SEASON_YEAR\"]\n\n team_game_log = team.TeamGameLogs(teamid,\n season=season)\n team_games = team_game_log.info()\n\n playoffs_teamgamelogs = team.TeamGameLogs(teamid,\n season=season,\n season_type=\"Playoffs\")\n playoffs_team_games = playoffs_teamgamelogs.info()\n\n team_season = team.TeamSeasons(teamid)\n team_season_info = team_season.info()\n\n for i in team_season_info:\n if (i[\"YEAR\"] == season):\n current_season_info = i\n\n return render_template(\"teams.html\",\n title=team_summary_info[0][\"TEAM_CITY\"] + \" \" + team_summary_info[0][\"TEAM_NAME\"],\n teamid=teamid,\n team_summary_info=team_summary_info,\n team_season_ranks=team_season_ranks,\n season=season,\n team_games=team_games,\n playoffs_team_games=playoffs_team_games,\n team_season=team_season_info,\n roster=roster,\n coaches=coaches,\n 
current_season_info=current_season_info,\n team_img=TEAM_ID_DATA)", "def _team_init(self):\r\n\t\tfor team_type, team_info in self._teams.items():\r\n\t\t\tteam_info.team_type = team_type\r\n\t\t\tteam_info.maze_pos_finder = \\\r\n\t\t\t\tself._maze_manager.get_finder_by_name(team_type.__str__())", "def enter_game_played(self, players_names, winners_names, game, date, group):\n try:\n game_played = GamePlayed()\n game_played.game = Game.objects.get(name__exact=game)\n game_played.date = date\n game_played.group = group\n game_played.save()\n\n for player in players_names:\n game_played.players.add(Player.objects.get(user__first_name__exact=player))\n for winner in winners_names:\n game_played.winners.add(Player.objects.get(user__first_name__exact=winner))\n except:\n print(\"Error entering game\", game)\n pass", "def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in 
TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)", "def instantiate_team_and_player_data(self, game_summary):\n self.HomeTeamId = str(game_summary['g']['hls']['tid'])\n self.VisitorTeamId = str(game_summary['g']['vls']['tid'])\n self.HomeTeamAbbreviation = str(game_summary['g']['hls']['ta'])\n self.VisitorTeamAbbreviation = str(game_summary['g']['vls']['ta'])\n self.GameDate = game_summary['g']['gdte']\n\n self.Players = {\n self.HomeTeamId: {\n str(player['pid']): player['fn'] + ' ' + player['ln']\n for player in game_summary['g']['hls']['pstsg'] if player['totsec'] > 0 # only keep track of stats for players who played\n },\n self.VisitorTeamId: {\n str(player['pid']): player['fn'] + ' ' + player['ln']\n for player in game_summary['g']['vls']['pstsg'] if player['totsec'] > 0 # only keep track of stats for players who played\n },\n }\n if self.GameId in PLAYERS_MISSING_FROM_BOXSCORE.keys():\n for team_id in PLAYERS_MISSING_FROM_BOXSCORE[self.GameId].keys():\n for player_id in PLAYERS_MISSING_FROM_BOXSCORE[self.GameId][team_id].keys():\n self.Players[team_id][player_id] = PLAYERS_MISSING_FROM_BOXSCORE[self.GameId][team_id][player_id]", "async def add_to_team(self, player : Player, team):\r\n if player in self.remaining:\r\n self.teams[team].append(player)\r\n self.remaining.remove(player)\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",\r\n description=\"{} has been drafted to team {}\".format(get_member_name(player,lower=False), \":a:\" if team == \"A\" else \":b:\"))\r\n else:\r\n return discord.Embed(title=\"Valorant 10 Man Bot\",description=\"Sorry, {} is already drafted\".format(get_member_name(player)))", "def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams", "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def __init__(self, manager):\n self.player = manager\n manager.career.seasons.append(self)\n self.team = manager.team\n self.league = self.team.league\n self.year = self.team.cosmos.year", "def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can 
set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) 
+ player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str", "def add_team_player():\n if request.form['add_template'] == 'Add Team':\n return render_template('addteam.html')\n elif request.form['add_template'] == 'Add Player':\n teams = get_team()\n return render_template('addplayer.html', teams=teams)\n else:\n return getAllPlayers()", "def players_onsale(self):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/teamInfo.phtml?tid=' + str(self.community_id),\r\n headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n\r\n current_year = dt.today().year\r\n current_month = dt.today().month\r\n on_sale = list()\r\n year_flag = 0\r\n for i in soup.find_all('table', {'class', 'tablecontent03'})[2].find_all('tr')[1:]:\r\n columns = i.find_all('td')\r\n player_id = int(re.findall('\\d+', columns[0].img['src'])[0])\r\n playername = columns[1].text.strip()\r\n team_id = int(re.findall('\\d+', columns[2].img['src'])[0])\r\n team = columns[2].a['title'].strip()\r\n min_price = float(columns[3].text.replace(\".\", \"\").strip())\r\n market_price = float(columns[4].text.replace(\".\", \"\").strip())\r\n points = int(columns[5].text.strip().strip())\r\n # Controlamos el cambio de año, ya que comunio no lo dá\r\n if current_month <= 7 < int(columns[6].text[3:5]):\r\n year_flag = 1\r\n date = datetime.strptime(str(current_year - year_flag) + columns[6].text[3:5] + columns[6].text[:2], '%Y%m%d').date()\r\n owner = columns[7].text.strip()\r\n position = self.translate_position(columns[8].text.strip())\r\n # Comprobamos si solamente queremos los de la computadora o no\r\n on_sale.append([player_id, playername, team_id, team, min_price, market_price, points, date, owner, position])\r\n\r\n return on_sale", "def teams(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'teams')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def teams(self, game_id: int) -> DataFrame[Any]:", "def populate(self):\n record = yield self.directory.recordWithShortName(RecordType.user, u\"wsanchez\")\n yield self.transactionUnderTest().calendarHomeWithUID(record.uid, create=True)\n calendar = yield self.calendarUnderTest(name=\"calendar\", home=record.uid)\n for f in 
os.listdir(self.holidays_dir):\n if f.endswith(\".ics\"):\n with open(os.path.join(self.holidays_dir, f)) as fin:\n component = Component.fromString(fin.read())\n yield calendar._createCalendarObjectWithNameInternal(f, component, internal_state=ComponentUpdateState.RAW)\n yield self.commit()", "def _cache_teams(self, reader):\n club_teams = []\n reader.next() # Ignore first line (heading)\n team_names_row = reader.next() # Second line contains the team names\n club_teams = [{'name': x, 'team': None} for x in team_names_row if x != '']\n\n if len(club_teams) < 1:\n raise ParseException(\"FATAL ERROR: Could not find team names on row 2\")\n\n for team in club_teams:\n try:\n team['team'] = ClubTeam.objects.get(short_name=team['name'])\n except ClubTeam.DoesNotExist:\n raise ParseException(\"FATAL ERROR: Unrecognised team name: {}\".format(team['name']))\n\n return club_teams", "def add_players(game: LolGame, players: List[dict], add_page_id: bool = False) -> LolGame:\n\n for team_side in game[\"teams\"]:\n team_side_leaguepedia = \"1\" if team_side == \"BLUE\" else \"2\"\n\n for idx, game_player in enumerate(game[\"teams\"][team_side][\"players\"]):\n try:\n # We get the player object from the Leaguepedia players list\n player_latest_data = next(\n p\n for p in players\n if p[\"Side\"] == team_side_leaguepedia\n and lit.get_id(p[\"Champion\"], object_type=\"champion\") == game_player[\"championId\"]\n )\n\n game_player[\"role\"] = role_translation[player_latest_data[\"gameRoleNumber\"]]\n\n unique_identifiers = LeaguepediaPlayerIdentifier(\n name=player_latest_data.get(\"currentGameName\"),\n irlName=player_latest_data.get(\"irlName\"),\n country=player_latest_data.get(\"Country\"),\n residency=player_latest_data.get(\"Residency\"),\n age=player_latest_data.get(\"Age\"),\n role=player_latest_data.get(\"Role\"),\n team=player_latest_data.get(\"Team\"),\n kills=player_latest_data.get(\"Kills\"),\n deaths=player_latest_data.get(\"Deaths\"),\n assists=player_latest_data.get(\"Assists\"),\n ss=player_latest_data.get(\"SummonerSpells\"),\n gold=player_latest_data.get(\"Gold\"),\n cs=player_latest_data.get(\"CS\"),\n items=player_latest_data.get(\"Items\"),\n trinket=player_latest_data.get(\"Trinket\"),\n keystoneMastery=player_latest_data.get(\"KeystoneMastery\"),\n keystoneRune=player_latest_data.get(\"KeystoneRune\"),\n runes=player_latest_data.get(\"Runes\"),\n )\n\n if add_page_id:\n unique_identifiers[\"pageId\"] = int(player_latest_data[\"pageId\"])\n\n game_player[\"uniqueIdentifiers\"] = {\"leaguepedia\": unique_identifiers}\n\n except StopIteration:\n # Since we cannot get the role properly, we try to infer it\n game_player[\"role\"] = list(role_translation.values())[idx]\n\n return game", "def _load_team_map(self) -> None:\n self._cursor.execute(\"select id, franchid, teamid, lgid from teams where yearid = %s;\", (self._yearid,))\n all_teams = self._cursor.fetchall()\n for team in all_teams:\n r = {'id': team[0], 'franchid': team[1], 'teamid': team[2], 'lgid': team[3]}\n self._team_map[team[1]] = r", "def get_past_matches_data(team):\n matches = team.get_past_matches()\n match_list = []\n for match in matches:\n match_dict = {}\n match_dict['match_date'] = match.match_date\n match_dict['match_name'] = match.__str__()\n match_dict['id'] = match.id\n innings = match.get_innings()\n if len(innings):\n if innings[0].runs > innings[1].runs:\n match_dict['winner_team'] = innings[0].bat_team\n match_dict['win_margin'] = innings[0].runs - innings[1].runs\n match_dict['win_type'] = 'Runs'\n 
match_dict['winner_score'] = str(innings[0].runs) + '/' + str(innings[0].wickets)\n else:\n match_dict['winner_team'] = innings[1].bat_team\n match_dict['win_margin'] = 10 - innings[1].wickets\n match_dict['win_type'] = 'Wickets'\n match_dict['winner_score'] = str(innings[1].runs) + '/' + str(innings[1].wickets)\n match_list.append(match_dict)\n return match_list", "def __addCalendars(self, tree, key=\"dates/calendars\"):\n \n calendars = self.__getStore(self.__data, \"calendar\")\n \n for element in tree.findall(\"./%s/*\" % key):\n if not element.get(\"draft\"):\n self.__addCalendar(calendars, element)", "def add_to_calender(service, username): \n colors = service.colors().get().execute()\n d_and_t = df.get_add_to_calender_input(argv[1], argv[2])\n now = datetime.datetime.now()\n if d_and_t == None:\n return\n event_request_body = {\n 'start': {\n 'dateTime': df.convert_to_RFC_datetime(d_and_t[0], d_and_t[1], d_and_t[2], d_and_t[3][0]-2, d_and_t[3][1]),\n 'timeZone': 'Africa/Johannesburg'\n },\n 'end': {\n 'dateTime': df.convert_to_RFC_datetime(d_and_t[0], d_and_t[1], d_and_t[2], d_and_t[4][0]-2, d_and_t[4][1]),\n 'timeZone': 'Africa/Johannesburg'\n },\n 'summary': f\"{username} - Code Clinic\",\n 'description': 'empty',\n 'status': 'confirmed',\n 'transparency': 'opaque',\n 'visibility': 'public',\n 'location': 'WTC',\n 'guestsCanModify': True,\n 'attendees': [\n { \n 'displayName': username,\n 'organizer': True,\n 'email': f'{username}@student.wethinkcode.co.za',\n 'optional': True,\n 'responseStatus': 'accepted'\n }\n ]\n }\n start = event_request_body['start']['dateTime']\n end = event_request_body['end']['dateTime']\n\n overlaps = check_if_slots_overlap(start, end, service, username)\n if overlaps == False:\n response = service.events().insert(calendarId=get_events.calendar_id, sendUpdates='all', body=event_request_body).execute()\n print(\"\\nYour slot has been created...\")\n else:\n print(\"\\nYou already have an event scheduled for this time. 
Please choose another time...\")\n events, count = get_events.get_events_for_next_7_days_to_delete(username, service)\n if count == 0:\n print(\"\\nYou currently don't have any slots created.\")\n return", "def get_all_teams(self):\n return self._db.Teams.find({})", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "async def get_team_embed(self):\r\n embed = discord.Embed(title=\"Valorant 10 Man Bot\",colour=discord.Colour(0x470386))\r\n team_a_strings = [get_member_name(m,lower=False) for m in self.teams[\"A\"]]\r\n team_b_strings = [get_member_name(m,lower=False) for m in self.teams[\"B\"]] \r\n embed.add_field(name=\"Defenders\", value=\"{}\".format(\"\\n\".join(team_a_strings)), inline=True)\r\n embed.add_field(name=\"Attackers\", value=\"{}\".format(\"\\n\".join(team_b_strings)), inline=True)\r\n return embed", "def _add_first_stream_calendar_entries(client_id, match, now):\n\t# Add a CalendarEntry for each user who starred the match.\n\tuser_ids_cursor = session.query(StarredMatch.user_id)\\\n\t\t\t.filter(StarredMatch.match_id == match.id)\n\tfor row in user_ids_cursor:\n\t\tentry = _get_calendar_entry(row.user_id, match)\n\t\tsession.add(entry)\n\tsession.flush()\n\n\t# Add a CalendarEntry for each user who starred either team.\n\tuser_ids_cursor = session.query(MatchOpponent.match_id, StarredTeam.user_id)\\\n\t\t\t.join(StarredTeam, MatchOpponent.team_id == StarredTeam.team_id)\\\n\t\t\t.filter(MatchOpponent.match_id == match.id)\n\tuser_ids_cursor = (row.user_id for row in user_ids_cursor)\n\t_multi_increment_num_user_stars(user_ids_cursor, match, now)\n\tsession.flush()\n\n\t# Add a CalendarEntry for each user who starred the streaming user.\n\tuser_ids_cursor = session.query(StarredStreamer.user_id)\\\n\t\t\t.filter(StarredStreamer.streamer_id == client_id)\n\tuser_ids_cursor = (row.user_id for row in user_ids_cursor)\n\t_multi_increment_num_user_stars(user_ids_cursor, match, now)\n\tsession.flush()", "def _add_games_to_schedule(self, schedule, game_type, year):\n for item in schedule:\n game = Game(item, game_type, year)\n self._games.append(game)", "def add_team(self):\n team = Team(self.context, ResourcePath(\"team\", self.resource_path))\n team._parent_collection = self.parent_collection\n qry = ServiceOperationQuery(self, \"team\", None, team, None, team)\n self.context.add_query(qry)\n\n def _construct_create_team_request(request):\n cur_qry = 
self.context.current_query\n if cur_qry.id == qry.id:\n request.method = HttpMethod.Put\n request.set_header('Content-Type', \"application/json\")\n request.data = json.dumps(request.data)\n\n self.context.before_execute(_construct_create_team_request, False)\n return team", "def test_add_team_manager_to_team(self):\n pass", "def scraping_actual_team_players(team_abbreviation):\n starting_point = domain + \"/teams/\"\n teamurl = starting_point + team_abbreviation + \"/\"\n team_id = \"div_\" + team_abbreviation\n html = urlopen(teamurl)\n bs = BeautifulSoup(html, 'html.parser')\n actual_team_url = domain + str(bs.find(\"div\", {'id': team_id}).find(\"a\").get(\"href\"))\n html = urlopen(actual_team_url)\n bs = BeautifulSoup(html, 'html.parser')\n players = bs.find(\"table\", {'id':'roster'}).findAll(\"td\", {\"data-stat\":\"player\"})\n players_url = [player.find(\"a\").get(\"href\") for player in players]\n team_players_list = []\n for player_url in players_url:\n time.sleep(3)\n url = domain + player_url\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n print(player_url)\n try:\n tabla = pd.read_html(str(bs.find(\"div\", {'id':'all_per_poss'})).replace(\"<!--\", \"\"))[0] \n tabla[\"Player\"] = bs.find(\"h1\", {\"itemprop\" : \"name\"}).text.strip()\n indice = tabla[tabla[\"Season\"]==\"Career\"].index[0]\n tabla = tabla[0:indice]\n tabla = tabla.drop(axis= 1,columns = \"Unnamed: 29\")\n #no me encuentra tablas para uno del college que es el darlina01\n print(player_url)\n team_players_list.append(tabla)\n except:\n pass\n return pd.concat(team_players_list)", "def create_table_per_team(self) -> None:\n for team_id in self.teams:\n home = self.file.loc[(self.file[\"IdHomeTeam\"] == team_id)].reset_index(drop=True)\n home = home.rename(columns={\"FTHG\": \"FTG_asH\", \"FTR\": \"FT_RESULT\", \"HTHG\": \"HTG_asH\",\n \"HTR\": \"HT_RESULT\", \"HS\": \"Shoot_asH\", \"HST\": \"ShootTarget_asH\",\n \"HF\": \"Fouls_asH\", \"HC\": \"Corner_asH\", \"HY\": \"YellowC_asH\", \"HR\": \"RedC_asH\",\n \"FTAG\": \"FT_against_H\", \"HTAG\": \"HT_against_H\",\n \"AS\": \"Shoot_against_H\", \"AST\": \"ShootTarget_against_H\",\n \"AF\": \"Fouls_against_H\",\n \"AC\": \"Corner_against_H\", \"AR\": \"RedC_againts_H\", \"AY\": \"YellowC_against_H\"})\n home[\"nWeekHome\"] = home.index + 1\n home.drop([\"nWeekAway\", \"IdAwayTeam\"], axis=1,\n inplace=True)\n home.loc[(home[\"FT_RESULT\"] == \"H\"), \"FT_RESULT\"] = \"Winn\"\n home.loc[(home[\"FT_RESULT\"] == \"D\"), \"FT_RESULT\"] = \"Draw\"\n home.loc[(home[\"FT_RESULT\"] == \"A\"), \"FT_RESULT\"] = \"Lost\"\n home.loc[(home[\"HT_RESULT\"] == \"H\"), \"HT_RESULT\"] = \"Winn\"\n home.loc[(home[\"HT_RESULT\"] == \"D\"), \"HT_RESULT\"] = \"Draw\"\n home.loc[(home[\"HT_RESULT\"] == \"A\"), \"HT_RESULT\"] = \"Lost\"\n\n away = self.file.loc[(self.file[\"IdAwayTeam\"] == team_id)].reset_index(drop=True)\n away = away.rename(columns={\"FTAG\": \"FTG_asA\", \"FTR\": \"FT_RESULT\", \"HTAG\": \"HTG_asA\",\n \"HTR\": \"HT_RESULT\", \"AS\": \"Shoot_asA\", \"AST\": \"ShootTarget_asA\",\n \"AF\": \"Fouls_asA\", \"AC\": \"Corner_asA\", \"AY\": \"YellowC_asA\", \"AR\": \"RedC_asA\",\n \"FTHG\": \"FT_against_A\", \"HTHG\": \"HT_against_A\",\n \"HS\": \"Shoot_against_A\", \"HST\": \"ShootTarget_against_A\",\n \"HF\": \"Fouls_against_A\",\n \"HC\": \"Corner_against_A\", \"HR\": \"RedC_againts_A\", \"HY\": \"YellowC_against_A\"})\n away[\"nWeekAway\"] = away.index + 1\n away.drop([\"nWeekHome\", \"IdHomeTeam\"], axis=1,\n inplace=True)\n 
away.loc[(away[\"FT_RESULT\"] == \"H\"), \"FT_RESULT\"] = \"Lost\"\n away.loc[(away[\"FT_RESULT\"] == \"D\"), \"FT_RESULT\"] = \"Draw\"\n away.loc[(away[\"FT_RESULT\"] == \"A\"), \"FT_RESULT\"] = \"Winn\"\n away.loc[(away[\"HT_RESULT\"] == \"H\"), \"HT_RESULT\"] = \"Lost\"\n away.loc[(away[\"HT_RESULT\"] == \"D\"), \"HT_RESULT\"] = \"Draw\"\n away.loc[(away[\"HT_RESULT\"] == \"A\"), \"HT_RESULT\"] = \"Winn\"\n\n self.home = self.home.append(home, ignore_index=True)\n self.away = self.away.append(away, ignore_index=True)\n\n self.home[\"Date\"] = pd.to_datetime(self.home[\"Date\"], format=\"%Y-%m-%d\")\n self.away[\"Date\"] = pd.to_datetime(self.away[\"Date\"], format=\"%Y-%m-%d\")\n self.home.reset_index(drop=True)\n self.away.reset_index(drop=True)", "def make_calendar(\n self, name=None, cal_id=None, supported_calendar_component_set=None\n ):\n return self.calendar_home_set.make_calendar(\n name,\n cal_id,\n supported_calendar_component_set=supported_calendar_component_set,\n )", "def scraping_league_stats():\n url=\"https://www.basketball-reference.com/teams/\"\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n teams = bs.find(\"div\", {'id':'div_teams_active'}).findAll(\"a\")\n teams_url = [team.get(\"href\") for team in teams]\n team_players_list = []\n for team in teams_url:\n time.sleep(3)\n teamurl = domain + team\n print(teamurl) #fafaf\n html = urlopen(teamurl)\n bs = BeautifulSoup(html, 'html.parser')\n div_team = \"div_\" + teamurl[-4:-1]\n season = bs.find(\"div\", {'id':div_team}).find(\"a\").get(\"href\") #tengo que cambiarlo para que este guay\n #esto tiene la URL de la ultima season\n seasonurl = domain + season\n print(seasonurl)\n html = urlopen(seasonurl)\n bs = BeautifulSoup(html, 'html.parser')\n players = bs.find(\"table\", {'id':'roster'}).findAll(\"td\", {\"data-stat\":\"player\"})\n player_url_list = [player.find(\"a\").get(\"href\") for player in players]\n for player in player_url_list:\n player_url = domain + player\n time.sleep(3)\n print(player_url)\n html = urlopen(player_url)\n bs = BeautifulSoup(html, 'html.parser')\n try:\n tabla = pd.read_html(str(bs.find(\"div\", {'id':'all_per_poss'})).replace(\"<!--\", \"\"))[0] \n tabla[\"Player\"] = bs.find(\"h1\", {\"itemprop\" : \"name\"}).text.strip()\n indice = tabla[tabla[\"Season\"]==\"Career\"].index[0]\n tabla = tabla[0:indice]\n tabla = tabla.drop(axis= 1,columns = \"Unnamed: 29\")\n #no me encuentra tablas para uno del college que es el darlina01\n print(player_url)\n team_players_list.append(tabla)\n except:\n pass\n return pd.concat(team_players_list)", "def fill_matchup_table_with_games(self, week, year):\n url = f'https://www.pro-football-reference.com/years/{year}/games.htm'\n schedule_html = requests.get(url)\n pro_soup = BeautifulSoup(schedule_html.content, 'html.parser')\n\n matchup_table = html_parsing.week_html_parsing(pro_soup)[0][1]\n matchup_table = matchup_table[matchup_table['Time'] != '']\n matchup_table = matchup_table.dropna()\n\n matchup_table = self.format_profootball_dates(matchup_table, year)\n\n week_matchups = matchup_table[matchup_table['Week'] == float(week)]\n sql_queries = []\n for i, row in week_matchups.iterrows():\n sql_queries.append(\"INSERT INTO \\\"2017_matchups\\\" (hometeam, awayteam, week, date) \"\n \"VALUES ({}, {}, {}, {});\".format(\n row.Home.upper(), row.Visitor.upper(), row.Week, row.datetime))\n self.set_db_data(sql_queries)\n print('Table filled successfully.')", "def add_teams(self, data):\n for k, v in data.items():\n try:\n 
self._db_cur.execute(\"insert or ignore into team_data \\\n (team_id, team_name) values (?, ?)\", (v, k))\n self._db_conn.commit()\n except sqlite3.Error as er:\n print er", "async def games(\n self, ctx: commands.Context, *, teams_and_date: Optional[TeamDateFinder] = {}\n ) -> None:\n log.debug(teams_and_date)\n await GamesMenu(\n source=Schedule(**teams_and_date, session=self.session),\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def set_league(teams, l):\n\t\n\t#~ print teams[0].name\n\t\n\tif l>0 and l<10:\n\t\tfor team in teams:\n\t\t\tteam.league = l\n\t\t\n\telse:\n\t\tprint(\"Error! Only leagues between 1 and 9 can be set.\")\n\t\n\treturn teams", "def __init__(self, player):\n self.player = player\n player.career.seasons.append(self)\n self.team = player.team\n self.league = self.team.league\n self.year = self.team.cosmos.year", "def get_team_list(self):\n result = dict\n managers = User.get_users([UserRole.ProjectManager])\n for manager in managers:\n result.update({manager.get_username(): manager.get_team_members()})\n return build_team_list(result)", "def test_teams_list(self):\n pass", "def setPlayerList(sport = 1, season = [2019]):\n\n for y in season:\n players = get('sports_players', {'ver': 'v1', 'season': y, 'sportId': sport })\n\n for dict in players.get('people'):\n print(dict)\n for k,v in dict.items():\n\n if k == 'id':\n id = v\n elif k == 'firstName':\n first_name = v\n elif k == 'lastName':\n if \"'\" in v:\n v = v.replace(\"'\", '')\n\n last_Name = v\n playerList[id] = [first_name, last_Name]\n sql_command = \"\"\"INSERT INTO contacts VALUES ({},'{}', '{}');\"\"\".format(id,first_name,last_Name)\n print(sql_command)\n crsr.execute(sql_command)\n # If we skip this, nothing will be saved in the database.\n connection.commit()\n\n # close the connection\n connection.close()\n return playerList", "def test_teams_get_teams_v1(self):\n pass", "def getCalendar(self):\n cal = BlankCalendar()\n for datable in self.run_query():\n cal.add_component(datable.getEvent())\n \n return cal", "def collect_after_game_dicts(league, start_date, end_date):\n after_game_no_dicts = collections.defaultdict(dict)\n\n def add_team_stats(team, after_game_no, stat):\n stat_dict = after_game_no_dicts[after_game_no]\n stat_dict[team] = stat\n\n for team in league.teams:\n matches = get_matches(league, start_date, end_date, team_involved=team)\n for x in range(1, len(matches) + 1):\n stats = TeamStats(team, matches[:x])\n add_team_stats(team, x, stats)\n\n for x, dictionary in after_game_no_dicts.items():\n if len(dictionary) != len(league.teams):\n del after_game_no_dicts[x]\n return after_game_no_dicts", "def get_events(start_date, end_date, source=utils.get_native_source, **kwargs):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting events from source %s...\", source)\n if not source:\n return []\n # with open('sportmonks/response_texts/fixtures_{}-{}.txt'.format(start_date.strftime('%Y-%m-%d'),\n # end_date.strftime('%Y-%m-%d')), 'w') as outfile:\n # season is necessary so that the season object is extracted and used\n include = kwargs.get('include', '')\n include = ','.join([include, 'season']) if include else 'season'\n kwargs['include'] = include\n data, meta, status_code = sportmonks.fixtures.by_date_range(start_date=start_date, end_date=end_date, **kwargs)\n # json.dump(data, outfile, indent=4)\n if not data:\n return []\n pre_events = []\n try:\n num_fetched_objects = len(data)\n except:\n 
num_fetched_objects = None\n num_processed_objects = 0\n try:\n for obj in data:\n num_processed_objects += 1\n try:\n sid = obj.get('id', None)\n time = obj.get('time', dict())\n starting_at = time.get('starting_at', dict())\n event_datetime = get_date(starting_at, 'date_time')\n # custom_timezone = pytz.timezone('Europe/Athens')\n # event_datetime = event_datetime.astimezone(custom_timezone)\n home_team_sid = obj.get('localteam_id', None)\n away_team_sid = obj.get('visitorteam_id', None)\n competition_season_sid = obj.get('season_id', None)\n season_string = obj.get('season', {}).get('data', {}).get('name')\n stage_sid = obj.get('stage_id', None)\n round_sid = obj.get('round_id', None)\n competition_sid = obj.get('league_id', None)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n zak_season_name = games.models.Season.zakandify_season_string(season_string)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n if not season:\n logger.data_error('Could not extract season object from season string: %s', season_string)\n continue\n\n # todo sportmonks fix\n # if the event involves a problematic team it is not created in order to avoid future problems\n if is_in_problematic_teams(home_team_sid):\n home_team_sid = None\n if is_in_problematic_teams(away_team_sid):\n away_team_sid = None\n\n competition_seasons = games.models.CompetitionSeason.by_sid(competition_season_sid, source, season)\n try:\n competition_season = competition_seasons.first() # only one entity exists in the queryset\n except Exception as e:\n logger.warning('%s', e)\n competition_season = None\n\n home_team = games.models.Team.by_sid(home_team_sid, source)\n away_team = games.models.Team.by_sid(away_team_sid, source)\n pre_event = pre_models.PreEvent(source, sid, event_datetime, home_team, away_team, competition_season)\n pre_events.append(pre_event)\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.fixtures.by_date_range %s %s from source %s',\n e, start_date, end_date, source)\n logger.info(\"%s event objects were contained in the response\", num_fetched_objects)\n logger.info(\"%s event objects were processed\", num_processed_objects)\n logger.info(\"%s pre events were created\", len(pre_events))\n return pre_events", "def calendar_lists(self):\r\n return CalendarLists(self)", "def calculate_matches(teams: Dict[int, Team]) -> Dict[int, Match]:\r\n match_urls = TCS_Scraper.scrape_matches(end_round=CURRENT_ROUND)\r\n matches = {}\r\n for match in match_urls:\r\n print(\"Scraping\", match)\r\n team_1id, results, team_2id \\\r\n = TCS_Scraper.scrape_match(match, teams)\r\n # If nothing happened on this match page, skip it\r\n if not results:\r\n continue\r\n team_1 = teams[team_1id]\r\n team_2 = teams[team_2id]\r\n\r\n team_1elos = [team_1.elo]\r\n team_2elos = [team_2.elo]\r\n for result in results:\r\n # Calculate new elo for each team\r\n e1p, e2p = Team.calculate_elo(team_1.elo, team_2.elo, result[0])\r\n\r\n # Print elo changes for each team\r\n print(team_1.name, str(e1p - team_1.elo))\r\n print(team_2.name, str(e2p - team_2.elo))\r\n\r\n # Store the elo changes\r\n team_1elos.append(e1p)\r\n team_2elos.append(e2p)\r\n\r\n # Set new elo values\r\n team_1.elo = e1p\r\n team_2.elo = e2p\r\n\r\n # Create a new match object and append it to the list of matches\r\n new_match = Match(\r\n match,\r\n team_1id,\r\n team_2id,\r\n team_1elos,\r\n team_2elos,\r\n results\r\n )\r\n matches[new_match.id] = new_match\r\n\r\n # Add match id to each team object\r\n 
team_1.matches.append(new_match.id)\r\n team_2.matches.append(new_match.id)\r\n\r\n return matches", "def build_teams(self):\n # get all nations\n all_nations = self.games.get_all_nations()\n\n # build teams for all participating nations in FIFA World Cup 2018\n bt = BuildTeams(self.squad_size, self.selected_attrs)\n bt.read_data()\n\n # a dict with a nations' name as a key and players' data as value\n self.teams = {}\n\n # build squad for every nation\n for nation in all_nations:\n team = bt.build_team(nation)\n # if we got enough players, add team\n if team.shape[0] >= bt.squad_size:\n #print(team)\n # convert pandas dataframe to matrix and flatten it\n self.teams[nation] = team.as_matrix().flatten()", "def get_volunteers(service_id, upcoming_plan_id, location_id, event_id, event_period_id, service_time_ids_to_time, event_time_to_id):\r\n # Get all Team members\r\n team_members = requests.get(base_url + f'services/v2/service_types/{service_id}/plans/{upcoming_plan_id}/team_members?per_page=100', headers=headers3).json()\r\n\r\n volunteers = []\r\n # Loop through team members\r\n for person in team_members[\"data\"]:\r\n if person[\"attributes\"][\"status\"] == \"C\" or person[\"attributes\"][\"status\"] == \"U\":\r\n # get volunteer time ids\r\n time_ids=person['relationships']['times']['data']\r\n # convert time_id into times\r\n times = set(service_time_ids_to_time.get(time_id['id']) for time_id in time_ids)\r\n # convert times into event_ids\r\n check_time_ids = set(event_time_to_id.get(time) for time in times)\r\n \r\n # remove any None entry\r\n check_time_ids.discard(None)\r\n\r\n for check_t_id in check_time_ids:\r\n temp_dict = {\r\n 'check-in-kind':'Volunteer',\r\n# \"name\": person[\"attributes\"][\"name\"], # you can also add the persons name but it doesn't seem to be compulsory\r\n 'bulk_check_in[check_ins_attributes][][account_center_person_id]': person['relationships']['person']['data']['id'],\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][location_id]': location_id,\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][event_time_id]': check_t_id,\r\n 'bulk_check_in[check_ins_attributes][][check_in_times_attributes][][kind]': \"Volunteer\",\r\n 'bulk_check_in[check_ins_attributes][][event_id]': event_id,\r\n 'bulk_check_in[check_ins_attributes][][event_period_id]': event_period_id\r\n }\r\n volunteers.append(temp_dict)\r\n return volunteers", "def players(self):\n if self.players_cache is None:\n team_df = self.teams()\n self.players_cache = self.ea.players_endpoint(\n team_df[\"id\"].tolist())\n\n columns = [\"teamId\", \"playerId\", \"name\", \"position\"]\n all_players = []\n for team in self.players_cache[\"teams\"]:\n team_id = team[\"id\"]\n for plyr in team[\"roster\"][\"roster\"]:\n player_id = plyr[\"person\"][\"id\"]\n player_name = plyr[\"person\"][\"fullName\"]\n position = plyr[\"position\"][\"abbreviation\"]\n all_players.append({columns[0]: team_id,\n columns[1]: player_id,\n columns[2]: player_name,\n columns[3]: position})\n return pd.DataFrame(data=all_players, columns=columns)", "def test_teams_get_teams_v2(self):\n pass", "def get_player_data(self, player, season, mtgs=None, past=None, future=None, single=False):\n\n avail = []\n scheduled = []\n\n # Should be empty arrays if None\n if past is None:\n past = []\n if future is None:\n future = []\n\n nplayed = Schedule.objects.filter(meeting__in=past, player=player).count()\n nscheduled = Schedule.objects.filter(meeting__in=future, player=player).count()\n\n av 
= PlayerAvailability.objects.get_for_season_player(player, season)\n\n p = {\n 'name': player.first + ' ' + player.last,\n 'id': player.id,\n 'isavail': av.available,\n 'scheduled': av.scheduled,\n 'played': av.played,\n 'nplayed': nplayed,\n 'nscheduled': nscheduled + nplayed,\n 'single': single\n }\n\n return p", "def get_all_cfb_teams_for_year_range(start_year, end_year):\n \n full_team_list = []\n \n for year in range(start_year, end_year + 1):\n print(year)\n \n team_list = get_all_cfb_teams(year)\n full_team_list += team_list\n \n wait_time = random.randint(3, 15)\n time.sleep(wait_time)\n \n return full_team_list", "def get_player_team_data(self, start_date, end_date = None, \n get_player_data_ind = True, get_team_data_ind = True, \n pre_player_data_dir = None, pre_team_data_dir = None):\n #Converts start and end date from string to datetime\n start_date = datetime.strptime(start_date, '%Y-%m-%d').date()\n \n if end_date:\n end_date = datetime.strptime(end_date, '%Y-%m-%d').date()\n else: \n end_date = start_date\n \n if pre_player_data_dir:\n try: \n #Reads in the existing player dataset to append the scraped data to \n exist_player_data = pd.read_csv(pre_player_data_dir)\n except:\n raise Exception('Cannot read in existing player dataset please ensure the directory is correct')\n \n if pre_team_data_dir:\n try: \n #Reads in the existing player dataset to append the scraped data to \n exist_team_data = pd.read_csv(pre_team_data_dir)\n except:\n raise Exception('Cannot read in existing team dataset please ensure the directory is correct')\n \n delta = end_date - start_date \n #Appends list of date between start and end date to strings\n date_list = []\n for i in range(delta.days + 1):\n day = start_date + timedelta(days=i)\n date_list.append(str(day))\n \n for date in date_list:\n \n print(f'Now scraping data from NBA games on {date}')\n home_team_list = get_list_of_hometeams(self.driver, date)\n\n if len(home_team_list) > 0:\n\n counter = 1 \n\n for home_team in home_team_list:\n \n if counter == 1: \n if get_player_data_ind: \n player_df_full = get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver)\n if get_team_data_ind:\n team_df_full = get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver)\n else:\n if get_player_data_ind: \n player_df_full = player_df_full.append(get_player_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver), ignore_index=True)\n if get_team_data_ind:\n team_df_full = team_df_full.append(get_team_data(home_team = team_full_abrv_config[home_team]['Full Name'], \n date_played = date, \n driver = self.driver), ignore_index=True)\n counter+=1\n \n if pre_player_data_dir:\n exist_player_data = exist_player_data.append(player_df_full)\n exist_player_data.to_csv(pre_player_data_dir, index = False)\n print(f'Updated player dataset will be overwritten in {pre_player_data_dir}')\n \n if pre_team_data_dir:\n exist_team_data = exist_team_data.append(team_df_full)\n exist_team_data.to_csv(pre_team_data_dir, index = False)\n print(f'Updated team dataset will be overwritten in {pre_team_data_dir}')\n \n if pre_player_data_dir and pre_team_data_dir:\n return exist_player_data, exist_team_data\n elif pre_player_data_dir:\n return exist_player_data\n elif pre_team_data_dir:\n return exist_team_data\n elif get_player_data_ind and get_team_data_ind:\n return player_df_full, team_df_full \n 
elif get_player_data_ind:\n return player_df_full\n elif get_team_data_ind:\n return team_df_full", "def insert_events(self, url, notExams=[]):\n response = json.loads(requests.get(url).text)\n i = 0\n eventDate = datetime.datetime.strptime(response[i]['start'], \"%Y-%m-%dT%H:%M:%S\")\n while eventDate < self.fromDate:\n i += 1\n eventDate = datetime.datetime.strptime(response[i]['start'], \"%Y-%m-%dT%H:%M:%S\")\n\n while eventDate <= self.toDate:\n jEvent = response[i]\n if jEvent['cod_modulo'] not in notExams:\n location = ''\n desc = ''\n if (len(jEvent['aule']) > 0):\n location = jEvent['aule'][0]['des_indirizzo'].replace(' -', ',')\n for a in jEvent['aule']:\n desc += a['des_risorsa'] + ', ' + a['des_piano'] + ' - ' + a['des_ubicazione'] + '\\n'\n desc += 'Professor: ' + jEvent['docente']\n if type(jEvent['teams']) is str:\n desc += '\\nTeams: ' + jEvent['teams'] + '\\n'\n event = {\n 'summary': jEvent['cod_modulo'] + ' - ' + jEvent['title'],\n 'location': location,\n 'description': desc,\n 'start': {\n 'dateTime': jEvent['start'],\n 'timeZone': 'Europe/Rome',\n },\n 'end': {\n 'dateTime': jEvent['end'],\n 'timeZone': 'Europe/Rome',\n },\n 'recurrence': [\n # 'RRULE:FREQ=DAILY;COUNT=2'\n ],\n 'attendees': [\n # {'email': 'lpage@example.com'},\n # {'email': 'sbrin@example.com'},\n ],\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'popup', 'minutes': 60},\n ],\n },\n }\n # if you want to add it to your primary calendar just use calendarId=\"primary\"\n event = self.service.events().insert(calendarId=self.chosen_calendar,\n body=event).execute()\n print('Event created succesfully : %s' % (event.get('htmlLink')))\n i += 1\n eventDate = datetime.datetime.strptime(response[i]['start'], \"%Y-%m-%dT%H:%M:%S\")", "def collect_team(year, abbreviation):\n\n team = Team.objects.filter(year=year, abbreviation=abbreviation)[0]\n team = {\n 'id': abbreviation,\n 'team': team,\n }\n return team", "def calendars(self):\r\n return c.Calendars(self)", "def calendars(self):\r\n return c.Calendars(self)", "def teams(self):\n return self._get_by_class(Team)", "def assign_team(exp, non_exp):\n teams = [[], [], []]\n\n for n in range(len(TEAMS)):\n while len(teams[n]) != 3:\n player = exp.pop()\n player.update({'Team': TEAMS[n]})\n player.update({'Practice': PRACTICE[n]})\n teams[n].append(player)\n for n in range(len(TEAMS)):\n while len(teams[n]) != 6:\n player = non_exp.pop()\n player.update({'Team': TEAMS[n]})\n player.update({'Practice': PRACTICE[n]})\n teams[n].append(player)\n return teams", "def _register_teams(self):\n # loop through all agents\n for agent_id, agent_body in self.registered_agents.items():\n # find their team name\n team = agent_body.properties['team']\n\n # register the team (if not already done) and the agent in it\n if team not in self.__teams:\n self.__teams[team] = []\n self.__teams[team].append(agent_id)", "def new_schedule():\n sched = OrderedDict()\n for year, stype, week in year_phase_week():\n update_week(sched, year, stype, week)\n return sched", "def merge_team(self, team):\n for m in team.members:\n self.members.append(m)\n for n in team.neighbors:\n self.neighbors.append(n)\n self.calculate_a()\n return self", "def test_get_list_teams(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n 
self.assertIn(b'test team', response.data)", "def season_game_logs(team, year):\n\n # Check year value\n if year > 2019 or year < 1950:\n raise ValueError('Year Value Incorrect')\n\n # Rename teams that moved\n team = scrape_utils.rename_team(team, year)\n\n # Get HTML content\n url = 'http://www.basketball-reference.com/teams/%s/%s/gamelog' % (team, year)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n season_stats = soup.find(id='tgl_basic')\n games = season_stats.find('tbody')\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # To find opponent statistics\n opponent = re.compile('^opp_.*$')\n\n # Loop through every game in a team's season\n for game in games.find_all('tr', {'class': None}):\n\n curr_team = {'team': team}\n opp_team = {}\n\n # Loop through each stat\n for stat in game.find_all('td'):\n\n stat_name = stat['data-stat']\n\n # These are opponent stats\n if re.match(opponent, stat_name):\n opp_team[stat_name[4:]] = scrape_utils.stat_parse(stat_name, stat.string)\n else:\n curr_team[stat_name] = scrape_utils.stat_parse(stat_name, stat.string)\n\n # Remove unnecessary information\n del curr_team['game_season']\n del curr_team['x']\n\n # Rename relocated teams\n curr_team['team'] = scrape_utils.rename_team(team)\n opp_team['team'] = scrape_utils.rename_team(opp_team.pop('id'))\n\n # Use the same ID as basketball reference\n result = {'date': datetime.strptime(curr_team.pop('date_game'), \"%Y-%m-%d\"),\n 'season': year,\n 'result': scrape_utils.determine_home_win(curr_team['game_location'], curr_team.pop('game_result')),\n '_id': game.find('a')['href'][-17:-5]}\n\n # Place the teams in the correct spot depending on who is the home team\n if curr_team.pop('game_location') == 0:\n result['home'] = curr_team\n result['away'] = opp_team\n else:\n result['home'] = opp_team\n result['away'] = curr_team\n\n # Insert into database\n m.insert('game_log', result)", "def modify_cal(cal, convert_dic):\n new_cal = Calendar()\n for elm in cal.walk():\n if elm.name == \"VEVENT\":\n event = elm\n event[\"summary\"] = convert_dic[str(elm.get(\"summary\"))] \n new_cal.add_component(event)\n return new_cal", "def get_people(team):", "def create_player_list(self, current_game):\n players = [Player(c['summonerId'], c['championId'], c['teamId']) for c in current_game['participants']]\n return players", "async def read_all_teams(db_handler: DBHandler = Depends(database_dependency)):\n all_team_records = await db_handler.select_teams()\n all_team_records = [init_BaseTeam(record) for record in all_team_records]\n\n return all_team_records", "def update_chart():\n selected_team = variable.get()\n selected_period = period_variable.get()\n if selected_period == \"All\":\n selected_period = -1\n elif selected_period == \"OT\":\n selected_period = 4\n selected_id = -1\n if selected_team == home_team:\n selected_id = data[\"data\"][\"game\"][\"hometeamid\"]\n elif selected_team == away_team:\n selected_id = data[\"data\"][\"game\"][\"awayteamid\"]\n clear_events()\n for i in range(0, len(plays)):\n add_to_chart(plays[i], {\"teamid\":selected_id, \"period\":selected_period})", "def func_calendar_list():\r\n creds = None\r\n global page_token\r\n #global new_calendar_list=[]\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no 
(valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\r\n new_calendar_list = []\r\n for calendar_list_entry in calendar_list['items']:\r\n new_calendar_list.append(calendar_list_entry['summary'])\r\n page_token = calendar_list.get('nextPageToken')\r\n return (new_calendar_list)", "def test_get_team_history(self):\n pass", "def teams(self):\n t = [e[0] for e in self.pick_set.all().values_list(\"team\")]\n return Team.objects.filter(id__in=set(t))", "def create_schedule_team(self, schedule):\r\n stub_user = self.find(\"users\", \"Stub User\", attribute=\"name\")\r\n schedule_json = {\r\n \"name\": schedule['name'],\r\n \"type\": \"schedule\",\r\n \"time_zone\": \"Pacific/Auckland\",\r\n \"schedule_layers\": [\r\n {\r\n \"start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_virtual_start\": \"2099-12-31T00:00:00+13:00\",\r\n \"rotation_turn_length_seconds\": 86400,\r\n \"users\": [\r\n {\r\n \"user\": {\r\n \"type\": \"user\",\r\n \"id\": stub_user[\"id\"]\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n try:\r\n self.rpost(\"users\", json=schedule_json)\r\n except PDClientError as e:\r\n raise e", "def test_teams_get_team_v1(self):\n pass", "def get_all_fb_teams(self):\n\n all_teams = ()\n self._logger.debug(\"Getting all fb teams from database\")\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT team_id, team_name, time FROM team ORDER BY \\\ntime DESC\")\n teams = cursor.fetchall()\n\n for team_id, name, timestamp in teams:\n intermediate_teams = ()\n intermediate_teams = intermediate_teams + (name,)\n cursor.execute(\"SELECT player FROM player_team_xref WHERE \\\nteam = {0}\".format(team_id))\n players = cursor.fetchall()\n for player in players:\n cursor.execute(\"SELECT first_name, last_name, nickname \\\nFROM player WHERE player_id = {0}\".format(player[0]))\n first_name, last_name, nickname = cursor.fetchall()[0]\n\n intermediate_teams = intermediate_teams + (first_name,\n last_name, nickname)\n\n intermediate_teams = intermediate_teams + (timestamp.strftime('%Y-%m-%d'),)\n all_teams = all_teams + (intermediate_teams,)\n del intermediate_teams\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return all_teams", "def teamsatevent():\n if not is_request_valid(request):\n abort(400)\n \n text = request.form['text']\n \n event, _ = find_event_and_match(text)\n \n if event == None:\n return EVENT_NOT_FOUND\n \n # This gets all the teams at the event\n teams = requests.get(f'https://us-central1-pearadox-2020.cloudfunctions.net/GetSingleByTypeAndId/teams/{EVENT_IDS[event]}').json()\n return_str = ''\n for team in teams:\n name = teams[team]['team_name']\n location = teams[team]['team_loc']\n # Append the team name, 
number, and location to the response string\n return_str += f'\\n* {team}-{name} \\t{location}'\n \n return jsonify(\n response_type = 'in_channel',\n type='mrkdwn',\n text = return_str\n )\n \n # If there is no event, then event not found\n return EVENT_NOT_FOUND", "def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]", "def updateEvents(self):\n # Update calendar data\n d_start = datetime.datetime.today()\n d_end = d_start + datetime.timedelta(self.delta_days)\n results = self.cal_cal.date_search(d_start, d_end)\n\n # Flush the events dict\n self.events = []\n # Add each events\n for event in results:\n # Format the title of the event\n str_title = event.instance.vevent.summary.value\n if len(str_title) > 20:\n str_title = str_title[:17] + \"...\"\n # Format the date of the event\n vdate = event.instance.vevent.dtstart.value\n d = datetime.datetime.strptime(\n vdate.strftime(\"%d %m %Y\"), \"%d %m %Y\")\n str_date = \"%s %d %s\" % (\n self.days_french[d.weekday()],\n d.day,\n self.months_french[d.month -1])\n # Format the date gap\n gap = 1 + (d - d_start).days\n # Save the event\n self.events.append((str_title, str_date, gap))", "def cumulative_stats_for_teams_each_year(self):\n self.cumulative_stats_for_team_each_year = (\n self.combine_both_winning_losing_games_stats\n .sort_values(['WTeamID','Season'])\n .groupby(['WTeamID'])\n .cumsum()\n .pipe(lambda x:x.assign(Season = self.combine_both_winning_losing_games_stats.Season.values))\n .pipe(lambda x:x.assign(TeamID = self.combine_both_winning_losing_games_stats.WTeamID.values))\n .drop(['LTeamID','win_rate'],1)\n .pipe(lambda x:x.assign(win_rate = x.winning_num_counts/(x.winning_num_counts + x.losing_num_counts)))\n .pipe(lambda x:x.assign(WFGP = x.WFGM/x.WFGA))\n .pipe(lambda x:x.assign(WFG3P = x.WFGM3/x.WFGA3))\n .pipe(lambda x:x.assign(WFTP = x.WFTM/x.WFTA))\n .pipe(lambda x:x.assign(LFGP = x.LFGM/x.LFGA))\n .pipe(lambda x:x.assign(LFG3P = x.LFGM3/x.LFGA3))\n .pipe(lambda x:x.assign(LFTP = x.LFTM/x.LFTA))\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n # rebounds cumsum stats\n .pipe(lambda x:x.assign(total_def_rebounds_percent = x.total_def_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_off_rebounds_percent = x.total_off_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_rebound_possession_percent = x.total_off_rebounds/x.total_team_missed_attempts))\n .pipe(lambda x:x.assign(total_rebound_possessiongain_percent = x.total_def_rebounds/x.total_opp_team_missed_attempts))\n # assists, turnovers, steals, blocks and personal fouls\n .pipe(lambda x:x.assign(total_block_opp_FGA_percent = x.total_blocks/x.total_opp_fga))\n .pipe(lambda x:x.assign(total_assist_per_fgm = x.total_assists/x.total_fgm))\n .pipe(lambda x:x.assign(total_assist_turnover_ratio = x.total_assists/x.total_turnover))\n # win or 
lose by how many points\n .pipe(lambda x:x.assign(lose_rate = 1-x.win_rate))\n .pipe(lambda x:x.assign(win_score_by = x.WScore - x.losing_opponent_score))\n .pipe(lambda x:x.assign(lose_score_by = x.LScore - x.winning_opponent_score))\n .pipe(lambda x:x.assign(expectation_per_game = x.win_rate * x.win_score_by/x.winning_num_counts + x.lose_rate * x.lose_score_by/x.losing_num_counts))\n .pipe(lambda x:x.assign(avg_win_score_by = x.win_score_by/x.winning_num_counts))\n .pipe(lambda x:x.assign(avg_lose_score_by = x.lose_score_by/x.losing_num_counts))\n )", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='foo@bar.com',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def player_stats_query(week, player_list, session=s): \n #initialize lists\n pos_list = []\n team_list = []\n \n #cycle thru each player that is currently available\n for player in avail_player_key:\n #build the API url for the unique player key\n url_player = base_query_url+'league/'+leagueID+'/players;player_keys='+player+'/stats;type=week;week='+str(week)\n #convert API call to json\n raw = s.get(url_player, params={'format': 'json'}).json()\n #parse out the players details info (e.g. position, owned, etc.)\n player_details = raw['fantasy_content']['league'][1]['players']['0']['player'][0]\n #parse out position from player details\n pos = player_details[9]['display_position'].upper()\n \n ## FILTER OUT NON-OFFENSE POSITIONS\n if pos not in ['QB', 'WR', 'RB', 'TE']:\n continue\n else:\n \n #parse out team from player_details\n team = player_details[6]['editorial_team_abbr'].upper()\n #append data to lists\n pos_list.append(pos)\n team_list.append(team)\n \n #initialize a stats list\n stats_list = []\n #parse out the player stats\n player_stats = raw['fantasy_content']['league'][1]['players']['0']['player'][1]['player_stats']['stats']\n #loop thru all of the various stats\n for stat in player_stats:\n stat_dict = stat['stat']\n stats_list.append(stat_dict)\n \n return stats_list" ]
[ "0.6536402", "0.59650755", "0.5887284", "0.58515286", "0.5829555", "0.579289", "0.5745127", "0.57173425", "0.57036144", "0.56878537", "0.56028706", "0.55922323", "0.5529244", "0.5521643", "0.54728687", "0.54638255", "0.54560107", "0.5452628", "0.5441508", "0.5441441", "0.5440784", "0.5407121", "0.5395928", "0.53905064", "0.5351544", "0.534404", "0.534404", "0.5337229", "0.5323738", "0.5321518", "0.53179", "0.5302837", "0.52860117", "0.5275448", "0.5270853", "0.5255861", "0.5252066", "0.52485067", "0.5240727", "0.52380586", "0.52241087", "0.52236646", "0.5218885", "0.5215997", "0.5213299", "0.5210699", "0.51993775", "0.51952934", "0.5187357", "0.5185888", "0.5184096", "0.517763", "0.5175126", "0.5169596", "0.51680714", "0.51632774", "0.514931", "0.5147372", "0.5143808", "0.5142306", "0.51414424", "0.5139255", "0.5135483", "0.51304394", "0.5122744", "0.51163083", "0.511355", "0.510976", "0.5095055", "0.50910586", "0.50896263", "0.50876254", "0.50838286", "0.5073835", "0.5069507", "0.5069507", "0.50666153", "0.5065783", "0.50646317", "0.5063042", "0.50606424", "0.5037078", "0.5033392", "0.50275946", "0.50227606", "0.5014899", "0.5012346", "0.50029397", "0.5000331", "0.49953166", "0.49907666", "0.4987973", "0.49753302", "0.49732143", "0.49711525", "0.4971128", "0.49688292", "0.4959001", "0.49519873", "0.49483722" ]
0.6215539
1
Sort players by score and alternate team picks
def make_teams(players, timeslot):
    player_list_with_scores = []
    for name in players:
        player = pickle.loads(playersdb.get(name))
        # while we have player object loaded, set game timeslot for player
        player['games'].append(timeslot)
        playersdb.set(name, pickle.dumps(player))
        player_list_with_scores.append((name, player['score']))
    player_list_with_scores.sort(key=lambda tup: tup[1], reverse=True)  # sort by score
    teamA = [p[0] for p in player_list_with_scores[::2]]
    teamB = [p[0] for p in player_list_with_scores[1::2]]
    return teamA, teamB
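A minimal, self-contained sketch of the same sort-then-alternate pattern shown in the document above; the sample data and the helper name split_alternating are hypothetical stand-ins for the playersdb/pickle storage used there:

# hypothetical sample data standing in for the pickled player records
players_with_scores = [("ana", 12), ("bo", 7), ("cy", 15), ("di", 9)]

def split_alternating(scored_players):
    # sort by score, highest first, then deal players out alternately
    ranked = sorted(scored_players, key=lambda tup: tup[1], reverse=True)
    team_a = [name for name, _ in ranked[::2]]
    team_b = [name for name, _ in ranked[1::2]]
    return team_a, team_b

print(split_alternating(players_with_scores))  # (['cy', 'di'], ['ana', 'bo'])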
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self):\r\n\t\tif ScoreOpt.isGroupVassals():\r\n\t\t\tself._playerScores.sort(lambda x, y: cmp(x.sortKey(), y.sortKey()))\r\n\t\t\tself._playerScores.reverse()\r\n\t\tmaxPlayers = ScoreOpt.getMaxPlayers()\r\n\t\tif maxPlayers > 0 and len(self._playerScores) > maxPlayers:\r\n\t\t\tself._playerScores = self._playerScores[len(self._playerScores) - maxPlayers:]", "def order_players_by_initial_rank(self):\n pass", "def print_top_scorers(teams):\n\t\n\tall_players = []\n\t\n\tfor i in range(0,len(teams)):\n\t\tfor j in range(0, len(teams[i].players)):\n\t\t\tall_players.append(teams[i].players[j])\n\t\t\n\tplayers_sorted_score = sorted(all_players, key=attrgetter('shot_goals'), reverse=True)\n\t\n\tfor player in players_sorted_score:\n\t\tif player.shot_goals > 0:\n\t\t\tprint(player.first_name + \" \" + player.last_name + \" - \" + player.team + \" : \" + str(player.shot_goals))", "def sort_data(self):\n\n # zips the game_list and game_Scores, sorts the result by scores, and then puts them back.\n self.game_list, self.game_scores = zip(*sorted(zip(self.game_list, self.game_scores), key=lambda pair: pair[1]))", "def get_players_by_rank(self):\n return sorted(self.participants, key=lambda p: p.tournament_score, reverse=True)", "def sorted_scores(scores):\n\treturn sorted(scores, key=lambda sailor: (total_score(sailor), sailor[1][0]))", "def get_scores_in_order_of_players(self):\n \n players = self.referee.get_current_state().get_players()\n\n player_scores = []\n for player_color in self.player_color_order:\n for player in players:\n if player_color == player.get_color():\n player_scores.append(player.get_score())\n break\n\n return player_scores", "def playerStandings():\n\n \n cursor.execute(\"select * from players\")\n player_data = cursor.fetchall()\n wins_sorted = []\n\n for tup_index in range(len(player_data)):\n #the %s is about 400 ns faster than %d for integer substitution\n cursor.execute(\"select count(winnerid) from matches where winnerid = %s\" % player_data[tup_index][0])\n numMatchesWon = cursor.fetchone()[0]\n\n cursor.execute(\"select count(loserid) from matches where loserid = %s\" % player_data[tup_index][0])\n numMatchesLost = cursor.fetchone()[0]\n\n numMatchesPlayed = numMatchesWon + numMatchesLost\n\n wins_sorted.append(int(numMatchesWon))\n player_data[tup_index] += int(numMatchesWon),\n player_data[tup_index] += int(numMatchesPlayed),\n \n wins_sorted.sort(reverse=True)\n player_data_sorted_bywins = []\n \n #this is how im sorting the data from the database by wins, I'm hoping that this was supposed to be done with python code and not sql\n for w in wins_sorted:\n for tup_ind in range(len(player_data)):\n if player_data[tup_ind][2] == w:\n player_data_sorted_bywins.append(player_data[tup_ind])\n del player_data[tup_ind]\n break\n \n return player_data_sorted_bywins", "def sort_player(players: list[Player], reverse=True) -> list:\n if reverse:\n players.sort(reverse=True, key=lambda x: (int(x.point), int(x.elo)))\n else:\n players.sort(reverse=False, key=lambda x: (int(x.point), int(x.elo)))\n return players", "def tiles_by_score(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)\n return sorted_list", "def sort_cards(self):\n self.cards.sort(key=operator.attrgetter('persona', 'rank'))\n self.update_position()", "def sort_results(boxes):\n return sorted(results[k], key=lambda x : x['score'], reverse=True)", "def run_sort_home_by_score(self):\n self.homes = self.python_sort(self.homes)", "def score_game(self):\r\n players = 
self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()", "def drawsheet_get_score(player, scores):\n def distance(score, player):\n dx = float(score[0] - player[0]) / 5\n dy = float(score[1] - player[1])\n if dy < 0:\n dy *= 3\n\n return math.sqrt(dx * dx + dy * dy)\n\n if len(scores) == 0:\n return None\n\n scores.sort(key=lambda s: distance(s[1], player[1]))\n #print([(k, distance(k[1], player[1])) for k in scores[:3]])\n score = scores[0]\n del scores[0]\n\n return score[0]", "def sort_player(self, player):\r\n if not player:\r\n return random.choice(self.players)\r\n return player", "def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)", "def climbingLeaderboard(scores, alice):\n\n # unique scores\n scores = sorted(list(set(scores))) # asc\n player_ranks = []\n idx = 0\n n = len(scores)\n\n for alice_score in alice: # alice in asc order\n \n # Find the rank. 
For next alice score (which is not smaller), continue from the same index\n while (n > idx and alice_score >= scores[idx]):\n idx += 1\n\n player_ranks.append(n+1-idx)\n\n return player_ranks", "def test_merge_sort(self):\r\n\r\n players = merge_sort([\r\n Player(\"G\", \"H\", \"I\", \"J\", \"K\", \"L\"), # P1\r\n Player(\"A\", \"B\", \"C\", \"D\", \"E\", \"G\"), # P2\r\n Player(\"A\", \"B\", \"D\", \"E\", \"F\", \"G\"), # P3\r\n Player(\"A\", \"B\", \"C\", \"D\", \"E\", \"F\"), # P4\r\n ])\r\n\r\n self.assertEqual(\r\n [players[0].dod, players[1].dob, players[2].dob, players[3].dod],\r\n [\"F\", \"E\", \"F\", \"L\"]\r\n )", "def split_by_wins(matches, team_id):\n\n sorted_matches = {\n \"wins\": [],\n \"losses\": [],\n \"ties\": []\n }\n\n scores = {}\n\n for match_id, match in matches.items():\n this_team_score = 0\n other_team_score = 0\n if match.hometeam.team_id == team_id:\n this_team_score = match.homegoals\n other_team_score = match.awaygoals\n elif match.awayteam.team_id == team_id:\n this_team_score = match.awaygoals\n other_team_score = match.homegoals\n else:\n continue\n\n scores[match_id] = (this_team_score, other_team_score)\n\n if this_team_score > other_team_score:\n sorted_matches[\"wins\"].append(match_id)\n elif this_team_score < other_team_score:\n sorted_matches[\"losses\"].append(match_id)\n else:\n sorted_matches[\"ties\"].append(match_id)\n\n return sorted_matches, scores", "def sort_players(list_of_players):\n sorted_players = sorted(list_of_players,\n key=itemgetter('Height (inches)'))\n\n experienced_players = []\n non_experienced_players = []\n for player in sorted_players:\n if player['Soccer Experience'] == 'YES':\n experienced_players.append(player)\n else:\n non_experienced_players.append(player)\n return experienced_players, non_experienced_players", "def sorting_by_criteria(self, result):\r\n\t\tresult = sorted(result, key=lambda r: r[0])\r\n\t\tflag = False\r\n\t\tm = result[0][0]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][0] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" in prewin status, compare useful_amount only \"\"\"\r\n\t\tif (result[0][0] == 0):\r\n\t\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\t\t\ttest = \"\"\r\n\t\t\tfor r in result:\r\n\t\t\t\ttest += \"[{0}, {1}, {2}, {3}], \".format(r[0], r[1], r[2], r[3])\r\n#\t\t\tprint \"prewin status: {0}\".format(test)\r\n\t\t\tself.current_best_state = [result[0][0], result[0][1], result[0][2]]\r\n\t\t\treturn result[0][3]\r\n\r\n\t\t\"\"\" sort by score (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[2], reverse=True)\r\n\t\tflag = False\r\n\t\tm = result[0][2]\r\n\t\tfor i in range(len(result)):\r\n\t\t\tif (result[i][2] == m): continue\r\n\t\t\tflag = True\r\n\t\t\tbreak\r\n\t\tif not flag: i += 1\r\n\t\tresult = result[:i]\r\n\r\n\t\t\"\"\" sort by useful card amount (big -> small) \"\"\"\r\n\t\tresult = sorted(result, key=lambda r: r[1], reverse=True)\r\n\r\n\t\t\"\"\" choose one to discard \"\"\"\r\n\t\tdcard = result[0][3]\r\n\t\tm = result[0][1]\r\n\t\tbest = result[0]\r\n\t\tfor r in result:\r\n\t\t\tif (r[1] != m): break\r\n\t\t\tctype = GameBoard.CardType(r[3])\r\n\t\t\tif (ctype == 4) and (self.word_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\t\tif (ctype == 5) and (self.wind_list.count(r[3]) == 1):\r\n\t\t\t\tdcard = r[3]\r\n\t\t\t\tbest = r\r\n\t\tself.current_best_state = [r[0], r[1], r[2]]\r\n\t\treturn dcard", "def 
sortByScore(scores,boxes):\r\n \r\n fullboxlist=[]\r\n for i in range(len(scores)):\r\n boxdict={}\r\n boxdict['scores']=scores[i]\r\n boxdict['y_min']=boxes[i][0]\r\n boxdict['x_min']=boxes[i][1]\r\n boxdict['y_max']=boxes[i][2]\r\n boxdict['x_max']=boxes[i][3]\r\n fullboxlist.append(boxdict)\r\n \r\n fullboxlist.sort(key=getClass1Score, reverse=True)\r\n boxlist=[]\r\n for boxdict in fullboxlist:\r\n # if class 0 has highest prob, find second highest as class\r\n class_code=np.where(boxdict['scores']==np.amax(boxdict['scores']))[0][0]\r\n if class_code==0:\r\n class_code = 1+ np.where(boxdict['scores'][1:]==np.amax(boxdict['scores'][1:]))[0][0]\r\n\r\n \r\n boxlist.append([boxdict['y_min'],\r\n boxdict['x_min'],\r\n boxdict['y_max'],\r\n boxdict['x_max'],\r\n boxdict['scores'][class_code],\r\n class_code\r\n ])\r\n \r\n return boxlist,fullboxlist", "def determine_winner1(self): \r\n sorted_player_rank = self._rank()\r\n print(f\"sorted player rank: {sorted_player_rank}\")\r\n print(f\"winner is player {sorted_player_rank[0]}: with points {sorted_player_rank[0][1]}\")", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))", "def sort_population_by_score(input_population: Population) -> None:\n input_population.sort(key=score_individual, reverse=True)", "def sort_challengers_by_points(self):\n if not self.isOrdered:\n order = []\n for k, i in self.scores.items():\n order.append((i, k))\n order.sort()\n\n nb = len(self.challengers)\n for i, d in enumerate(order):\n self.challengers[nb - i - 1] = d[1]\n self.isOrdered = True\n return self.challengers", "def __get_score_ordered(scores, idx):\t\n\treturn [x[1][idx] for x in sorted(scores.items())]", "def sort_solutions(self, solutions):\r\n if self.breeding_rules.sorting_order is ScoresSortingOrder.ASCENDING:\r\n reverse = False\r\n else:\r\n reverse = True\r\n return sorted(solutions, reverse=reverse, key=lambda solution: solution.score)", "def display_tournament_player_list(self):\r\n tournament_name = self.input_name(\"nom du tournoi\")\r\n tournament = tournaments_table.get(Query().Nom == tournament_name)\r\n player_list = list()\r\n for rated_player in tournament['Classement']:\r\n player_list.append(players_table.get(doc_id=rated_player[0]))\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les joueurs du tournoi de\", tournament_name, \": \")\r\n if user_choice == '1':\r\n player_list.sort(key=lambda x: x['Nom'])\r\n for player in player_list:\r\n print(player)\r\n elif user_choice == '2':\r\n 
player_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in player_list:\r\n print(player)", "def scoreTeams(curTeams, oppTeam, pokedex, league, minDistWanted):\n battleData, htmlData = loadBattleData(league)\n similarities = loadSims() \n \n #If not given an opponent team then simply randomly choose losers from the dataset to compare to.\n if len(oppTeam) == 0:\n picks = set([])\n while (len(picks) < NUMLOSINGTEAMS and (not len(picks) == len(battleData))):\n picks.add(random.randint(0,len(battleData)-1))\n\n losers = []\n loserDict = {}\n for i in picks:\n entry = battleData[i]\n winner,loser = determineWinner(entry)\n loserDict[str(loser)] = [winner]\n losers.append( (loser,0) )\n\n #Given opponent team then find similar teams\n else:\n oppTeam = [getSimPokemon(opp,similarities) for opp in oppTeam]\n\n #create dictionary from losers team to the team that beat them.\n loserDict = {}\n sims = []\n for d in battleData:\n winner, loser = determineWinner(d)\n\n wTeam = teamToArray(winner,pokedex)\n lTeam = np.array(teamToArray(loser, pokedex))\n\n score = 0\n for oppNp in oppTeam:\n score+= np.amax(lTeam*oppNp) \n\n if str(loser) in loserDict:\n loserDict[str(loser)].append(winner)\n else:\n #new to dictonary\n loserDict[str(loser)] = [winner]\n\n sims.append((loser, score))\n\n\n sims = sorted(sims, key = lambda x : x[1], reverse = True)\n\n cutoff = min(len(sims),NUMLOSINGTEAMS)\n losers = sims[:cutoff]\n\n #Gather winners to losing teams\n winnersComp = []\n for loser,_ in losers:\n for winner in loserDict[str(loser)]:\n winnersComp.append(teamToArray(winner,pokedex))\n \n topScore = len(winnersComp)*6 #pkmn team size\n\n results = []\n inverted_idx = {}\n\n existsSet = []\n\n #Creates inverted index for teams, while simoultaneously weeding out any teams that are exactly similar.\n for i in range(len(curTeams)):\n team = curTeams[i]\n results.append((team,0))\n sTeam = set(team)\n if not (sTeam in existsSet):\n existsSet.append(sTeam)\n for pkm in team:\n if pkm != EMPTY:\n if pkm in inverted_idx:\n inverted_idx[pkm].append(i)\n else:\n inverted_idx[pkm] = [i]\n \n #Giving the similiarity scores to the winners based off of the inverted index.\n for pkm in inverted_idx:\n for winner in winnersComp:\n wArr = np.array(winner)\n #tArr = getSimPokemon(pkm,similarities)\n tArr = similarities[pkm]\n \n vals = wArr * tArr\n\n score = np.amax(vals)\n\n for i in inverted_idx[pkm]:\n results[i] = (results[i][0],results[i][1]+(score/topScore))\n\n results = sorted(results, key = lambda x : x[1], reverse = True)\n\n if len(results) < NUMTEAMSRETURN:\n if len(results) == 0:\n returnTeams = [[] for x in range(NUMTEAMSRETURN)]\n teamScores = [0 for x in range(NUMTEAMSRETURN)]\n\n else:\n returnTeams = [result[0] for result in results]\n teamScores = [result[1] for result in results]\n else:\n firstResult, firstScore = results[0]\n returnTeams = [firstResult]\n teamScores = [round(firstScore*100,1)]\n returnSets = [set(firstResult)]\n \n i = 1\n\n #Loops through results and adds teams with the proper edit distance away.\n while(len(returnTeams) < NUMTEAMSRETURN and minDistWanted > 0):\n teamToConsider,teamToConsiderScore = results[i]\n \n considerSet = set(teamToConsider)\n add = True\n ##checks the edit distance of teams is above wanted\n for team in returnSets:\n if len(team.union(considerSet)) < len(team)+minDistWanted:\n add = False\n\n ##If indeed above wanted levels then add\n if add:\n returnTeams.append(teamToConsider)\n returnSets.append(considerSet)\n 
teamScores.append(round(teamToConsiderScore*100,1))\n \n i+=1\n\n if i >= len(results):\n i = 1\n minDistWanted -= 1 \n \n winHtmls = []\n if htmlData != None:\n for team,_ in losers:\n for winner in loserDict[str(team)]:\n winHtmls.extend(htmlData[str(sorted(winner))])\n \n\n return returnTeams, teamScores, winHtmls", "def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)", "def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking", "def sort_by_reranker_scores(self):\n self.parses.sort(key=lambda parse: (parse.reranker_score,\n parse.parser_score),\n reverse=True)", "def sort_ranking_dict(self):\n\n # reset self.ranking_dict to empty dict (if sorted tuple)\n self.ranking_dict = {}\n\n # create ranking dict with player and grand total score\n for j, player in enumerate(self._players_list):\n ranking_name, ranking_score = \\\n self._players_list[j].get_name_and_grand_total_score()\n self.ranking_dict[ranking_name] = ranking_score\n\n # reverse sort ranking dict by grand total (returns list)\n self.ranking_dict = sorted(self.ranking_dict.items(),\n key=lambda x: x[1], reverse=True)", "def leaderboard(score=None, username=None):\n if score and username != None:\n with open(\"leaderboard.csv\", \"a\", newline='') as file:\n fields = ['score', 'name']\n writer = csv.DictWriter(file, fieldnames=fields)\n writer.writerow({'score' : score, 'name' : username})\n\n with open(\"leaderboard.csv\", \"r\") as file:\n sortlist = []\n reader = csv.reader(file)\n for i in reader:\n sortlist.append(i)\n for ind, value in enumerate(sortlist):\n if ind != 0:\n value[0] = int(value[int(0)])\n\n for ind, value in enumerate(sortlist):\n print(value)\n\n\n for i in range(555):\n for i in range(len(sortlist)-1):\n if i != 0:\n if sortlist[i][0] < sortlist[i+1][0]:\n change = sortlist[i]\n sortlist[i] = sortlist[i+1]\n sortlist[i+1] = change\n for i in range(len(sortlist)-1):\n print(sortlist[i])\n else:\n print('Leaderboard has been Created Play/Win a session to create a new Leaderboard')", "def organize(select, strain, equals):\n scores = []\n data = list(strainer(select, strain, equals))\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores", "def score_draft(self, other_draft):\n\n # ?\n next_order = other_draft.order_dict\n\n order_ids = {}\n for k,v in next_order.items():\n order_ids[k.id] = v\n \n \n max_pick = len(next_order) + 1\n\n # Sort picks by team.\n d = {}\n for team, picks in self.by_team():\n picks = [order_ids.get(p.player.id, max_pick) for p in picks]\n d[team] = sorted(picks)\n\n\n scores = []\n # Presumably because there are 11 drafters?\n for team, picks in d.items():\n p = picks[:11] # ?\n average = sum(p) / 11.0\n median = p[6]\n scores.append((team, average, median))\n\n scores = sorted(scores, key=lambda e: e[1])\n\n return scores", "def own_games(self):\r\n return sorted(self.games + self.finals, key=lambda g: (g.datetime, g.pitch.rank))", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def set_rank_order(self):\n for k in self._run:\n self._run[k].sort(key=lambda x:x.get_rank(),reverse=False)\n tot_res = len(self._run[k])\n for r in self._run[k]:\n r.set_score(tot_res - int(r.get_rank()) + 1)\n print r.get_str()", "def 
testHighscore(self):\n \n game = Game.objects.get(title='game0')\n gameplayeds = game.gameplayed_set\n ply_group = Group.objects.get(name='Player')\n for i in range(4):\n user = ply_group.user_set.get(username='ply{}'.format(i))\n gameplayed = gameplayeds.get(user=user)\n gameplayed.gameScore = i\n gameplayed.save()\n \n response = self.client.get(\n reverse('api:game-buyers', args=['v1', 'game0']),\n {'order_by': 'gameScore'},\n format='json'\n )\n self.assertEquals(response.status_code, 200)\n content = self.parser.parse(BytesIO(response.content))\n for i in range(4):\n self.assertEquals(content['results'][i]['user'], 'ply{}'.format(i))\n \n response = self.client.get(\n reverse('api:game-buyers', args=['v1', 'game0']),\n {'order_by': '-gameScore'},\n format='json'\n )\n self.assertEquals(response.status_code, 200)\n content = self.parser.parse(BytesIO(response.content))\n for i in range(4):\n self.assertEquals(content['results'][i]['user'], 'ply{}'.format(3 - i))", "def sort_by_parser_scores(self):\n self.parses.sort(key=lambda parse: -parse.parser_score)", "def sort_standings_rows(self, standings_rows, heat_games, players, rank_finals=False):\n non_finals_sort_key_fn = self.get_standings_row_sort_key_fn()\n self.calculate_secondary_rank_values(standings_rows, heat_games, players)\n standings_rows.sort(key=non_finals_sort_key_fn, reverse=True)\n\n if rank_finals:\n # If someone has played in a final or third-place playoff then we\n # fix their position accordingly.\n relocate_indices_to = []\n for (i, s) in enumerate(standings_rows):\n if len(s.finals_form) >= 3 and s.finals_form[2] != '-':\n fixed_pos = finals_form_to_position(s.finals_form)\n if fixed_pos:\n relocate_indices_to.append((i, fixed_pos))\n\n relocate_row_to = []\n for (i, fixed_pos) in reversed(relocate_indices_to):\n relocate_row_to.append((standings_rows[i], fixed_pos))\n del standings_rows[i]\n\n for (s, fixed_pos) in sorted(relocate_row_to, key=lambda x : x[1]):\n assert(fixed_pos >= 1 and fixed_pos <= 8)\n standings_rows.insert(fixed_pos - 1, s)\n\n if rank_finals:\n sort_key_fn = lambda s : (s.finals_points, non_finals_sort_key_fn(s))\n else:\n sort_key_fn = non_finals_sort_key_fn\n\n prev_s = None\n pos = 0\n joint = 0\n for s in standings_rows:\n if prev_s and sort_key_fn(prev_s) == sort_key_fn(s):\n joint += 1\n else:\n pos += joint + 1\n joint = 0\n s.position = pos\n prev_s = s\n standings_rows.sort(key=lambda s : (s.position, s.name))", "def get_list_team_scores(self):\n scores = defaultdict(lambda: {\n \"scored_xg\": [],\n \"conceded_xg\": [],\n \"home_adv\": 0,\n \"expected_points\": 0\n })\n\n for g in self.games:\n scores[g.HomeTeam][\"scored_xg\"].append(g.FTHG)\n scores[g.HomeTeam][\"conceded_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"scored_xg\"].append(g.FTAG)\n scores[g.AwayTeam][\"conceded_xg\"].append(g.FTHG)\n\n for team in scores.keys():\n scores[team][\"expected_points\"] = (self.get_table(metric='points')[team] /\n len(scores[team][\"scored_xg\"]))\n\n return scores", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def per100_top_stat_players(game_type, stat, player_pk, excluded_pks, season_id=None):\n season = None\n if season_id:\n season = bmodels.Season.objects.get(id=season_id)\n\n if player_pk:\n players = bmodels.Player.objects.filter(pk=player_pk)\n else:\n players = bmodels.Player.objects.all().exclude(\n Q(first_name__contains=\"Team\") | Q(pk__in=excluded_pks))\n player_list = []\n for player in 
players:\n if season:\n result = player.statline_set.filter(game__game_type=game_type, game__date__range=(\n season.start_date, season.end_date)).aggregate(Sum(stat), Sum('off_pos'))\n else:\n result = player.statline_set.filter(\n game__game_type=game_type).aggregate(Sum(stat), Sum('off_pos'))\n if result['off_pos__sum'] and result['off_pos__sum'] is not 0:\n percentage = (result[stat + '__sum'] /\n result['off_pos__sum']) * 100\n else:\n percentage = 0.0\n player_list.append((player.first_name, percentage))\n return sorted(player_list, key=lambda x: x[1], reverse=True)", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def tournament( pl, game ):\r\n\tlosses=[0 for p in pl]\r\n\tfor i in range(len(pl)):\r\n\t\tfor j in range(len(pl)):\r\n\t\t\tif i==j: continue\r\n\t\t\twinner=game([pl[i],pl[j]])\r\n\t\t\t\r\n\t\t\tif winner==0:\r\n\t\t\t\tlosses[j]+=2\r\n\t\t\telif winner==1:\r\n\t\t\t\tlosses[i]+=2\r\n\t\t\telif winner==-1:\r\n\t\t\t\tlosses[i]+=1\r\n\t\t\t\tlosses[j]+=1\r\n\t\t\t\tpass\r\n\tz=zip(losses,pl)\r\n\tz.sort()\r\n\treturn z", "def players_report(self, sort_order='alpha'):\n self._view.report(self._model.get_players(sort_order))", "def TeamScores(level,team_N):\r\n \r\n groupresults = Combined_Non_Compound_Results(level).groupby('Club') \r\n # groups clubs together in a big list just for NMR\r\n # will need to generalise for all categories\r\n\r\n LoR = [ frame for LoRs, frame in groupresults ]\r\n \r\n TeamTable = pd.DataFrame({},columns=['Club','Total Score', # initial empty\r\n 'Total Golds', 'Total Hits']) # dataframe\r\n \r\n# Uni = pd.DataFrame({},columns=['Name','Club','Score','Golds', 'Hits'])\r\n TeamComposition = [[],[],[],[]]\r\n for j in range(4): # only four clubs in the dataframe\r\n\r\n Uni = LoR[j][0:team_N] # jth club in index, gets top team_N archers\r\n Uni = Uni.reset_index(drop=True) # resets the index for UCL sublist\r\n UniName = Uni.loc[0,'Club']\r\n\r\n Scores=0\r\n Golds=0\r\n Hits=0\r\n \r\n TeamComposition[j].append(UniName)\r\n\r\n for i in range(team_N): # sums the score,golds and hits for uni club j\r\n Scores += Uni.loc[i,'Score']\r\n Golds += Uni.loc[i,'Golds']\r\n Hits += Uni.loc[i,'Hits']\r\n\r\n TeamComposition[j].append(Uni.loc[i,'Name'])\r\n \r\n TeamTable2 = pd.DataFrame({'Club': [UniName], \r\n 'Total Score': [Scores],\r\n 'Total Golds': [Golds], \r\n 'Total Hits': [Hits]},\r\n columns=['Club','Total Score', \r\n 'Total Golds', 'Total Hits'])\r\n \r\n TeamTable = TeamTable.append(TeamTable2) # appends each club data\r\n\r\n TeamTable = TeamTable.sort_values(['Total Score','Total Golds',\r\n 'Total Hits'],ascending=[False,False,\r\n False],na_position='last')\r\n TeamTable = TeamTable.reset_index(drop=True)\r\n print()\r\n print(TeamTable)\r\n print()\r\n \r\n \r\n FinalList = [[],[],[],[]]\r\n \r\n for h in range(4):\r\n for g in range(4):\r\n if TeamTable.iloc[h,0] == TeamComposition[g][0]:\r\n FinalList[h] = TeamComposition[g]\r\n\r\n \r\n for k in range(4):\r\n print(FinalList[k])\r\n print()\r\n\r\n if level == NovCategories:\r\n \r\n return print(\"----- End of Novice Team Scores -----\")\r\n \r\n if level == AllCategories:\r\n \r\n return print(\"----- End of Experienced Team Scores -----\")", "def pairing_other_rounds(self, players_list: list[Player]) -> list[Player]:\n\n if self.check_same_tournaments_points(players_list):\n players_list = sorted(players_list, key=lambda player: player.tournament_score)\n else:\n players_list = sorted(players_list, key=lambda 
player: player.ranking)\n players_list.reverse()\n apairing_players = self.generating_pairs(players_list)\n\n return apairing_players", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def sort_results(self):\n pass", "def leaderboard():\n \n global score_dictinary\n data = []\n fields = []\n scores = []\n names = []\n users = []\n i=0\n \n #Reads the winners from a mongo database \n read_mongo(scores, names)\n \n #Sorts the list in descending order\n quicksort(scores, names, 0, len(scores) - 1)\n \n #Joins the names and scores arrays together\n while i < len(scores):\n users.append(names[i] + \" \" + scores[i])\n i += 1\n \n users = (reversed(users))\n \n return render_template(\"leaderboard.html\", users=users)", "def __cmp__(self, x):\n if self.score < x.score: return -1\n elif self.score == x.score: return 0\n else: return 1", "def _sort(self):\n self.rows.sort(key=lambda x: (x['PERC1'], x['EQ'], x['PASS'], x['W2']),\n reverse=True)\n\n rank = 0\n prev_perc = 0\n prev_rank = 0\n for row in self.rows:\n if row[\"NR\"] == 0:\n # Something has already populated NR as 0 - so we set rank as\n # 0 too\n row['_RANK'] = 0\n row['_NR'] = 0\n continue\n\n # Increment our count\n rank += 1\n if row['PERC1'] == prev_perc:\n row['NR'] = \"\"\n row['_NR'] = prev_rank # I.e. joint 6th will be 6 here\n row['_RANK'] = rank # I.e. joint 6th could be 7, or 8 etc. here\n else:\n row['NR'] = rank\n row['_NR'] = rank\n row['_RANK'] = rank\n prev_perc = row['PERC1']\n prev_rank = rank", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]", "def test_boxscore_player_stats(self):\n test_v_player_stat = self.BS.vTeam_player_stats[0]['firstName']\n test_answer_v = 'Isaac'\n test_h_player_stat = self.BS.hTeam_player_stats[0]['firstName']\n test_answer_h = 'Pascal'\n self.assertEqual(test_v_player_stat, test_answer_v)\n self.assertEqual(test_h_player_stat, test_answer_h)", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def order_scores(doctors):\n\n # return doctors.sort(key=operator.methodcaller('get_review_score'))\n # print doctors\n print\n print\n ret_docs = sorted(doctors, key=operator.itemgetter('review_score'), reverse=True)\n # ret_docs = doctors.sort(key=lambda k: k['review_score'])\n # print ret_docs\n return ret_docs", "def abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list", "def bestscorers():\n players = Player.query.filter_by(is_admin=False).all()\n players = list(reversed(sorted(players, key=lambda player: player.goals)))\n for player in players:\n player.image = url_for('static', filename='images/players/{}'.format(player.image))\n player.team_name = player.team.name\n player.team_logo = 
url_for('static', filename='images/teams/{}'.format(player.team.logo_image))\n\n return render_template('standings/best-scorers.html', players=players, title='Best Scorers')", "def climbingLeaderboard(scores, alice):\n unique_scores = list({score: None for score in scores}.keys())[::-1]\n ranks = []\n # last_score_index = 0\n for game_score in alice:\n for i, score in enumerate(unique_scores):\n if score > game_score:\n ranks.append(len(unique_scores) - i + 1)\n break\n elif score == game_score:\n ranks.append(len(unique_scores) - i)\n break\n elif i == len(unique_scores) - 1:\n ranks.append(1)\n else:\n continue\n\n return ranks", "def _accounce_winner(self):\n\n winner = sorted(((player.get_name(), player.get_last_roll(), player.get_total_score())\n for player in self._players.get_players()),\n key=lambda player: (player[1]),\n reverse=True)[0]\n\n print(\"\\n\\nCongratulations {}, you rolled a {} and your total score is {}. You won the game!\"\n .format(winner[0], winner[1], winner[2]))", "def ranked(pmi_ranking, tscore_ranking, logdice_ranking, tscore, PMI, logdice):\r\n tsc_rank = tscore_ranking[(float(tscore))]\r\n pmi_rank = pmi_ranking[float(PMI)]\r\n logdice_rank = logdice_ranking[float(logdice)]\r\n summary_tscpmi = tsc_rank + pmi_rank\r\n summary_logdpmi = logdice_rank + pmi_rank\r\n return tsc_rank, pmi_rank, logdice_rank, summary_tscpmi, summary_logdpmi", "def _order_for_score_state_graph(toidf):\n temp = toidf.assign(LeadTrail=toidf.ScoreState.apply(lambda x: 'Lead' if x > 0 else 'Trail')) \\\n .query(\"ScoreState != 0\") \\\n [['Team', 'LeadTrail', 'Secs']] \\\n .groupby(['Team', 'LeadTrail'], as_index=False) \\\n .sum() \\\n .pivot(index='Team', columns='LeadTrail', values='Secs') \\\n .reset_index()\n temp = temp.assign(Diff=temp.Lead - temp.Trail).sort_values('Diff').assign(Y=1)\n temp.loc[:, 'Y'] = temp.Y.cumsum() - 1\n return temp[['Team', 'Y']]", "def tournament(submissions, num_rounds):\n strategies = [Strategy(submission) for submission in submissions]\n game_num = 1\n games = []\n for i in range(len(strategies)):\n for j in range(i+1, len(strategies)):\n #print(strategies[i].name, strategies[j].name)\n g = Game(strategies[i], strategies[j], num_rounds, game_num)\n score1, score2 = g.play()\n strategies[i].update_score(score1)\n strategies[j].update_score(score2)\n game_num += 1\n games.append(g)\n \n for strat in strategies:\n print(\"Final score for {} submitted by {} is {}\".format(strat.name, strat.author, strat.score))\n write_results(strategies, games)", "def __cmp__(self, other) :\n if self.strength > other.strength:\n return 1;\n elif self.strength == other.strength :\n if self.rank > other.rank :\n return 1;\n elif self.rank == other.rank :\n return 1 if self.kickers > other.kickers else -1 if self.kickers < other.kickers else 0;\n return -1;", "def sorting(recommendation: List[Tuple[str, int]]) -> None:\n \n for tup in range(len(recommendation)):\n score = recommendation[tup][1]\n alpha = recommendation[tup][0]\n for j in range(tup + 1, len(recommendation)):\n if recommendation[j][1] > score or \\\n (recommendation[j][1] == score and recommendation[j][0] < alpha):\n recommendation[j], recommendation[tup] = recommendation[tup], \\\n recommendation[j]", "def bestplayers():\n players = Player.query.filter_by(is_admin=False).all()\n players = list(reversed(sorted(players, key=lambda player: player.points)))\n for player in players:\n player.image = url_for('static', filename='images/players/{}'.format(player.image))\n player.team_name = player.team.name\n 
player.team_logo = url_for('static', filename='images/teams/{}'.format(player.team.logo_image))\n\n return render_template('standings/best-players.html', players=players, title='Best Players')", "def _compare(self, other): \n if(self.rank==other.rank):\n if (self.rank == 5 and other.rank==5) or (self.rank ==3 and other.rank==3):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1>maxother1):\n return 1\n else:\n return 0\n else:\n if(Card.ranks[maxself]=='Ace' or Card.ranks[maxother] == 'Ace'):\n maxself1 = sorted(self.ranks,reverse=True)\n maxself1 = maxself1[1]\n maxother1 = sorted(other.ranks,reverse=True)\n maxother1 = maxother1[1]\n if(maxself1<maxother1):\n return -1\n else:\n return 0\n \n if (self.rank == 4 and other.rank==4):\n maxself = max(self.ranks) \n maxother = max(other.ranks)\n if(maxself>maxother):\n return 1\n else:\n return -1\n if (self.rank ==2 and other.rank==2) or (self.rank ==0 and other.rank==0):\n newself = sorted(self.ranks,reverse=True)\n newother = sorted(other.ranks,reverse=True)\n maxsel = max(newself)\n maxoth = max(newother)\n if(maxsel>maxoth):\n return 1\n elif(maxsel<maxoth):\n return -1\n else:\n maxsel1= newself[1]\n maxoth1 = newother[1]\n if(maxsel1>maxoth1):\n return 1\n elif(maxsel1<maxoth1):\n return -1\n else:\n maxsel2= newself[2]\n maxoth2 = newother[2]\n if(maxsel2>maxoth2):\n return 1\n elif(maxsel2<maxoth2):\n return -1\n else:\n return 0\n if self.rank ==1 and other.rank==1:\n pairwali1 = {}\n pairwali2={}\n for i in range(0,3):\n if other.ranks[i] not in pairwali1:\n pairwali1[other.ranks[i]] = 1\n else:\n pairwali1[other.ranks[i]]= pairwali1[other.ranks[i]]+1\n if self.ranks[i] not in pairwali2:\n pairwali2[self.ranks[i]] = 1\n else:\n pairwali2[self.ranks[i]] = pairwali2[self.ranks[i]]+1\n t = list(pairwali1.keys())[list(pairwali1.values()).index(2)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(2)]\n if t!=r:\n if t>r:\n return -1\n elif t<r:\n return 1\n elif t==r:\n t= list(pairwali1.keys())[list(pairwali1.values()).index(1)]\n r = list(pairwali2.keys())[list(pairwali2.values()).index(1)]\n if t>r:\n return -1\n elif t<r:\n return 1\n else:\n return 0\n\n else:\n if(self.rank>other.rank):\n return 1\n else:\n return -1", "def liste_triee():\n\n global liste_username_score\n\n liste_username_score = sorted(liste_username_score, reverse = True, key=score_trie)", "def sort_scores(self, data: List[int]) -> List[int]:\n # Base Case\n if len(data) < 2:\n return data\n\n pivot = data[0]\n l = self.sort_scores([x for x in data[1:] if x < pivot])\n u = self.sort_scores([x for x in data[1:] if x >= pivot])\n return l + [pivot] + u", "def swapStats(population, masterList, firstPos, secondPos):\n tempTeam = [Team(population[\n firstPos].roster)] # has to be a list since getStats takes a list of teams (meant to be used for populations)\n tempTeam = getStats(tempTeam, masterList)\n # print \"Showing stats for tempTeam:\"\n # showStats(tempTeam,masterList)\n # print tempTeam[0].roster\n population[firstPos].roster = population[secondPos].roster\n population[firstPos].totAvg = population[secondPos].totAvg\n population[firstPos].totSb = population[secondPos].totSb\n population[firstPos].totHr = population[secondPos].totHr\n population[firstPos].totRbi = population[secondPos].totRbi\n 
population[firstPos].totRuns = population[secondPos].totRuns\n population[firstPos].points = population[secondPos].points\n\n population[secondPos].roster = tempTeam[0].roster\n population[secondPos].totAvg = tempTeam[0].totAvg\n population[secondPos].totSb = tempTeam[0].totSb\n population[secondPos].totHr = tempTeam[0].totHr\n population[secondPos].totRbi = tempTeam[0].totRbi\n population[secondPos].totRuns = tempTeam[0].totRuns\n population[secondPos].points = tempTeam[0].points", "def analyse_and_sort(self, df):\n if (type(df) is pd.Series):\n df = df.to_frame(\"score\")\n elif (type(df) is pd.DataFrame):\n df.columns = [\"score\"]\n df = self._filter(df)\n df = self._argrelmax(df)\n df = self._drop_close_extrema(df) # by = [deb1[0]]\n return df.sort_values(by=[\"score\"])[::-1]", "def nflteamrankings(self, irc, msg, args, optteam):\n \n optteam = optteam.upper().strip()\n\n if optteam not in self._validteams():\n irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n return\n \n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL25mbC90ZWFtL18vbmFtZQ==') + '/%s/' % optteam\n\n try:\n request = urllib2.Request(url)\n html = (urllib2.urlopen(request)).read()\n except:\n irc.reply(\"Cannot open page: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n div = soup.find('div', attrs={'class':'mod-container mod-stat'}) \n h3 = div.find('h3')\n statsfind = div.findAll('div', attrs={'class':re.compile('span-1.*?')})\n\n append_list = []\n \n for stats in statsfind:\n header = stats.find('h4')\n stat = stats.find('span', attrs={'class':'stat'})\n rank = stat.findNext('strong')\n append_list.append(ircutils.bold(header.text) + \" \" + stat.text + \" (\" + rank.text + \")\")\n \n descstring = string.join([item for item in append_list], \" | \")\n irc.reply(ircutils.mircColor(optteam,'red') + \" :: \" + ircutils.underline(h3.text) + \" :: \" + descstring)", "def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current", "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the base information to calculate player & opponent\n # feature values\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n if len(player_legal_moves) != len(opponent_legal_moves):\n return float(len(player_legal_moves) - len(opponent_legal_moves))\n \n # Get_center_coordinates and opponent. 
Then set the list of participants\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n participants = [player, opponent]\n \n # Then, for each participant obtain his/her feature values \n for participant in participants:\n if participant == player:\n p_legal_moves = player_legal_moves\n player_either = player\n participant_coordinates = p_y, p_x = \\\n game.get_player_location(participant)\n player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, \\\n player_path_count, player_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either,participant_coordinates, p_legal_moves)\n else:\n p_legal_moves = opponent_legal_moves\n player_either = opponent\n participant_coordinates = p_y, p_x \\\n = game.get_player_location(participant)\n opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, opponent_min_center_diff \\\n = \\\n get_player_feature_values(game, center_coordinates, \\\n player_either, participant_coordinates, p_legal_moves)\n \n # Place each participant's feature values in a tuple/vector surrogate \n pro_player_vector = \\\n (player_legal_move_count, player_start_center_distance, \\\n player_total_next_moves, player_max_path_length, player_path_count, \\\n opponent_min_center_diff)\n pro_opponent_vector = \\\n (opponent_legal_move_count, opponent_start_center_distance, \\\n opponent_total_next_moves, opponent_max_path_length, \\\n opponent_path_count, player_min_center_diff)\n \n # Provide a weighting vector for the features \n weight_vector = (1.5,0.1,1.0,0.001,0.001,0.001)\n \n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*(q-r ) for p,q,r \\\n in zip(weight_vector, pro_player_vector, pro_opponent_vector))\n \n return float(weighted_difference_dot_product)", "def letsplay(players, ncards, printmode,score_stich, score_game,did_cheat):\n printIntro()\n\n nplayers = len(players)\n stiche = np.zeros(nplayers)\n history = -np.ones((ncards,nplayers),dtype=int)\n playerorder = np.arange(nplayers)\n for nturn in range(ncards):\n #print(playerorder)\n for playerid in playerorder:\n #print(history[:nturn+1,:])\n cheated = did_cheat(history)\n player = players[playerid]\n if nturn == 0: # first round, everyone plays with cards covered\n card = player(nplayers, ncards, nturn, playerid, -np.ones((ncards,nplayers),dtype=int), cheated)\n else: # after that, we go round-robin with open cards\n card = player(nplayers, ncards, nturn, playerid, history, cheated)\n\n history[nturn,playerid] = card\n\n \n stich, winnerid =score_stich(history[nturn,:], did_cheat(history))\n stiche += stich\n playerorderCopy = playerorder\n playerorder = np.roll(np.arange(nplayers),-winnerid) # winner comes first next round\n\n #if True:\n #print()\n # print(history[:nturn+1])\n if printmode:\n printTurn(cheated,nturn,stich,history[nturn,:],playerorderCopy)\n\n score = score_game(stiche,did_cheat(history))\n if printmode:\n printResults(stiche, score)\n\n return score, history", "def make_player_stats(tournament, lineups):\n positions = find_player_positions(lineups)\n positions = positions.set_index('player_id')[['player_position']]\n player_stats = load_player_stats(tournament)\n player_stats.set_index('player_id', inplace=True)\n mask = player_stats['goals'] > player_stats['shots']\n player_stats.loc[mask, 'shots'] = player_stats[mask]['goals']\n res = 
player_stats.join(positions)\n res = res[pd.notna(res['player_position'])]\n return res", "def getLeaderboard(self, **kwargs):\n board = []\n scores = sorted(self._players, key=lambda score: score.dct_net['total'])\n pos = 1\n prev_total = None\n for sc in scores:\n score_dct = {\n 'player': sc.doc,\n 'total' : sc.dct_net['total'],\n }\n if prev_total != None and score_dct['total'] > prev_total:\n pos += 1\n prev_total = score_dct['total']\n score_dct['pos'] = pos\n for n,net in enumerate(sc.dct_net['holes']):\n if net == None:\n break\n else:\n n += 1\n score_dct['thru'] = n\n score_dct['line'] = '{:<3} {:<6} {:>5} {:>4}'.format(\n score_dct['pos'], score_dct['player'].nick_name, score_dct['total'], score_dct['thru'])\n board.append(score_dct)\n self.dctLeaderboard['leaderboard'] = board\n return self.dctLeaderboard", "def test_SetPlayerPuzzleScores_manyplayers(self):\r\n puzzle_id = ['1']\r\n player1_score = 0.08\r\n player2_score = 0.02\r\n response1 = self.make_puzzle_score_request(\r\n puzzle_id, player1_score, self.user\r\n )\r\n\r\n # There should now be a score in the db.\r\n top_10 = Score.get_tops_n(10, puzzle_id)\r\n self.assertEqual(len(top_10), 1)\r\n self.assertEqual(top_10[0]['score'], Score.display_score(player1_score))\r\n\r\n response2 = self.make_puzzle_score_request(\r\n puzzle_id, player2_score, self.user2\r\n )\r\n\r\n # There should now be two scores in the db\r\n top_10 = Score.get_tops_n(10, puzzle_id)\r\n self.assertEqual(len(top_10), 2)\r\n\r\n # Top score should be player2_score. Second should be player1_score\r\n self.assertAlmostEqual(\r\n top_10[0]['score'],\r\n Score.display_score(player2_score),\r\n delta=0.5\r\n )\r\n self.assertAlmostEqual(\r\n top_10[1]['score'],\r\n Score.display_score(player1_score),\r\n delta=0.5\r\n )\r\n\r\n # Top score user should be self.user2.username\r\n self.assertEqual(top_10[0]['username'], self.user2.username)", "def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]", "def post_games(game_data, sort_by=\"overall_score\", weighting=[0, 0, 0, 1]):\n valid_sort_by = [\"complexity_score\", \"gameplay_score\", \"visual_score\", \"overall_score\"]\n # test arguments\n if type(sort_by) != str and sort_by not in valid_sort_by:\n raise TypeError(\"sort_by must be one of the following: {}\".format(\", \".join(valid_sort_by)))\n if type(weighting) != list or len(weighting) != 4 or not all(type(n) is int for n in weighting):\n raise TypeError(\"weighting must be a list of 4 int values\")\n\n games = game_data\n reviews = validate_data_store(review_file, review_terms)\n\n # calculate mean of sort_by and return game data in descending order\n sorted_games = calculations.game_review_mean(games, reviews, sort_by, weighting)\n return sorted_games", "def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current", "def find_winner(players_dict: dict) -> list:\n\n podium = []\n for player, points in players_dict.items():\n podium.append((player, sum_points(points)))\n\n podium.sort(key=lambda x: x[1], reverse=True)\n\n return podium", "def get_fb_team_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball team 
rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT team_id, team_name FROM team\")\n teams = cursor.fetchall()\n\n for team_id, team_name in teams:\n cursor.execute(\"SELECT fb_team_rating FROM \\\nteam WHERE team_id = {0}\".format(team_id))\n team_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(team_rating[0]))\n mu, sigma = cursor.fetchall()[0]\n\n team_rank = float(mu) - (3 * float(sigma))\n\n # get player_ids\n cursor.execute(\"SELECT player from player_team_xref \\\nWHERE team = {0}\".format(team_id))\n players = cursor.fetchall()\n player_one = players[0]\n player_two = players[1]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_one[0]))\n player_one_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_two[0]))\n player_two_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_winner = {0} AND defense_winner = {1}) OR (offense_winner = {1} \\\nAND defense_winner = {0})\".format(player_one[0], player_two[0]))\n team_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_loser = {0} AND defense_loser = {1}) OR (offense_loser = {1} \\\nAND defense_loser = {0})\".format(player_one[0], player_two[0]))\n team_loss_count = cursor.fetchone()[0]\n\n intermediate_rank = (team_name, round(team_rank, 4),\n team_win_count, team_loss_count, player_one_name,\n player_two_name)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks", "def get_scores(self):\n return [(self.players[p.get_color()], p.get_score()) for p in self.state.get_players()]", "def sort(self):\n self.deckcards.sort()", "def __cmp__(self, other):\n if options.rank_by.lower() != \"money\":\n \"\"\"flags ▲, money ▲, hints ▼, time ▼\"\"\"\n this, that = len(self.flags), len(other.flags)\n if this == that:\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n else:\n \"\"\"money ▲, hints ▼, time ▼, flags ▲\"\"\"\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n if this == that:\n this, that = len(self.flags), len(other.flags)\n if this < that:\n return 1\n elif this == that:\n return 0\n else:\n return -1", "def opt_play():\n global piles\n global num_piles \n nim_sum = game_nim_sum()\n pile_sum = list(piles)\n for x in range(len(piles)):\n pile_sum[x] = nim_sum^piles[x]\n \n for y in range(len(piles)):\n if pile_sum[y] < piles[y]:\n return (y, piles[y]-pile_sum[y])\n\n for z in range(len(piles)):\n if piles[z] != 0:\n return (z,1)", "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", 
"def bboxes_sort(classes, scores, bboxes, top_k = 400):\n# if priority_inside:\n# inside = (bboxes[:, 0] > margin) & (bboxes[:, 1] > margin) & \\\n# (bboxes[:, 2] < 1-margin) & (bboxes[:, 3] < 1-margin)\n# idxes = np.argsort(-scores)\n# inside = inside[idxes]\n# idxes = np.concatenate([idxes[inside], idxes[~inside]])\n idxes = np.argsort(-scores)\n classes = classes[idxes][:top_k]\n scores = scores[idxes][:top_k]\n bboxes = bboxes[idxes][:top_k]\n return classes, scores, bboxes", "def sorted_matches(name_matches: Set[NameMatch],\n nmax: int = None, min_score: float = None) \\\n -> List[NameMatch]:\n\n if not name_matches:\n return []\n\n name_matches = list(name_matches)\n\n match_scores = np.asarray([nm.key_length for nm in name_matches])\n\n if min_score is not None:\n nz_i = np.nonzero(match_scores >= min_score)[0]\n if not nz_i:\n return []\n else:\n nz_i = np.arange(match_scores.shape[0])\n\n # This will do a compound sort on (score, match_name_type)\n # ASSUMES: name_type_preferences < 100\n # nz = np.around(match_scores[nz_i], self.ROUND_NBR_DECIMALS) * np.power(10, self.ROUND_NBR_DECIMALS + 2)\n # ... Don't need np.around() since match_scores are int here\n nz = match_scores[nz_i] * np.power(10, 2)\n\n name_type_preferences = np.asarray([name_matches[i].name_type_preference for i in nz_i])\n nz += name_type_preferences\n\n # Use \"mergesort\" for stable sort\n if not nmax:\n s_i = np.argsort(-nz, kind=\"mergesort\")\n else:\n s_i = np.argsort(-nz, kind=\"mergesort\")[:nmax]\n\n sorted_idxs = nz_i[s_i]\n\n return [name_matches[i] for i in sorted_idxs]", "def all_games(self):\r\n return sorted(self.games + list(g for sp in self.sub_pools for g in sp.games) + self.finals,\r\n key=lambda g: (g.datetime, g.pitch.rank))", "def playerStandings():\n # place all players in a dictionary\n player_dict = {}\n conn, c = connect()\n c.execute(\"\"\"SELECT * FROM players;\"\"\")\n for row in c.fetchall():\n player_dict[row[0]] = [row[1], 0, 0]\n\n # count the number of win and matches in for all matches\n c.execute(\"\"\"SELECT winner, loser FROM matches;\"\"\")\n for row in c.fetchall():\n if row[0] in player_dict:\n player_dict[row[0]][1] += 1\n player_dict[row[0]][2] += 1\n if row[1] in player_dict:\n player_dict[row[1]][2] += 1\n\n # compile win counts as the key to dictionary\n win_count = {}\n for i in player_dict:\n wins = player_dict[i][1]\n if wins in win_count:\n win_count[wins].append((i, player_dict[i][0],\n wins, player_dict[i][2]))\n else:\n win_count[wins] = [(i, player_dict[i][0],\n wins, player_dict[i][2])]\n\n # compile output list\n output_list = []\n for i in sorted(win_count.keys(), reverse=True):\n for j in win_count[i]:\n output_list.append(j)\n\n return output_list" ]
[ "0.77570707", "0.6806551", "0.6747068", "0.6733204", "0.66566753", "0.6630154", "0.65957487", "0.6527466", "0.64854187", "0.64108056", "0.64096063", "0.6390266", "0.6356324", "0.63238096", "0.6280292", "0.6257902", "0.6251791", "0.6247992", "0.62457335", "0.62404174", "0.62368613", "0.6233627", "0.62175924", "0.61583096", "0.61139727", "0.610417", "0.61032665", "0.6097963", "0.60863805", "0.6056896", "0.60068935", "0.60061085", "0.6000717", "0.5995107", "0.59729064", "0.5972524", "0.59320265", "0.5926777", "0.59114033", "0.58812845", "0.58555335", "0.583466", "0.5831055", "0.58043265", "0.5803239", "0.5774305", "0.57735115", "0.5761954", "0.5760888", "0.57600665", "0.57537097", "0.5751116", "0.57451993", "0.57451993", "0.57415026", "0.5733231", "0.56955063", "0.56809145", "0.5679542", "0.5673141", "0.5668813", "0.56680596", "0.56680596", "0.5665316", "0.5663336", "0.5661775", "0.5648414", "0.56195045", "0.560709", "0.5605085", "0.5602955", "0.55967134", "0.55919576", "0.5584245", "0.5583649", "0.55786055", "0.55681944", "0.55606484", "0.5553905", "0.55368656", "0.5533056", "0.5526102", "0.5522498", "0.5512894", "0.55099994", "0.547422", "0.54741824", "0.5470497", "0.5462996", "0.54620945", "0.54591495", "0.54590565", "0.54586285", "0.54548264", "0.54486275", "0.5445436", "0.5435612", "0.54333055", "0.54261965", "0.54207754" ]
0.5901569
39
Sends message to RabbitMQ exchange
def send_message(msg, exchange, key=None): print(msg) connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq')) channel = connection.channel() exchange_type = 'direct' if exchange == 'other' else 'topic' channel.exchange_declare(exchange=exchange, exchange_type=exchange_type) if key is not None and exchange == 'logs': routing_key = f'scheduler.{key}' else: routing_key = '' channel.basic_publish(exchange=exchange, routing_key=routing_key, body=msg) connection.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_rabbit_message (params ):\n print \"sending message to rabbitmq exchange\"\n logging.basicConfig()\n rabbitmq_host = params.get( 'host' )\n rabbitmq_port = params.get( 'port' )\n rabbitmq_username = params.get( 'user-name' )\n rabbitmq_password = params.get( 'password' )\n exchange_name = params.get( 'exchange' )\n routing_key = params.get( 'routing' )\n message = params.get( 'message' )\n \n amqp_url='amqp://'+rabbitmq_username+':'+rabbitmq_password+'@'+rabbitmq_host+':'+rabbitmq_port+'/%2f'\n amqp_url = str(amqp_url)\n parameters = pika.URLParameters(amqp_url)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n \n channel.basic_publish(exchange=exchange_name,routing_key=routing_key,body=message)\n ## close connection at the end \n connection.close()", "def send(self):\n if self._stopping:\n return\n\n mytype = 'text/plain'\n\n try:\n if isinstance(json.loads(self.message),dict):\n mytype = 'application/json'\n except (TypeError,json.JSONDecodeError):\n if (isinstance(self.message,dict)):\n mytype = 'application/json'\n self.message = json.dumps(self.message)\n else:\n self.message = str(self.message)\n\n properties = pika.BasicProperties(app_id='sender',\n content_type=mytype)\n\n self._channel.basic_publish(self.exchange, self.routing_key, self.message, properties)\n self._message_number += 1\n self._deliveries.append(self._message_number)\n self.logger.info('published message # %i', self._message_number)", "def send(self, json):\n try:\n retval = self._channel.basic_publish(\n exchange=self.exchange_config['name'],\n routing_key=self.queue_config['name'],\n body=json,\n mandatory=False,\n properties=self._msg_properties\n )\n\n if retval == False:\n raise exceptions.MessageNotSentException(\"Message not sent, enable pika logging for more information\")\n except Exception as e:\n raise exceptions.ConnectionException(\"Connection error\", e)", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_message(self, message):\n self.client.queue.put(message)", "def send_message(self, message):\n self.send_message_queue.put(message)", "def publish_message(message: str, broker_ip: str, exchange_name: str, exchange_type: str):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=broker_ip))\n channel = connection.channel()\n channel.exchange_declare(exchange=exchange_name, exchange_type=exchange_type, durable=True)\n channel.basic_publish(exchange=exchange_name, routing_key='', body=message)\n print(f'Published {message} to the exchange')\n connection.close()", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))", "def send(self, msg):\n self._mailbox.put(msg)", "def send_msg(self, my_queue, my_msg):", "def send(self, msg):\n return self._channel_action(msg, 1)", "def send(event):\n\n\tid = get_hostname()\n\n\tmessage = str(id) + \"|\" + str(event)\n\n\tif mq is None: # if no mq exists\n\t\tprint \"mq is None\"\n\n\telse: # if mq exists\n\t\ttry:\n\n\t\t\tmq.send(message)\n\t\t\tprint 'completed sending message'\n\n\t\texcept Exception as e:\n\n\t\t\tprint 'failed to send message: {}'.format(e)", "def publish(self, queue, message):\n # 1. Setup the channel to use to publish message\n channel_handler = ChannelHandler(self._connection)\n\n # 2. 
Open the channel before using it\n channel_handler.open_channel()\n\n # 3. Send the message via the channel\n channel_handler.send_message(self._exchange_name, queue, message)\n\n # 4. Close the channel after publishing the message\n channel_handler.close_channel()\n LOGGER.info('Bellow message `%s` is published in `%s`', message, queue)", "def publish_message(self):\n\n message_count = 0\n while message_count < self._messages:\n message_count += 1\n message_body = \"task number %i\" %(message_count)\n self._channel.basic_publish(exchange='',\n routing_key=self._queue_name,\n body=message_body,\n properties=pika.BasicProperties(\n delivery_mode=2 # make message persistant\n ))\n print(\"Published message %i\" %(message_count))\n time.sleep(self._message_interval)", "async def send_event(\n self,\n payload: bytes,\n exchange_name: Optional[str] = None,\n routing_key: Optional[str] = None,\n **kwargs,\n ) -> None:\n exchange_name = exchange_name or os.getenv(\n \"PUBLISH_EXCHANGE_NAME\", \"default.in.exchange\"\n )\n routing_key = routing_key or os.getenv(\"PUBLISH_ROUTING_KEY\", \"#\")\n try:\n await self.channel.publish(\n payload=payload,\n exchange_name=exchange_name,\n routing_key=routing_key,\n **kwargs,\n )\n except ChannelClosed as err:\n await self.configure()\n if err.message.find(\"no exchange\") > 0:\n raise ExchangeNotFound(exchange_name) # type: ignore", "def send_mail(self, msg):\n mail_queue.put(msg)", "def send_message(self, message):\n self.outbox.put(message)\n if message.TYPE_STRING != \"ack\":\n self.awaiting_ack.put((message, time.time()))", "def send(self, message):\n pass", "async def send(self, message):", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def send_message(self, message):\n pass", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def process(self, message):\n if self.debug:\n self.log(\"Publishing: \" + str(message.data))\n self.channel.basic.publish(\n AMQPMessage(str(message.data)),\n self.exchange, self.routing_key)", "def send(self, message):\n if isinstance(message, basestring):\n self.send_queue.put(message)\n else:\n self.send_queue.put(struct.pack(\"!B\", message.type_id) +\n message.pack())", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def on_reply(self, msg: str):\n self._logger.debug(f\"Got msg: {msg}\")\n self._rabbit_channel.basic_publish(exchange='', routing_key=QueueName.MSG_REPLY, body=str(msg))", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def producer():\n\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit'))\n channel = connection.channel()\n\n channel.queue_declare(queue=QUEUE_NAME)\n\n # Create two unique device ids to provide more example data\n timestamp = arrow.now().timestamp\n device_name = b'A' if timestamp % 2 == 0 else b'B'\n '''\n This creates the same hash value each time so we can use the Raspberry Pi\n serial number to create a unique ID for each device\n '''\n device_id = 
hashlib.sha1(device_name).hexdigest()\n\n # Currently a python dict\n data = {\n 'device_id': device_id,\n 'timestamp': timestamp,\n 'data': {\n 'key': 'value'\n }\n }\n\n channel.basic_publish(exchange='',\n routing_key=QUEUE_NAME,\n body=json.dumps(data)) # Encode as a JSON string\n msg = f' [x] Sent {data}'\n print(msg)\n logging.info(msg)\n connection.close()", "def _send(self, message: str) -> None:\n logger.info(\"Send: {}\".format(message['type']))\n logger.debug(\"Send: {}\".format(message))\n\n message_b = (json.dumps(message) + '\\r\\n').encode()\n self.transport.write(message_b)", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def __answer(self, msg: str):\n self.channel.basic_publish(\n exchange='main', routing_key='answer', body=msg)", "def test_basic():\n client = CloudAMQPClient(CLOUDAMQP_URL, TEST_QUEUE_NAME)\n send_msg = {'test': 'ans'}\n client.send_message(send_msg)\n received_msg = client.get_message()\n assert send_msg == received_msg\n print 'test_basic() passed'", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def send(self, msg):\n self.ws.send(json.dumps(msg))", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def publish(self, message_body, routing_key, exchange=None):\n\n publish_exchange = exchange or self.producer.exchange\n\n self.producer.publish(\n body=message_body,\n exchange=publish_exchange,\n routing_key=routing_key,\n retry=settings.PUBLISH_RETRY,\n retry_policy={\n # First retry immediately,\n 'interval_start': settings.PUBLISH_RETRY_INTERVAL_START,\n # then increase by 2s for every retry.\n 'interval_step': settings.PUBLISH_RETRY_INTERVAL_STEP,\n # but don't exceed 30s between retries.\n 'interval_max': settings.PUBLISH_RETRY_INTERVAL_MAX,\n # give up after 30 tries.\n 'max_retries': settings.PUBLISH_RETRY_MAX_RETRIES,\n # callback for logging\n 'errback': self.on_publish_error,\n 'on_revive': self.on_connection_revival\n },\n # declare exchange and queue and bind them\n declare=list(self.queues.values())) # queues is a dict.\n log.info(f'Published '\n f'message: {self.producer.exchange.name}::{routing_key}')\n log.debug(f'Published '\n f'message_body: {message_body}')", "def publish(self, message):\n pika_message = message.to_pika_message()\n self._channel.basic_publish(exchange='',\n routing_key=self.name,\n properties=pika_message.properties,\n body=message.body)", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "async def send_msg(self, msg):\n try:\n logging.info(\"Sending: %s\", msg)\n self.writer.write(msg.encode())\n await self.writer.drain()\n\n except Exception as e:\n logging.error(\"Command could not be encoded; %s\", e)", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "def send_event(event, exchange=None, routing_key=None):\n settings = get_settings()\n exchange = exchange or settings.EVENT_QUEUE_EXCHANGE\n routing_key = routing_key or getattr(settings, 'EVENT_QUEUE_ROUTING_KEY', '')\n\n if not settings.EVENT_QUEUE_URL:\n raise ImproperlyConfigured('EVENT_QUEUE_URL is not configured in settings')\n if not exchange:\n raise 
ImproperlyConfigured('EVENT_QUEUE_EXCHANGE is not configured in settings '\n 'and no exchange provided in parameters.')\n\n event.update({'timestamp': datetime.datetime.now(tz=utc).isoformat()})\n with open_channel(settings.EVENT_QUEUE_URL) as channel:\n channel.basic_publish(\n exchange=exchange,\n routing_key=routing_key,\n body=json.dumps(event),\n properties=pika.BasicProperties(delivery_mode=2, content_type='application/json')\n )", "def send(self, msg):\n self.message('Me', msg)", "def sendMessage(self):\n ps = pubsub.PubSub(from_jid=self.jid, to_jid=self.recipient, stream=self.stream, stanza_type=\"get\")\n ps.publish(self.message, self.node)\n self.stream.set_response_handlers(ps, self.onSuccess, self.onError, lambda stanza: self.onTimeout(stanza, message, recipient))\n self.stream.send(ps)", "def send_message(self, data):\n self.transport.write(data)", "def on_message(self, unused_channel, basic_deliver, properties, body):\n\n start = time.time()\n self.invocations += 1\n\n logger.info(\n u\"[{}] received message #{} from exchange {}: {}\".format(self.bot_id,\n basic_deliver.delivery_tag, self.exchange,\n body.decode('utf-8')))\n\n self.statsd.incr(self.statsd_prefix + \"message.receive\")\n\n # Ack the message before processing to tell rabbit we got it.\n # TODO before sending ack we should persist the message in a local queue to avoid the possibility of losing it\n self.acknowledge_message(basic_deliver.delivery_tag)\n\n try:\n\n try:\n json_body = json.loads(body)\n\n except ValueError as ve:\n logger.exception(\n \"[{}] Invalid JSON received from exchange: {} error: {} msg body: []\".format(self.bot_id,\n self.exchange,\n ve.message, body))\n raise\n\n else:\n response_messages = self.callback_func(json_body)\n\n if response_messages is None:\n response_messages = []\n\n logger.info(\"[{}] Sending {} response messages\".format(self.bot_id, len(response_messages)))\n\n for message in response_messages:\n self._channel.basic_publish(exchange=message.get('exchange', self.exchange),\n routing_key=message.get('queue', self.queue_name),\n body=message.get('body'))\n logger.info(\"[{}] published message {}\".format(self.bot_id, message))\n self.statsd.incr(self.statsd_prefix + \"message.publish\")\n\n except Exception as e:\n msg = \"[{}] Unexpected error - {}, message {}, from exchange {}. 
sending to error queue {}\"\n self.statsd.incr(self.statsd_prefix + \"message.error\")\n logger.exception(msg.format(self.bot_id, e, body, self.exchange, self.error_queue_name))\n self._channel.basic_publish(exchange='',\n routing_key=self.error_queue_name,\n body=body)\n\n\n exec_time_millis = int((time.time() - start) * 1000)\n self.total_execution_time += exec_time_millis\n\n logger.debug(\"Consumer {0} message handling time: {1}ms\".format(self.consumer_id, exec_time_millis))\n\n # if we have processed 100 messages, log out the average execution time at INFO then reset the total\n if self.invocations % 100 == 0:\n average_execution_time = self.total_execution_time / 100\n logger.info(\"Consumer {0} Avg message handling time (last 100): {1}ms\".format(self.consumer_id, average_execution_time))\n self.total_execution_time = 0\n\n self.statsd.timing(self.statsd_prefix + 'message.process.time', int((time.time() - start) * 1000))", "async def send_message(self, channel : str, message : str):\n await self._connection.send_message(channel, message)", "async def _send_message_in_queue(self, queue_name, body, reply_to=None):\n message = aio_pika.Message(body=body, reply_to=reply_to)\n await self.channel.default_exchange.publish(message, routing_key=queue_name)", "def publish(self, queue, message):\n\n # Instead of passing a queue to the constructor, the publish checks if\n # the target queue exists. If not, it declares the target queue\n if not self.queue:\n self.channel.queue_declare(queue=queue)\n self.queue = queue\n\n self.channel.basic_publish(\n exchange='', routing_key=queue, body=message)", "def send_message(self, msg):\n if msg is None:\n raise ValueError('message cannot be None!')\n\n if not isinstance(msg, message.Message):\n raise ValueError('message must be a type of Message')\n\n message_json = json.dumps(msg.__dict__)\n message_length = len(message_json)\n message_length_binary = struct.pack('>I', message_length)\n\n logging.info(\"Send: {0}\".format(message_json))\n\n self.sck.send(message_length_binary)\n self.sck.send(message_json)", "def send_message(self):\n\t\t\t\tpriority_message = self.message_queue.pop(0)\n\t\t\t\t# send this message to Africa's Talking or NexmoClient\n\t\t\t\tresponse = nexmo.send_message(priority_message)\n\t\t\t\t# response = africas_talking.send_message(priority_message)\n\t\t\t\tprint(response);\n\t\t\t\treturn response['message']", "async def send_message(self, message):\n if message is None:\n return\n if isinstance(message, dict):\n message = json.dumps(message)\n if isinstance(message, Number):\n message = str(message)\n\n await self.ws.send(message)", "def send(self, message, delay=0, message_attributes=None):\n if message_attributes is None:\n message_attributes = {}\n return self.client.send_message(\n QueueUrl=self.app.settings['SQS_OUTBOUND_QUEUE_URL'],\n MessageBody=json.dumps(message),\n DelaySeconds=delay,\n MessageAttributes=message_attributes,\n )", "def linkRabbit(self):\n\n print(\"Listening for RabbitMQ messages\")\n\n # RabbitMQ setup\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n\n #channel.exchange_declare(exchange='freqSweep', exchange_type='fanout')\n channel.exchange_declare(exchange='pwrSweep', exchange_type='fanout')\n\n result = channel.queue_declare(queue='', exclusive=True)\n queue_name = result.method.queue\n\n # channel.queue_bind(exchange='freqSweep', queue=queue_name)\n channel.queue_bind(exchange='pwrSweep', queue=queue_name)\n 
channel.basic_consume(queue=queue_name, on_message_callback=self.rabbitCallback, auto_ack=True)\n channel.start_consuming()", "async def send_message(self, message: dict) -> None:\n _LOGGER.debug(f\"send_message - {message}\")\n await self._client.send_message(json.dumps(message))", "def message_routed(self, message):\n \n # Send it through the transport\n self.send_message(message = message)", "def send(self, msg: Message, **kwargs):\n\n pass", "def handle_delivery(channel, method, header, body):\n print(body)", "def callback(ch, method, properties, body):\n print(f\"[X] Received %r\" % body)\n\n # wait for certain time until task is completed\n time.sleep(body.count(b'.'))\n print(\"[X] Done\")\n\n \"\"\"Acknowledge after completing task this prevents message\n message loss when the worker dies. And when worker\n dies message will be passes to another online worker.\n Caution: We are not talking about worker node of RabbitMQ.\n \"\"\"\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def send_message(self, msg):\n self.logger.debug(msg)\n self.writer.send(json.dumps(msg))", "def on_message_received(ch, method, properties, body):\n # the body contains the command flag followed by a colon ':' and the message for the drone\n # decode the body to utf8\n received_bytes = body.decode('utf-8')\n # split the received_bytes to get the command _flag and message\n recieved_message = received_bytes.split(':')\n # since rabbit mq body is a byte\n if (str(recieved_message[0]) == \"c01\"):\n # c01 - command center orders the drone to deliver a item\n print(\"Order Received from the command center to deliver an item to the following address \\n\", str(\n recieved_message[1]))\n time.sleep(2)\n # print in the drone's console that the item has been lift off\n print('\\nLifting off the Item to the delivery address.')\n print('\\nUpdating Status to the command centre ......')\n # Assume the drone has reached the delivery address . Now send a\n # message to the warehouse command center that it has reached the\n # delivery area\n time.sleep(5)\n rpc_sendback(\"c02\")\n # Assume the drone has delivered the item and issue the status message\n # to the command center\n time.sleep(5)\n rpc_sendback(\"c03\")\n # #Assume the drone has reached the parking spot and issue the message to the command center that is available for next instruction\n time.sleep(5)\n rpc_sendback(\"c04\")\n\n else:\n print(\"Received Instruction from Warehouse \" +\n str(recieved_message[1]))\n channel.basic_ack(delivery_tag=method.delivery_tag)\n # channel.start_consuming()", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "def send(self, msg: str):\n message = msg.encode(HttpClient.FORMAT)\n self.client.send(message)\n print(\"[MESSAGE] message sent:\", msg)", "def consume_message(message):\n # Assign the message to the global drone_message\n global drone_message\n drone_message = message\n # The Rabbit mq runs in the localhost and the username , password is\n # athavan\n credentials = pika.PlainCredentials('guest', 'guest')\n # Pass the mqhost , port , virtualhost and credentials\n parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)\n connection = pika.SelectConnection(parameters, on_connected)\n try:\n connection.ioloop.start()\n except KeyboardInterrupt:\n # close the connnection\n connection.close()\n # loop until we are fully closed. 
It will stop on its own\n connection.ioloop.start()", "def send_message(self, text):\n self.redis.publish('message_to_user', json.dumps((self.operator_token, text)))", "def send_email(self, message):\n pass", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "async def testsay(self, ctx, *, message):\n await ctx.send(message)", "def sendEvent(self, event):\n if not self.chan:\n log.msg(\"No AMQP channel. Dropping event.\")\n return\n\n event = copy.copy(event)\n\n # Pass timestamp on as a string, to work around Logstash bug 279.\n event['timestamp'] = repr(event['timestamp'])\n\n # Make sure isError is always a boolean\n if 'isError' in event:\n event['isError'] = bool(event['isError'])\n\n body = simplejson.dumps(event)\n content = Content(body)\n return self.chan.basic_publish(exchange=self.exchange,\n content=content)", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def publish(self, message: str) -> None:", "def amqp(self, **options):\n pass", "def push(message: str, date: datetime.datetime):\n msg_id = str(uuid.uuid4())\n pipeline = connection.pipeline()\n pipeline.set(msg_id, message)\n pipeline.zadd(QUEUE_KEY, {\n msg_id: date.timestamp()\n })\n pipeline.execute()\n logger.info(f'Save a new future email: [message: {message}, date: {date}]')", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def send(self, message) -> None:\n raise NotImplementedError", "def new_task(data):\n rabbit_host = os.getenv('RABBIT_HOST', 'localhost')\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbit_host)\n )\n channel = connection.channel()\n channel.basic_publish(\n exchange='',\n routing_key='task_queue',\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n connection.close()", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "async def send_message(user, message):\n try:\n return await user.send(message)\n except ConnectionClosed:\n pass", "def send(self, msg):\n msg = stc.pack('>I', len(msg)) + msg\n self.sendall(msg)", "def send(self, phone_number):\n #response = self.client.publish(PhoneNumber=phone_number, Message=self.message)\n return True", "def send_message(self, message):\n if self.connected:\n self.send(\n json.dumps(message.request))", "def send(self, msg):\n self.__sock.send(msg)", "def produce(self, message):\n self.producer.send(self.topic, message)", "def send(self, message):\n _check_message_type(message=message)\n response = requests.post(\n self._server_url + _SEND_URL,\n data={\"id\": self._chat_id, \"msg\": message}\n )", "def send(self,msg):\n try:\n if self.mutexCmd.tryLock(100):\n self.cmd.append(msg)\n self.mutexCmd.unlock()\n #print(\"ADD TO QUEUE: {}\".format(msg))\n else:\n print(\"WARN: cmd not added to queue\")\n except Exception as e:\n print(\"ERROR:Serial:send:\",e)\n self.ConnexionError.emit(True)", "def publish(self, message, routing_key, mandatory=True):\n\n log.debug(\"Publishing message via exchange %s: %r\", self, message)\n if self.internal:\n # Caught on the client side to prevent channel closure\n raise ValueError(\"cannot publish to internal exchange: '%s'!\" % self.name)\n\n raise gen.Return((yield self.__publish_method(\n 
self.name,\n routing_key,\n message.body,\n properties=message.properties,\n mandatory=mandatory)))", "def sendResponse(results, queue_name): \n \n url = os.getenv('CLOUDAMQP_URL')\n\n if not url:\n print(\"Environment variable CLOUDAMQP_URL not set\")\n exit(-1)\n\n try:\n params = pika.URLParameters(url)\n params.socket_timeout = 5\n connection = pika.BlockingConnection(params) # Connect to CloudAMQP\n channel = connection.channel() # start a channel\n routing_key = queue_name\n \n message = str(results)\n\n if not message:\n message = \"No results available\"\n\n message = message.encode()\n\n channel.confirm_delivery()\n \n channel.exchange_declare(exchange=queue_name, exchange_type='direct')\n result = channel.queue_declare(queue_name, durable=True, exclusive=False)\n\n binding_key = queue_name\n\n# channel.queue_bind(exchange=queue_name, queue=queue_name, routing_key=binding_key)\n \n channel.basic_publish(exchange=queue_name, \n routing_key=routing_key, body=message, \n properties=pika.BasicProperties(content_type = \"text/plain\"), mandatory=True) # False: dont care whether someone picks up message \n\n except pika.exceptions.UnroutableError:\n print(\"Error in send response. Unroutable\")\n except:\n print(\"Caught an exception\")\n\n finally:\n channel.close()\n connection.close()", "async def publish(self, body, routing_key=None):\n properties = pika.BasicProperties(\n app_id='example-publisher',\n content_type='application/json'\n )\n self.log.debug(\"Publish to %s:%s\", self.exchange,\n routing_key or self.routing_key)\n channel = await self._backend.channel('publish')\n try:\n channel.basic_publish(\n self.exchange,\n routing_key or self.routing_key or '',\n # pylint: disable=c-extension-no-member\n ujson.dumps(body, ensure_ascii=False),\n properties)\n except pika.exceptions.ChannelClosed: # pragma: no cover\n self.log.error(\n 'Message not delivered (%s): %s',\n routing_key, body\n )", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def send_message(self, msg) -> object:\n payload = {'content': str(msg)}\n try:\n return requests.post(url = self.__webhooks, data = payload)\n except exceptions.ConnectionError as cer:\n print(cer)\n exit(1)", "def send(self, msg, adb_info):\n with self._transport_lock:\n self._send(msg, adb_info)", "def send(self, signal, msg=\"\"):\n self.transport.write(str(str(signal) + msg).encode())", "def publish(self, message):\n logger.info(\"Publishing to topic [{0}]: {1}\".format(self._topic_name, message))\n self._executor.send(json.dumps({\n 'op': 'publish',\n 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id),\n 'topic': self._topic_name,\n 'msg': message\n }))", "def msg(self, chan, msg):\n self._msg(chan, msg)", "async def send_message(self, message: dict) -> None:\n await self.client.chat_postMessage(channel=self.channel_id, **message)" ]
[ "0.7955153", "0.73282826", "0.69862974", "0.6899452", "0.68242073", "0.676501", "0.6764902", "0.6729278", "0.669033", "0.668374", "0.66331697", "0.66054446", "0.65912455", "0.6582951", "0.6516775", "0.64865685", "0.6484264", "0.64665216", "0.64554477", "0.6451518", "0.643939", "0.6429638", "0.64238006", "0.64164287", "0.6393193", "0.6393193", "0.6388049", "0.63857627", "0.63579065", "0.63451535", "0.63360053", "0.63360053", "0.63360053", "0.6325418", "0.63251805", "0.629763", "0.62794745", "0.627238", "0.6263556", "0.62575674", "0.62552434", "0.6251164", "0.6245023", "0.6234112", "0.6221924", "0.6208745", "0.61995924", "0.61887056", "0.61653006", "0.61560595", "0.6149634", "0.6130501", "0.61242586", "0.61166066", "0.6111846", "0.6111056", "0.6104228", "0.61031145", "0.6096535", "0.60694206", "0.60620624", "0.6057697", "0.6052459", "0.60488856", "0.60441476", "0.6030354", "0.6025447", "0.6016566", "0.5995492", "0.5994058", "0.5989288", "0.5987619", "0.59860504", "0.5967569", "0.59666896", "0.5953347", "0.59531415", "0.59423745", "0.59392977", "0.59363896", "0.5928914", "0.592182", "0.590566", "0.58984685", "0.5882005", "0.58797914", "0.5879132", "0.5871236", "0.5867507", "0.5865527", "0.5859512", "0.5857904", "0.5855593", "0.58555496", "0.5840986", "0.5825728", "0.58124757", "0.58110625", "0.5808362", "0.58071935" ]
0.75768226
1
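Note on the row above: the send_message document declares a topic exchange for 'logs' and publishes with routing keys of the form scheduler.<key> (any other exchange is declared as direct with an empty routing key), against a broker at host 'rabbitmq'. The sketch below is a minimal complementary consumer for that exchange; it is illustrative only and not part of the dataset, and the anonymous exclusive queue, the 'scheduler.*' binding pattern, and auto_ack=True are assumptions.

```python
import pika

# Illustrative consumer sketch (assumption: not part of the dataset row above).
# Binds to the same 'logs' topic exchange that send_message() declares and
# listens for the 'scheduler.<key>' routing keys it publishes with.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq'))
channel = connection.channel()
channel.exchange_declare(exchange='logs', exchange_type='topic')

# Anonymous, exclusive queue bound to the scheduler.* pattern (assumed binding).
queue_name = channel.queue_declare(queue='', exclusive=True).method.queue
channel.queue_bind(exchange='logs', queue=queue_name, routing_key='scheduler.*')

def on_message(ch, method, properties, body):
    # body is the raw text published by send_message(); routing_key carries the key.
    print(f'{method.routing_key}: {body.decode()}')

channel.basic_consume(queue=queue_name, on_message_callback=on_message, auto_ack=True)
channel.start_consuming()
```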
The Slack Real Time Messaging API is an events firehose. This parsing function returns None unless a message is directed at the Bot, based on its ID.
def parse_slack_output(slack_rtm_output, bot): output_list = slack_rtm_output if output_list and len(output_list) > 0: for output in output_list: at_bot = "<@%s>" % bot['bot_id'] if output and 'text' in output and at_bot in output['text'] and 'channel' in output: # return text after the @ mention, whitespace removed return output['text'].split(at_bot)[1].strip(), \ output['channel'] return None, None
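For illustration only, and not part of the dataset: a polling read loop in the style of the RTM "starterbot" snippets that appear among this row's negatives could drive parse_slack_output as sketched below. It assumes the legacy slackclient package; the token, bot ID, and one-second delay are placeholder values.

```python
import time
from slackclient import SlackClient  # legacy RTM-era client, matching the surrounding documents

# Placeholders -- real values would come from configuration (assumptions).
slack_client = SlackClient('xoxb-your-token')
bot = {'bot_id': 'U0000000'}

if slack_client.rtm_connect():
    while True:
        command, channel = parse_slack_output(slack_client.rtm_read(), bot)
        if command and channel:
            # Echo the mention back to the channel it came from.
            slack_client.api_call('chat.postMessage', channel=channel, text=f'You said: {command}')
        time.sleep(1)
```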
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_bot_commands(slack_events):\n\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n print event[\"text\"]\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\r\n for event in slack_events:\r\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\r\n user_id, message = parse_direct_mention(event[\"text\"])\r\n if user_id == starterbot_id:\r\n return message, event[\"channel\"]\r\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and \"subtype\" not in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == kbot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(starterbot_id, slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == backtalker_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n # print(event)\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n # print(user_id)\n # print(starterbot_id)\n if user_id == starterbot_id:\n # print(message) # e.g. 
\"quiz me\"\n # print(event)\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == snack_the_north_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(self, slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and \"subtype\" not in event:\n user_id, message = self.parse_direct_mention(event[\"text\"])\n if user_id == self.username:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_crypto_calls(event[\"text\"])\n if user_id == moonrat_id or message != None:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == course_desc_bot_id:\n return message, event[\"channel\"]\n return None, None", "def parse_bot_commands(self, slack_events):\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n user_mention_id = event[\"user\"]\n user_id, message = self.parse_direct_mention(event[\"text\"])\n if user_id == self.starterbot_id:\n return message, event[\"channel\"], user_mention_id\n return None, None, None", "def parse_bot_commands(slack_events):\n\tfor event in slack_events:\n\t\tif event[\"type\"] == \"message\" and not \"subtype\" in event:\n\t\t\tuser_id, message = parse_direct_mention(event[\"text\"])\n\t\t\tif user_id == starterbot_id:\n\t\t\t\treturn message, event[\"channel\"]\n\treturn None, None", "def parse_bot_commands(self,slack_events):\n user = None\n for event in slack_events:\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\n print(\"User: \" + event[\"user\"])\n user_id, message = self.parse_direct_mention(event[\"text\"])\n print(message)\n if user_id == self.starterbot_id:\n print('That was toward me!!!')\n return message, event[\"channel\"], event[\"user\"]\n\n return None, None, None", "def listen_to_message(**payload):\n\n data = payload['data']\n\n try:\n message = data['text']\n user = data['user']\n message_id = data['client_msg_id']\n time = data['event_ts']\n channel = data['channel']\n process_data({'user': user, 'message': message, 'message_id': message_id, 'channel': channel, 'time': time})\n except KeyError:\n pass\n except Exception as e:\n logging.error(e)\n return None", "def __parse_channel_id(self, data):\n if 'channel_id' in data:\n return data['channel_id']\n if 'channel' in data:\n return data['channel']['id']\n return None", "def messaging_events(payload):\n data = json.loads(payload)\n message = data[\"entry\"][0][\"messaging\"]\n for event in message:\n if \"message\" in event and \"text\" in event[\"message\"]:\n # if message in event and text in message set id and text\n sender_id = event[\"sender\"][\"id\"]\n text = event[\"message\"][\"text\"]\n quick_reply_payload = None\n\n if \"quick_reply\" in event[\"message\"]:\n # if quick_reply i message set payload\n quick_reply_payload = event[\"message\"][\"quick_reply\"][\"payload\"]\n yield sender_id, text, quick_reply_payload\n else:\n yield event[\"sender\"][\"id\"], \"I can't echo this\", None", "def 
parse_slack_events(slack_events):\n for event in slack_events:\n # Filter out certain event types from processing\n if not event[\"type\"] in FILTERED_OUT_TYPES:\n debug_print(event)\n else:\n return None, None\n\n if \"thread_ts\" in event:\n ts = event[\"thread_ts\"]\n elif \"ts\" in event:\n ts = event[\"ts\"]\n else:\n ts = None\n\n # Look for qualifying events\n\n # Standard message to room\n # if event[\"type\"] == \"message\" and \"subtype\" not in event:\n if event[\"type\"] == \"message\" and \"subtype\" not in event:\n user_id, message = parse_direct_mention(event[\"text\"])\n if user_id == starterbot_id or is_active_conv(ts):\n return message, \\\n {\n \"channel\": event[\"channel\"],\n \"user\": event[\"user\"],\n \"ts\": ts\n }\n\n return None, None", "def messaging_events(payload):\n data = json.loads(payload)\n messaging_events = data[\"entry\"][0][\"messaging\"]\n for event in messaging_events:\n if \"message\" in event and \"text\" in event[\"message\"]:\n yield event[\"sender\"][\"id\"], event[\"message\"][\"text\"].encode('unicode_escape')\n else:\n yield event[\"sender\"][\"id\"], \"rez can't parse this\"", "def _parse_message(self, data):\r\n if TwitchChatStream._check_has_ping(data):\r\n self._maybe_print('got ping')\r\n self._send_pong()\r\n\r\n channel_name_or_false = TwitchChatStream._check_has_channel(data)\r\n if channel_name_or_false:\r\n current_channel = channel_name_or_false[0]\r\n print('Connected to channel: ' + current_channel)\r\n\r\n if TwitchChatStream._check_has_message(data):\r\n msg = {\r\n 'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'\r\n r'@[a-zA-Z0-9_]+'\r\n r'.+ '\r\n r'PRIVMSG (.*?) :',\r\n data)[0],\r\n 'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],\r\n 'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',\r\n data)[0]\r\n }\r\n if msg['channel'].startswith('#'):\r\n msg['channel'] = msg['channel'][1:]\r\n self._maybe_print(\r\n 'got msg: #{} @{} -- {}'.format(msg['channel'], msg['username'], msg['message']))\r\n return msg\r\n elif len(data):\r\n self._maybe_print('other data: {}'.format(data))\r\n else:\r\n return None", "def parse_webhook_data(self, data):\n\n message = data['message']\n\n self.chat_id = message['chat']['id']\n self.incoming_message_text = message['text'].lower()\n self.first_name = message['from']['first_name']\n if 'last_name' in message['from']:\n self.last_name = message['from']['last_name']", "def retrieve_message(channel, message_id):\n\n if not settings.SLACK_TOKEN:\n return {'ok': False, 'error': 'config_error'}\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_history(channel=channel, latest=message_id, inclusive=True, limit=1)\n assert response['ok'] is True\n return response\n except SlackApiError as e:\n assert e.response['ok'] is False\n return e.response", "async def interpret(self, response, id):\n if(response['id'] == id and response['event'] == \"data\"):\n return await self.on_data(response['data'])\n if(response['id'] == None and response['event'] == \"heartbeat\"):\n return await self.on_heartbeat(response['data'])\n if(response['id'] == id and response['event'] == \"subscribed\" and response['data']['success'] == True):\n return await self.on_subscribed(response['data'])\n if(response['event'] == 'error' and response['data']['success'] == False):\n return await self.on_error(response['data'])\n return None", "def _parse_response(resp):\n for header in resp['payload']['headers']:\n if header['name'] == 'From':\n email = 
_parse_email_value(header['value'])\n sender_user_id = EMAIL_TO_USER_ID.get(email)\n if not sender_user_id:\n print(\"sender_user_id not found {}\".format(email))\n return\n\n if resp['payload']['mimeType'] in ['text/html', 'text/plain']:\n encoded_data = resp['payload']['body']['data'].encode('utf-8')\n body = base64.urlsafe_b64decode(encoded_data)\n else:\n # unclear if other options may come through\n print(\"found new mimeType: {}, id: {}\".format(resp['payload']['mimeType'], resp['id']))\n return\n\n # we only care about chat labels for now\n label = 'chats' if 'chats' in resp['labelIds'] else None\n time_secs = int(resp['internalDate']) / 1000 # convert to seconds\n timestamp = datetime.fromtimestamp(time_secs)\n\n return MessageData(\n body=body,\n timestamp=timestamp,\n message_id=resp['id'],\n label=label,\n data=json.dumps(resp),\n sender_user_id=sender_user_id,\n thread_id=resp['threadId']\n )", "def handle_inbound_message():\n data = json.loads(request.data)\n\n if data[0][\"type\"] == \"message-received\":\n if \"call me\" in data[0][\"message\"][\"text\"]:\n handle_inbound_sms_call_me(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n elif \"media\" in data[0][\"message\"]:\n handle_inbound_media_mms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"], data[0][\"message\"][\"media\"])\n else:\n handle_inbound_sms(data[0][\"message\"][\"to\"][0], data[0][\"message\"][\"from\"])\n else:\n print(data)\n return \"\"", "def get_message():\n # get params from the POST request\n # try\n print(\"zeeeeeeeeeeeeeee\", request.data.decode())\n user_id = request.json['user_id']\n bot_id = request.json['bot_id'] # ex: 5005\n message = request.json['message']\n # query the concerned bot\n bot_url = \"http://localhost:\" + str(bot_id) + \"/webhooks/rest/webhook\"\n params = {\"sender\": user_id, \"message\": message}\n result = http_json_request(params, bot_url)\n new_msg = \"\"\n pile_run = deepcopy(result)\n while len(pile_run) > 0:\n msg = pile_run.pop(0)\n if \"buttons\" in msg:\n params[\"message\"] = msg[\"buttons\"][0][\"payload\"]\n pile_run.extend(http_json_request(params, bot_url))\n elif \"custom\" in msg:\n message += \"<{}>\\n\".format(msg[\"custom\"][\"type\"])\n else:\n new_msg += \"{}\\n\".format(msg[\"text\"])\n return new_msg\n # except Exception as err:\n # print(\"Erreur dans get_message() :\", err)\n # return \"Error\"", "def parse_slack_output(slack_rtm_output):\n\toutput_list = slack_rtm_output\n\tif output_list and len(output_list) > 0:\n\t\tfor output in output_list:\n\t\t\tif output and 'user' in output and output['user'] != BOT_ID:\n\t\t\t\t# if output and 'text' in output:\n\t\t\t\t\t# send_message(json.dumps(output), output['channel'])\n\n\t\t\t\tif output and 'text' in output and AT_BOT in output['text'] and output['channel'].startswith('C'):\n\t\t\t\t\t# return text after the @ mention, whitespace removed\n\t\t\t\t\tsend_message(\n\t\t\t\t\t\t'Please message me directly to play', output['channel'])\n\n\t\t\t\tif output and 'text' in output and output['channel'].startswith('D'):\n\t\t\t\t\treturn output['text'].strip().lower(), \\\n\t\t\t\t\t\toutput['channel'], output['user']\n\treturn None, None, None", "def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"", "def parse_slack_output(slack_rtm_output):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and AT_BOT in output['text']:\n # return text after the @ mention, 
whitespace removed\n return output['text'].split(AT_BOT)[1].strip(), \\\n output['channel']\n return None, None", "def parse_slack_output(slack_rtm_output):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and AT_BOT in output['text']:\n # return text after the @ mention, whitespace removed\n return output['text'].split(AT_BOT)[1].strip().lower(), \\\n output['channel']\n return None, None", "def process_message(self, slack):\n # There are two circumstances in which the bot should respond; either you\n # message it directly (in a direct channel), or you include a mention (@bot)\n # at the start of your message. It should not respond to it's own messages.\n if (slack['type'] == 'message' and not 'subtype' in slack.keys()):\n if slack['user'] != self.user_id and (slack['channel'].startswith('D') or slack['text'].startswith('<@%s>' % self.user_id)):\n self.filter_message(slack)", "def process_incoming_message(self):\n\n # Get the webhook data\n post_data = request.json\n\n # Determine the Spark Room to send reply to\n room_id = post_data[\"data\"][\"roomId\"]\n\n # Get the details about the message that was sent.\n message_id = post_data[\"data\"][\"id\"]\n message = self.spark.messages.get(message_id)\n if self.DEBUG:\n sys.stderr.write(\"Message content:\" + \"\\n\")\n sys.stderr.write(str(message) + \"\\n\")\n\n # First make sure not processing a message from the bots\n # Needed to avoid the bot talking to itself\n # We check using IDs instead of emails since the email\n # of the bot could change while the bot is running\n # for example from bot@sparkbot.io to bot@webex.bot\n if message.personId in self.spark.people.me().id:\n if self.DEBUG:\n sys.stderr.write(\"Ignoring message from our self\" + \"\\n\")\n return \"\"\n\n # Log details on message\n sys.stderr.write(\"Message from: \" + message.personEmail + \"\\n\")\n\n # Find the command that was sent, if any\n command = \"\"\n for c in self.commands.items():\n if message.text.find(c[0]) != -1:\n command = c[0]\n sys.stderr.write(\"Found command: \" + command + \"\\n\")\n # If a command was found, stop looking for others\n break\n\n # Build the reply to the user\n reply = \"\"\n\n # Take action based on command\n # If no command found, send the default_action\n if command in [\"\"] and self.default_action:\n # noinspection PyCallingNonCallable\n reply = self.commands[self.default_action][\"callback\"](message)\n elif command in self.commands.keys():\n # noinspection PyCallingNonCallable\n reply = self.commands[command][\"callback\"](message)\n else:\n pass\n\n # allow command handlers to craft their own Spark message\n if reply and isinstance(reply, Response):\n reply.roomId = room_id\n reply = reply.as_dict()\n self.spark.messages.create(**reply)\n reply = \"ok\"\n elif reply:\n self.spark.messages.create(roomId=room_id, markdown=reply)\n return reply", "def parseSlackFeed(data):\n\tslackResult = []\n\tmessages = data.get('messages')\n\tif messages:\t\n\t\tfor message in messages:\n\t\t\ttext = message.get('text')\n\t\t\tuser = message.get('user')\n\t\t\tts = message.get('ts')\n\t\t\tts = ts.split('.')[0]\n\t\t\tts = datetime.datetime.fromtimestamp(int(ts))\n\t\t\tsubtype = message.get('subtype')\n\t\t\titem = copy.deepcopy(templateResult)\n\t\t\titem['message'] = text\n\t\t\titem['author'] = user\n\t\t\titem['datetime'] = ts\n\t\t\titem['source'] = 'slack'\n\t\t\tslackResult.append(item)\n\treturn slackResult", "def 
parse_slack_output(slack_rtm_output):\r\n output_list = slack_rtm_output\r\n if output_list and len(output_list) > 0:\r\n for output in output_list:\r\n if output and 'text' in output and output['text'].startswith(AT_BOT):\r\n # return text after the @ mention, whitespace removed\r\n return output['text'].split(AT_BOT)[1].strip().lower(), \\\r\n output['channel'], output['user']\r\n return None, None, None", "def api_getmessage():\n message = receive_lyrics()\n if message != \"\":\n return jsonify(ready=True, msg=message)\n else:\n return jsonify(ready=False)", "def message(**payload):\n web_client = payload[\"web_client\"]\n\n # Getting information from the response\n data = payload[\"data\"]\n channel_id = data.get(\"channel\")\n text = data.get(\"text\")\n subtype = data.get(\"subtype\")\n ts = data['ts']\n user = data.get('username') if not data.get('user') else data.get('user')\n # Creating a Converstion object\n message = Message(ts, user, text)\n\n # Appending the converstion attributes to the logs\n conversation.append(message.toDict())\n\n if subtype == 'bot_message': return\n\n do_respond(web_client, channel_id, text)", "def parse(self, message):\n resp = json.loads((self.send_api_request(message)).decode('utf-8'))\n\n nlu_response = NLUResponse()\n nlu_response.text = message\n intent_schema = IntentSchema()\n if resp[\"result\"][\"metadata\"]:\n intent_schema.name = resp[\"result\"][\"metadata\"][\"intentName\"]\n intent_schema.confidence = resp[\"result\"][\"score\"]\n else: # fallback if no intent is given by the nlu\n intent_schema.name = \"greet\"\n intent_schema.confidence = 0.0\n nlu_response.intent = intent_schema\n print(\"Recognized Intent by Dialogflow {}\".format(intent_schema.name ))\n\n pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(resp)\n\n try:\n nlu_response.entities = []\n entities = resp[\"result\"][\"parameters\"]\n resolved_query = resp[\"result\"][\"resolvedQuery\"]\n\n for key, value in entities.items():\n if value:\n entity_schema = EntitiesSchema()\n entity_schema.start = resolved_query.find(value)\n entity_schema.end = resolved_query.find(value) + len(value)\n entity_schema.entity = key\n entity_schema.value = value\n nlu_response.entities.append(entity_schema)\n #print(\"Key: {}, Value: {}\".format(key, value))\n except Exception as err:\n logging.warning('No Entites extracted {}'.format(err))\n\n schema = RasaNLUSchema()\n data, error = schema.dump(nlu_response)\n\n return data", "def message_link(channel, message_id):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.chat_getPermalink(channel=channel, message_ts=message_id)\n assert response['ok'] is True\n return response['permalink']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def getMessage(self, msg_id: str) -> str:\n message = self.service.users().messages().get(userId='me', id=msg_id, format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n message_main_type = mime_msg.get_content_maintype()\n \n if message_main_type == 'multipart':\n for part in mime_msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif message_main_type == 'text':\n return mime_msg.get_payload()", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # 
\"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def parse_message(self, message):\n pass", "def parse_slack_output(slack_rtm_output):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and AT_BOT in output['text']:\n # return text after the @ mention, whitespace removed\n s = output['text'].split(AT_BOT)[1].strip().lower()\n exclude = set(string.punctuation)\n s = ''.join(ch for ch in s if ch not in exclude)\n return output['channel'], s, output['user'],\n return None, None, None", "def handle_event(event_data):\n # define variable of data\n message = event_data.get('event')\n channel = message.get('channel')\n msg = message.get('text').lower()\n userid = message.get('user')\n username = convert_unicode(sc.api_call('users.info', user=userid)).get('user').get('profile').get('display_name')\n text = None\n print(msg)\n\n if \"tasks\" in msg or \"task\" in msg:\n ret_data = fb.display_list('Business', False)\n ret_data = filter(lambda x:username in [names.strip() for names in x[2].split(',')], ret_data)\n text = \"Click <http://team8tasks.serveo.net|here> to go to the Task Website\\n\"\n ongoing_tasks = return_tasks(ret_data, 'ongoing')\n overdue_tasks = return_tasks(ret_data, 'overdue')\n completed_tasks = return_tasks(ret_data, 'completed')\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True, attachments=[{'text': ongoing_tasks, 'mrkdwn_in': [\"text\"], 'color': '#03572C'}, {'text': overdue_tasks, 'mrkdwn_in': [\"text\"], 'color': '#ff6666'}, {'text': completed_tasks, 'mrkdwn_in': [\"text\"]}])\n return\n elif \"hello\" in msg or \"hi\" in msg or \"hey\" in msg:\n text = \"Hello <@\" + userid + \">! 
What's up?\"\n elif \"no u\" in msg:\n text = \"no u\"\n else:\n text = 'Sorry I do not know what that command means. Try \"tasks\" to list your tasks.'\n\n sc.api_call('chat.postMessage', channel=channel, text=text, as_user=True)", "def parse_output(slack_rtm_output): # pragma: no cover\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and AT_BOT in output['text']:\n # return text after the @ mention, whitespace removed\n return output['text'].split(AT_BOT)[1].strip().lower(), \\\n output['channel'], output['user']\n return None, None, None", "def get(self, id):\n return read_msg(id)", "def process_incoming(self, msg, status):\n return msg[0]", "def fetch_email_status_by_message_id(cls, message_id: str):\n result = cls.mailjet_retrieve.messagehistory.get(id=message_id).json()\n if len(result[\"Data\"]) == 0:\n return None\n recent_event = result[\"Data\"][-1]\n return recent_event", "def get_message(self, resp):\n return resp['message']", "def _decode1(self, body, data):\r\n if \" \" in body:\r\n evtype,body = body.split(\" \",1)\r\n else:\r\n evtype,body = body,\"\"\r\n evtype = evtype.upper()\r\n if evtype == \"CIRC\":\r\n m = re.match(r\"(\\d+)\\s+(\\S+)(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?(\\s\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"CIRC event misformatted.\")\r\n ident,status,path,purpose,reason,remote = m.groups()\r\n ident = int(ident)\r\n if path:\r\n if \"PURPOSE=\" in path:\r\n remote = reason\r\n reason = purpose\r\n purpose=path\r\n path=[]\r\n elif \"REASON=\" in path:\r\n remote = reason\r\n reason = path\r\n purpose = \"\"\r\n path=[]\r\n else:\r\n path_verb = path.strip().split(\",\")\r\n path = []\r\n for p in path_verb:\r\n path.append(p.replace(\"~\", \"=\").split(\"=\")[0])\r\n else:\r\n path = []\r\n\r\n if purpose and \"REASON=\" in purpose:\r\n remote=reason\r\n reason=purpose\r\n purpose=\"\"\r\n\r\n if purpose: purpose = purpose[9:]\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n event = CircuitEvent(evtype, ident, status, path, purpose, reason,\r\n remote, body)\r\n elif evtype == \"STREAM\":\r\n #plog(\"DEBUG\", \"STREAM: \"+body)\r\n m = re.match(r\"(\\S+)\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)?:(\\d+)(\\sREASON=\\S+)?(\\sREMOTE_REASON=\\S+)?(\\sSOURCE=\\S+)?(\\sSOURCE_ADDR=\\S+)?(\\s+PURPOSE=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM event misformatted.\")\r\n ident,status,circ,target_host,target_port,reason,remote,source,source_addr,purpose = m.groups()\r\n ident,circ = map(int, (ident,circ))\r\n if not target_host: # This can happen on SOCKS_PROTOCOL failures\r\n target_host = \"(none)\"\r\n if reason: reason = reason[8:]\r\n if remote: remote = remote[15:]\r\n if source: source = source[8:]\r\n if source_addr: source_addr = source_addr[13:]\r\n if purpose:\r\n purpose = purpose.lstrip()\r\n purpose = purpose[8:]\r\n event = StreamEvent(evtype, ident, status, circ, target_host,\r\n int(target_port), reason, remote, source, source_addr,\r\n purpose, body)\r\n elif evtype == \"ORCONN\":\r\n m = re.match(r\"(\\S+)\\s+(\\S+)(\\sAGE=\\S+)?(\\sREAD=\\S+)?(\\sWRITTEN=\\S+)?(\\sREASON=\\S+)?(\\sNCIRCS=\\S+)?\", body)\r\n if not m:\r\n raise ProtocolError(\"ORCONN event misformatted.\")\r\n target, status, age, read, wrote, reason, ncircs = m.groups()\r\n\r\n #plog(\"DEBUG\", \"ORCONN: \"+body)\r\n if ncircs: ncircs = int(ncircs[8:])\r\n else: ncircs = 0\r\n if reason: reason = reason[8:]\r\n if age: age = int(age[5:])\r\n else: age = 
0\r\n if read: read = int(read[6:])\r\n else: read = 0\r\n if wrote: wrote = int(wrote[9:])\r\n else: wrote = 0\r\n event = ORConnEvent(evtype, status, target, age, read, wrote,\r\n reason, ncircs, body)\r\n elif evtype == \"STREAM_BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"STREAM_BW event misformatted.\")\r\n event = StreamBwEvent(evtype, body, *m.groups())\r\n elif evtype == \"BW\":\r\n m = re.match(r\"(\\d+)\\s+(\\d+)\", body)\r\n if not m:\r\n raise ProtocolError(\"BANDWIDTH event misformatted.\")\r\n read, written = map(long, m.groups())\r\n event = BWEvent(evtype, read, written, body)\r\n elif evtype in (\"DEBUG\", \"INFO\", \"NOTICE\", \"WARN\", \"ERR\"):\r\n event = LogEvent(evtype, body)\r\n elif evtype == \"NEWDESC\":\r\n ids_verb = body.split(\" \")\r\n ids = []\r\n for i in ids_verb:\r\n ids.append(i.replace(\"~\", \"=\").split(\"=\")[0].replace(\"$\",\"\"))\r\n event = NewDescEvent(evtype, ids, body)\r\n elif evtype == \"ADDRMAP\":\r\n # TODO: Also parse errors and GMTExpiry\r\n m = re.match(r'(\\S+)\\s+(\\S+)\\s+(\\\"[^\"]+\\\"|\\w+)', body)\r\n if not m:\r\n raise ProtocolError(\"ADDRMAP event misformatted.\")\r\n fromaddr, toaddr, when = m.groups()\r\n if when.upper() == \"NEVER\": \r\n when = None\r\n else:\r\n when = time.strptime(when[1:-1], \"%Y-%m-%d %H:%M:%S\")\r\n event = AddrMapEvent(evtype, fromaddr, toaddr, when, body)\r\n elif evtype == \"NS\":\r\n event = NetworkStatusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"NEWCONSENSUS\":\r\n event = NewConsensusEvent(evtype, parse_ns_body(data), data)\r\n elif evtype == \"BUILDTIMEOUT_SET\":\r\n m = re.match(\r\n r\"(\\S+)\\sTOTAL_TIMES=(\\d+)\\sTIMEOUT_MS=(\\d+)\\sXM=(\\d+)\\sALPHA=(\\S+)\\sCUTOFF_QUANTILE=(\\S+)\",\r\n body)\r\n set_type, total_times, timeout_ms, xm, alpha, quantile = m.groups()\r\n event = BuildTimeoutSetEvent(evtype, set_type, int(total_times),\r\n int(timeout_ms), int(xm), float(alpha),\r\n float(quantile), body)\r\n elif evtype == \"GUARD\":\r\n m = re.match(r\"(\\S+)\\s(\\S+)\\s(\\S+)\", body)\r\n entry, guard, status = m.groups()\r\n event = GuardEvent(evtype, entry, guard, status, body)\r\n elif evtype == \"TORCTL_TIMER\":\r\n event = TimerEvent(evtype, data)\r\n else:\r\n event = UnknownEvent(evtype, body)\r\n\r\n return event", "def parse_stdin(self, dispatcher):\n\n message = json.load(sys.stdin)\n\n log.debug(pretty(message))\n if self.read_token == None:\n log.warning('no read token provided - bluntly trusting incoming message')\n else:\n if 'token' not in message or message['token'] != self.read_token:\n log.warning('Unauthorized message - ignoring')\n return\n if 'authed_users' in message and len(message['authed_users']) > 0:\n self.my_username = message['authed_users'][0]\n message = message['event']\n if not 'user' in message:\n # not a user-generated message\n # probably a bot-generated message\n # TODO: maybe check only for self.my_username here - to allow bots\n # talk to each other?\n log.warning('Not a user message - ignoring')\n return\n self.reply_to_channel = message['channel']\n self.reply_to_user = message['user']\n # remove bot username from string\n text = re.sub('<@{}> *:? 
*'.format(self.my_username), '', message['text'])\n dispatcher.parse_text(text)", "def callback(parsed_msg, msg_object):\n assert msg_object.stream_id == stream_id\n assert parsed_msg in msg", "def parse_slack_output(slack_rtm_output):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n print 'output list is: ', output_list\n for output in output_list:\n \n if (output and 'text' in output and AT_BOT in output['text'] \n and output['channel'].startswith('D')\n ):\n # return text after the @ mention, whitespace removed\n return output['text'].split(AT_BOT)[1].strip().lower(), \\\n output['channel'], output['user']\n\n elif (output and 'text' in output and output['channel'].startswith('D')\n and output['user'] != BOT_ID):\n return output['text'], output['channel'], output['user']\n\n return None, None, None", "def parse_response(self, response):\n\t\tself.context = response['context']\n\t\ttext = response['output']['text']\n\t\tintents = response['intents'] #is a list, should filter\n\t\tif len(intents) > 0:\n\t\t\tintent = intents[0]['intent'] #get the intent of the message\n\t\telse:\n\t\t\tintent = \"\"\n\t\t\t\n\t\treturn str(text[0]), intent", "def _extract_updated_object(self, msg):\n value = getattr(msg, 'value', None)\n if not value:\n return None\n str_value = str(msg.value, 'utf-8')\n payload = json.loads(str_value)['payload']\n object_dict = payload['after']\n return object_dict", "def parse_event(event):\n attrs = event.get('Records')[0].get('Sns').get('MessageAttributes')\n\n try:\n if attrs.get('X-Github-Event') and attrs.get('X-Github-Event').get('Value') == 'push':\n event_obj = json.loads(event.get('Records')[0].get('Sns').get('Message'))\n found_branch = event_obj['ref'].split('/')[-1]\n if found_branch == os.environ['branch']:\n return event_obj['head_commit']['id']\n return False\n return False\n except AttributeError:\n raise", "def telebot():\n payload = json.loads(request.data)\n message = payload.get('message', payload.get('edited_message',''))\n msg_from = message.get('from')\n user_id = msg_from.get('id')\n user_first_name = msg_from.get('first_name','')\n user_last_name = msg_from.get('last_name','')\n user_is_bot = msg_from.get('is_bot')\n chat = message.get('chat')\n chat_id = chat.get('id')\n command = message.get('text')\n \n if user_is_bot or message == '':\n return jsonify({'method': 'sendMessage','chat_id' : chat_id,'text': 'Sorry I can\\'t answer you!'})\n \n bot_response = {\n 'method': 'sendMessage',\n 'chat_id' : chat_id,\n 'text': f'[{user_first_name} {user_last_name}](tg://user?id={user_id}) {command}',\n 'parse_mode':'Markdown',\n }\n\n return jsonify(bot_response)", "def pubsub_consume(event, context):\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n event_data = json.loads(pubsub_message)\n\n message = event_data['event']\n channel = message['channel']\n\n if message.get('bot_id') is None:\n text = message.get('text')\n\n if \"help\" in text:\n slack_text = \"\\n\\n *How to use the Tableau Slackbot* :robot_face: : \\n\" \\\n \"\\n 1. `list @tableau_server_app`: list views available to output to Slack\" \\\n \"\\n\\n 2. 
`gimmie @tableau_server_app What If Forecast`: generate the report\"\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n return response\n\n if \"list\" in text:\n slack_text = list('view')\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n return response\n\n if \"gimmie\" in text:\n\n filepath = time.strftime(\"%Y%m%d-%H%M%S\")\n view = event_data['event']['blocks'][0]['elements'][0]['elements'][2]['text']\n view_list = list('view')\n if view.strip() in view_list:\n generate_report(view, filepath)\n\n # Upload view from /tmp to Slack\n response = client.files_upload(\n channels=channel,\n file=\"/tmp/view_{0}.png\".format(filepath),\n title=\"View\"\n )\n\n # Delete the view generated locally\n if os.path.exists(\"/tmp/view_{0}.png\".format(filepath)):\n os.remove(\"/tmp/view_{0}.png\".format(filepath))\n\n else:\n slack_text = \":shrug: See the available views with: `list @tableau_server_app`\"\n response = client.chat_postMessage(\n channel=channel,\n text=slack_text)\n\n return response", "def respond_to_events(self):\n event_response = MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events)\n\n if event_response == []:\n return {}\n return event_response[0]", "def _process_message(self, json_object):\n\n message = json.loads(json_object)\n if message['type'] == \"relay\":\n self._process_relay(message)\n elif message['type'] == \"control\":\n self._process_control(message)\n else:\n print(\"ERROR Received message has invalid type\\n\")\n return", "def get_message(variant):\n from targets.models import Timeseries\n event = get_event(variant)\n if not event or \"message\" in event:\n return event\n message_id = next(\n (\n label.get(\"value\")\n for label in event.get(\"labels\", [])\n if label.get(\"key\") == \"message-id\"\n ),\n None,\n )\n if message_id is None:\n return {\n **event,\n \"message\": [],\n }\n s = Search().filter_by_message(message_id)\n timeseries = Timeseries.objects.filter(canonical_name=event[\"name\"]).first()\n if timeseries is not None:\n collection, _ = build_raw_graph(timeseries, \"inputs\")\n s = s.filter_by_name([ts.canonical_name for ts in collection.values()])\n count = search(s[:0]).count\n return {\n **event,\n \"message\": search(s[:count]).hits,\n }", "def parse_slack_output(ARGS, slack_rtm_output):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n if output and 'text' in output and ARGS.atbot in output['text']:\n # return text after the @ mention, whitespace removed\n return output['text'].split(ARGS.atbot)[1].strip().lower(), \\\n output['channel']\n return None, None", "def get_event(self):\n line = self.ser.readline()\n if line is None:\n return None\n line = line.strip()\n\n if len(line) > 1:\n if line == 'ok':\n return GrblEvent(ID_OK)\n elif line.startswith('error:'):\n return GrblEvent(ID_ERROR, line[6:])\n elif line.startswith('ALARM:'):\n return GrblEvent(ID_ALARM, line[6:])\n elif line[0] == '[' and line[-1] == ']':\n return GrblEvent(ID_INFO, line[1:-1]) \n elif line[0] == '<' and line[-1] == '>':\n s = self._parse_state(line[1:-1])\n return GrblEvent(ID_STATE, s)\n\n raise GrblHostError(\"Invalid event text: \"+line)", "def parse_slack_message_object(message_obj):\n metadata = dict(message_obj._body)\n try:\n metadata['channel_name'] = message_obj._client.channels[metadata['channel']]['name']\n except KeyError:\n metadata['channel_name'] = 'DIRECT_MESSAGE:{}'.format(\n 
message_obj._client.users[metadata['user']]['name']\n )\n metadata['user_name'] = message_obj._client.users[metadata['user']]['name']\n metadata['team_name'] = message_obj._client.login_data['team']['name']\n\n return metadata", "def get_message(self, user):\n return None", "def parse(message):\n try:\n return json.loads(message)\n except TypeError:\n print(\"Ignoring message because it did not contain valid JSON.\")", "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n if \"in_reply_to_status_id\" in data:\n status = Status.parse(None, data)\n return self.on_status(status)\n if \"delete\" in data:\n delete = data[\"delete\"][\"status\"]\n return self.on_delete(delete[\"id\"], delete[\"user_id\"])\n if \"disconnect\" in data:\n return self.on_disconnect_message(data[\"disconnect\"])\n if \"limit\" in data:\n return self.on_limit(data[\"limit\"][\"track\"])\n if \"scrub_geo\" in data:\n return self.on_scrub_geo(data[\"scrub_geo\"])\n if \"status_withheld\" in data:\n return self.on_status_withheld(data[\"status_withheld\"])\n if \"user_withheld\" in data:\n return self.on_user_withheld(data[\"user_withheld\"])\n if \"warning\" in data:\n return self.on_warning(data[\"warning\"])\n\n log.error(\"Received unknown message type: %s\", raw_data)", "def on_message(msg, server):\n global MY_INFO\n\n if MY_INFO is None:\n MY_INFO = server.slack.login_data['self']\n # MY_INFO['id']\n\n pprint.pprint(msg)\n text = msg.get(\"text\", \"\").lower()\n text += msg.get(\"file\", {}).get(\"preview\", \"\")\n recommendation = room_recommender(text)\n if recommendation:\n trigger_string, room_name = recommendation\n room_id = CHANNELS[room_name]['id']\n response_text = \"Hi, I noticed you were talking about “{trigger_string}”\\n You may have better luck posting this in <#{room_id}|{room_name}>\"\n response_msg = response_text.format(\n trigger_string=trigger_string,\n room_id=room_id,\n room_name=room_name\n )\n dm_user(server, msg.get('user'), response_msg)", "def read(self):\n if self._automata.any_message():\n msg = self._automata.get_message()\n # if there is a hello message\n # if len(self._buffer) == 0:\n # if we are not into reading a new herald message\n if to_string(msg) == to_string(HELLO_MESSAGE):\n # call the hello received callback\n if self._hello_received_callback:\n self._hello_received_callback()\n # exiting before continuing in the\n # creation of an herald message\n return None\n self._buffer.append(msg)\n if len(self._buffer) >= 8:\n res = SerialHeraldMessage(*self._buffer)\n self._buffer.clear()\n return res\n return None", "def parsemsg(self,s):\n\t\tprefix = ''\n\t\ttrailing = []\n\t\tif not s:\n\t\t\traise IRCBadMessage(\"Empty line.\")\n\t\tif s[0] == ':':\n\t\t\tprefix, s = s[1:].split(' ', 1)\n\n\t\tif s.find(' :') != -1:\n\t\t\ts, trailing = s.split(' :', 1)\n\t\t\targs = s.split()\n\t\t\targs.append(trailing)\n\t\telse:\n\t\t\targs = s.split()\n\t\tcommand = args.pop(0)\n\t\t\t\t\n\t\treturn {'channel':args[0],'handle':prefix.split('@')[0],'text':args[1]}", "def parse_slack_output(slack_rtm_output, slack_client):\n output_list = slack_rtm_output\n if output_list and len(output_list) > 0:\n for output in output_list:\n _, gym_bot_channels = get_list_of_channels(slack_client)\n if output and 'text' in output:\n if BOT_ID != output['user'] and output['channel'] in gym_bot_channels:\n return output['text'].lower(), output['channel'], output['user']\n elif output and 'text' in output and AT_BOT in output['text']:\n # return text after the @ mention, whitespace removed\n 
return output['text'].split(AT_BOT)[1].strip().lower(), \\\n output['channel'], output['user']\n return None, None, None", "def _parse_data(self, queue_msg):\r\n try:\r\n result = json.loads(queue_msg)\r\n except (TypeError, ValueError):\r\n log.error(\"External message should be a JSON serialized dict.\"\r\n \" Received queue_msg = %s\", queue_msg)\r\n raise\r\n msg = result['msg']\r\n return msg", "def handle_message(self, msg):\n Logger.debug(\"Slave: Trying to parse\")\n if MessageKeys.command_key in msg.fields:\n Logger.info(\"Slave: Message command: %s\", str(msg.get_command()))\n return self.messagehandler[msg.get_command()](self, msg)\n return self.handle_invalid_command(msg)", "def parse_topic(bot, trigger, from_topic, topic_reset):", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)", "def _parse_message(self, string, protocol):\n #print(\"Parsing message: %s\" % string)\n msg = parse_message_string(string)\n result = MessageResult(original_message=msg)\n\n if isinstance(msg, MethodCallMessage):\n # Handle method call\n res = self._method_call(msg)\n response_msg = ResponseMessage(result_code=0, result=res, response_to=msg.id)\n result.response = create_message_string(response_msg)\n elif isinstance(msg, SubscribeMessage):\n # Handle subscription to event\n response_msg = ResponseMessage(result_code=0, result=None, response_to=msg.id)\n result.response = create_message_string(response_msg)\n else:\n raise MessageHandleError(MessageHandleError.RESULT_UNEXPECTED_MESSAGE, msg)\n\n return result", "def message(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'message')\r\n return http.Request('GET', url), parsers.parse_json", "def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. 
A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: '+response['status'])", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def message_for_ts(message_history, ts):\n return next((\n (m, i==len(message_history)-1)\n for i, m in enumerate(message_history)\n if m['id'] == ts\n ), (None, None))", "def get_message_content(message): # pylint: disable=too-many-return-statements\n if message.content_type == \"photo\":\n return message.photo[0].file_id\n if message.content_type == \"text\":\n return message.text\n if message.content_type == \"audio\":\n return message.audio.file_id\n if message.content_type == \"document\":\n return message.document.file_id\n if message.content_type == \"sticker\":\n return message.sticker.thumb.file_id\n if message.content_type == \"video\":\n return message.video.file_id\n if message.content_type == \"voice\":\n return message.voice.file_id\n return message.text or \"None\"", "def handle_pubnub_message(self, message: dict) -> None:\n super().handle_pubnub_message(message)\n\n event = None\n\n if message.get(Attribute.CAMERA_THUMBNAIL_DATE):\n event 
= THUMBNAIL_READY\n elif message.get(Attribute.DING_DONG):\n event = DOORBELL_DING\n elif message.keys() == set([Attribute.ID, Attribute.TYPE]):\n event = VIDEO_READY\n elif message.get(Attribute.VISITOR_DETECTED) or message.keys() in [\n set([Attribute.ID, Attribute.ACTUAL_TYPE, Attribute.STATE]),\n set([Attribute.ID, Attribute.DETER_ON_DUTY, Attribute.TYPE]),\n ]:\n event = MOTION_DETECTED\n\n if event is not None:\n self.emit(event, {\"message\": message})\n\n _LOGGER.debug(\"Message received by %s: %s\", self.name, message)", "def handle_message() -> Response:\n commend = request.get_json()[\"message\"][\"text\"]\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n\n if commend == \"/start\":\n txt = \"Welcome to the shopping bot.\"+'\\n'+\"please enter category, or type popular to get the most popular searches \"\n elif str(commend).lower() in items:\n order[0] = str(commend)\n txt = \"choose color\"\n elif str(commend).lower() in colors:\n if order[0] == 0:\n txt = \"choose category\"\n order[1] = str(commend)\n txt = \"choose size\"\n elif str(commend).lower() in size:\n order[2] = str(commend)\n rec(chat_id, order)\n txt = get_url(order)\n elif str(commend).lower() == \"popular\":\n txt = get_popular(orders_dic)\n else:\n txt = \"try again\"\n # print(orders_dic)\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n print(chat_id)\n requests.get(f\"https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={chat_id}&text={txt}\")\n return Response(\"Success\")", "def get_message(self) -> Union[\"Message\", None]:\n raw_data = (\n self.raw_data.get(\"message\") or\n self.raw_data.get(\"edited_message\")\n )\n\n if raw_data:\n return Message(raw_data)\n\n return None", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def get_messages(\n event: Dict[str, Any]\n ) -> List[Dict[str, Any]]:\n reply_message = event.get(\"reply_message\", {})\n return [reply_message] if reply_message else event.get(\"fwd_messages\", [])", "def listen():\n if request.method == 'GET':\n print request\n return verify_webhook(request)\n\n if request.method == 'POST':\n payload = request.json\n event = payload['entry'][0]['messaging']\n for x in event:\n if is_user_message(x):\n text = x['message']['text']\n sender_id = x['sender']['id']\n respond(sender_id, text)\n\n return \"ok\"", "def _unwrap(self, msg):\n return msg['content']['data']", "def handleMessage(msg):", "def _read_message(self):\n if self.__eof:\n return None\n result = {}\n line = sys.stdin.readline()\n while line == '\\n':\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return None\n s = line.split(\" \", 1)\n result['_number'] = int(s[0])\n result['_text'] = s[1].strip()\n\n while not self.__eof:\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return result\n if line == '\\n':\n return result\n s = line.split(\":\", 1)\n result[s[0]] = s[1].strip()", "def _get_message(self, sender_message):\n # type: (str) -> Message or None\n st_re = self.SENDER_TEXT.search(sender_message)\n if st_re is None:\n return None\n else:\n return Message(speaker=st_re.group(1), 
text=st_re.group(2).strip())", "def _event_handler(event_type, slack_event):\n\n team_id = slack_event[\"team_id\"]\n pyBot.find_team(team_id)\n\n if event_type == \"message\":\n sender_id = None\n\n if \"user\" in slack_event[\"event\"]:\n\n sender_id = slack_event[\"event\"][\"user\"]\n\n adapted_message = sr.adapt_message_to_wit(sender_id, slack_event[\"event\"][\"text\"].encode('utf-8'))\n message = wit.treatment(adapted_message, sender_id)\n channel = slack_event[\"event\"][\"channel\"]\n print \"SLACK DEBUG \\n\"\n print message\n pyBot.send_message(sender_id, channel, message)\n\n return HttpResponse(\"OK\", 200)\n\n # ============= Event Type Not Found! ============= #\n # If the event_type does not have a handler\n #message = \"You have not added an event handler for the %s\" % event_type\n # Return a helpful error message\n #channel = slack_event[\"event\"][\"channel\"]\n\n #if \"user\" in slack_event[\"event\"]:\n # pyBot.send_message(channel, message)\n return HttpResponse(\"OK\", 200)", "def to_bot(message, bot_id):\n bot_re = \"^<@\" + bot_id + '>'\n bot_found = re.search(bot_re, message)\n\n return bot_found is not None", "def get_user_input(self):\n event_list = self.slack_client.rtm_read()\n for event in event_list:\n if event.get('text') and event.get('user') != self.bot_id:\n channel_id = event.get('channel')\n user_id = event.get('user')\n user_input = event.get('text').lower().strip()\n return user_id, channel_id, user_input\n \n return None, None, None", "def parse_telegram_update(bot, update):\n # TODO:\n if hasattr(update, 'message'):\n message = TelegramTextMessage(bot)\n message\n return update", "def check_resp(resp, user_id, message):\n event = resp[\"data\"][\"on_chat_message_sent\"][\"event\"]\n assert json.loads(event) == {\n \"user_id\": user_id,\n \"payload\": message,\n }, \"Subscription notification contains wrong data!\"", "def handle_message(self, message):", "def get_message(self, id):\n url = \"https://api.imgur.com/3/message/{0}\".format(id)\n resp = self._send_request(url)\n return Message(resp, self)" ]
[ "0.66511947", "0.66095793", "0.6603378", "0.6577621", "0.6566526", "0.6535629", "0.6535629", "0.6535629", "0.6493531", "0.6481132", "0.6459471", "0.6458375", "0.6428089", "0.63861305", "0.63535863", "0.63366276", "0.61743027", "0.6160538", "0.6147883", "0.6101927", "0.6010351", "0.58678067", "0.58636326", "0.5791689", "0.5789827", "0.57570016", "0.57068473", "0.56773764", "0.5663367", "0.5645942", "0.5644346", "0.56436265", "0.55765533", "0.55720675", "0.5567896", "0.5560825", "0.5540412", "0.5526346", "0.5506174", "0.5469078", "0.5463443", "0.5437308", "0.54301417", "0.5413305", "0.5408964", "0.54015946", "0.53679585", "0.5344884", "0.53439045", "0.53393316", "0.53351223", "0.5333613", "0.5326015", "0.53151697", "0.53137565", "0.5312436", "0.5306753", "0.52973545", "0.5295123", "0.52893144", "0.52709043", "0.5262377", "0.5261028", "0.52584994", "0.52552474", "0.5252173", "0.5251304", "0.5249449", "0.524632", "0.52375716", "0.5234564", "0.5231336", "0.5228336", "0.52262074", "0.5208797", "0.5208143", "0.51987976", "0.5196343", "0.5193794", "0.5192566", "0.51910686", "0.5190378", "0.5186599", "0.51817507", "0.5172039", "0.5171262", "0.5166936", "0.5162991", "0.51557446", "0.51520485", "0.5141472", "0.5140545", "0.51337814", "0.51254153", "0.51189744", "0.51078504", "0.509921", "0.5097769", "0.50876546", "0.5085688" ]
0.56720054
28
Import, overwrite fixtures from `[app]/fixtures`
def sync_dashboards(app=None): if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')): return if app: apps = [app] else: apps = frappe.get_installed_apps() for app_name in apps: print("Updating Dashboard for {app}".format(app=app_name)) for module_name in frappe.local.app_modules.get(app_name) or []: frappe.flags.in_import = True make_records_in_module(app_name, module_name) frappe.flags.in_import = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixture_example_data():\n import_example_data()", "def fixtures():", "def setUp(self):\n self.fixtures_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"fixtures/\"\n )", "def populate_fixtures():\n languages()\n words()", "def load_fixtures(self):\n for fixture_dir in settings.FIXTURE_DIRS:\n fixture_dir = os.path.join(fixture_dir, self.filesystem_name)\n for (root, dirs, files) in os.walk(fixture_dir):\n for file in files:\n full_file_path = os.path.join(root, *dirs, file)\n with open(full_file_path, 'rb') as f:\n self.save(os.path.relpath(full_file_path, fixture_dir), f)", "def fixtures():\n temp_path = os.path.join(os.path.dirname(__file__), 'temp')\n demo_files_path = os.path.join(os.path.dirname(__file__), 'demo_files')\n\n # Create location\n loc = Location(name='local', uri=temp_path, default=True)\n db.session.add(loc)\n db.session.commit()\n\n # Example files from the data folder\n demo_files = (\n 'markdown.md',\n 'csvfile.csv',\n 'zipfile.zip',\n 'jsonfile.json',\n 'xmlfile.xml',\n 'notebook.ipynb',\n 'jpgfile.jpg',\n 'pngfile.png',\n )\n\n rec_uuid = uuid4()\n provider = RecordIdProvider.create(object_type='rec', object_uuid=rec_uuid)\n data = {\n 'pid_value': provider.pid.pid_value,\n }\n\n record = Record.create(data, id_=rec_uuid)\n bucket = Bucket.create()\n RecordsBuckets.create(record=record.model, bucket=bucket)\n\n # Add files to the record\n for f in demo_files:\n with open(os.path.join(demo_files_path, f), 'rb') as fp:\n record.files[f] = fp\n\n record.files.flush()\n record.commit()\n db.session.commit()", "def setUp(self):\n self.app = load_app(self.application_under_test)\n\n try:\n teardown_db()\n except Exception as e:\n print('-> err ({})'.format(e.__str__()))\n\n setup_app(section_name=self.application_under_test)\n setup_db()\n\n fixtures_loader = FixturesLoader([BaseFixture]) # BaseFixture is already loaded in bootstrap\n fixtures_loader.loads(self.fixtures)", "def setUpFixture(self):\n pass", "def populate(directory=None):\n\n if directory is None:\n directory = current_app.config['FIXTURES_DIR']\n with perform(\n name='dbutils populate',\n before='Loading fixtures from directory %s' % directory,\n fail='Error occured while loading fixtures',\n ):\n load_fixtures(directory)", "def _fixture_setup(self):\n pass", "def load_initial_fixtures_func(app_name):\n return partial(_load_initial_fixtures_impl, app_name)", "def copy_basic_fixtures(cfngin_fixtures: Path, tmp_path: Path) -> None:\n copy_fixture(\n src=cfngin_fixtures / \"envs\" / \"basic.env\", dest=tmp_path / \"test-us-east-1.env\"\n )\n copy_fixture(\n src=cfngin_fixtures / \"configs\" / \"basic.yml\", dest=tmp_path / \"basic.yml\"\n )", "def load_fixtures(self, dbname, table, data):\n db = self.databases[dbname]['db']\n db.execute('BEGIN')\n for row in data:\n columns = row.keys()\n q = db.Insert(table, cols=columns)\n db.execute(q, row)\n db.execute('COMMIT')", "def add_fixtures(ctest):\n\n def test_setup(funct):\n \"\"\"Test setUp decorator to add fixture reloading.\"\"\"\n\n def decorated_setup():\n \"\"\"Decorated test setup.\"\"\"\n testdb.reload_db()\n funct()\n return decorated_setup\n\n for test in ctest._tests:\n test.setUp = test_setup(test.setUp)", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def handle(self, *args, **options):\n\n fixtures = []\n\n try:\n for root, dirs, files in os.walk(settings.BASE_DIR):\n for name in files:\n if 'fixtures' in root and name != '__init__.py':\n fixtures.append(name)\n\n call_command('loaddata', 
*fixtures)\n except Exception as e:\n raise CommandError(\n f'An error has ocurred while loading fixtures. \\n{e}'\n )\n\n self.stdout.write(self.style.SUCCESS('Fixtures have been loaded'))", "def fixture_file(*path):\n return os.path.join(os.path.dirname(__file__), 'fixtures', *path)", "def setUp(self):\n self.epath = 'flyeye/tests/fixtures'\n self.dpath = join(self.epath, 'disc.silhouette')", "def setUp(self):\n super(PlayTests, self).setUp(\n \"tests/data/shakespeare/\", \"structure.json\", \"brief_example.xml\")", "def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)", "def load_fixture(filename):\n path = os.path.join(os.path.dirname(__file__), \"fixtures\", filename)\n with open(path) as fptr:\n return fptr.read()", "def unload_fixture(apps, schema_editor):\n\n objects = deserialize_fixture()\n\n EmailTemplate = apps.get_model(\"helpdesk\", \"emailtemplate\")\n EmailTemplate.objects.filter(pk__in=[ obj.object.pk for obj in objects ]).delete()", "def discovery_data(request):\n file = request.param\n p = Path(file)\n if not p.is_absolute():\n p = Path(__file__).parent / \"fixtures\" / file\n\n with open(p) as f:\n return json.load(f)", "def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)", "def setUp(self):\n self.simplejson = sys.modules.pop('simplejson', None)\n self.json = sys.modules.pop('json', None)\n self.original_import = self.get_import()\n def block_all_jsons(name, *args, **kwargs):\n if 'json' in name:\n if name in sys.modules:\n module = sys.modules[name]\n module.name = name\n return module\n raise ImportError('Unable to find %s' % name)\n else:\n return self.original_import(name, *args, **kwargs)\n self.set_import(block_all_jsons)", "def start_fixture(self):\n pass", "def load_database(db_session, fixture):\n # TODO: the fixture file path controls\n\n # load the fixture\n datas = pickle.loads(fixture)\n db_session.add_all(datas)\n db_session.commit()\n print \"load database ok\"", "def configs() -> Path:\n return TEST_ROOT.parent / \"fixtures\" / \"configs\"", "def load_fixtures(path, **kwargs):\n from .library.files import FixtureFile\n\n if not os.path.exists(path):\n log.error(\"Path does not exist: %s\" % path)\n return None\n\n ini = ConfigParser()\n ini.read(path)\n\n fixtures = list()\n group = None\n for section in ini.sections():\n _kwargs = kwargs.copy()\n\n _section = section\n if \":\" in section:\n _section, group = section.split(\":\")\n\n if \".\" in _section:\n app_label, model_name = _section.split(\".\")\n else:\n app_label = _section\n model_name = None\n\n _kwargs['group'] = group\n _kwargs['model'] = model_name\n\n for key, value in ini.items(section):\n if key == \"db\":\n key = \"database\"\n elif key == \"nfk\":\n key = \"natural_foreign\"\n elif key == \"npk\":\n key = \"natural_primary\"\n else:\n pass\n\n _kwargs[key] = smart_cast(value)\n\n fixtures.append(FixtureFile(app_label, **_kwargs))\n\n return fixtures", "def setUp(self):\n with open(os.path.join(os.path.dirname(__file__), 'valid_resume.json')) as schema:\n self.valid_resume = json.load(schema)\n\n with open(os.path.join(os.path.dirname(__file__), 'invalid_resume.json')) as schema:\n self.invalid_resume = json.load(schema)", "def setUpClass(cls):\n super(ExistingDataTest, cls).setUpClass()\n django.setup()", "def vm_tests_fixtures():\n # FIXME: assert that repo is uptodate\n try:\n vm_fixture = json.load(open('fixtures/vmtests.json', 'r'))\n except IOError 
as e:\n raise IOError(\"Could not read vmtests.json from fixtures. Make sure you did 'git submodule init'!\")\n check_testdata(vm_fixture.keys(), [u'boolean', u'suicide', u'arith', u'mktx'])\n return vm_fixture", "def _load_test_data(self):\n self._save_test_data()", "def tearDownFixture(self):\n pass", "def fixtures_path():\n return path.abspath(\n path.join(path.dirname(path.realpath(__file__)),\n \"..\",\n \"conf\",\n \"jenkins\",\n \"update-templates.xml\"))", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def _init_test_project_dir(self, project_dir):\n templates = glob.glob(f'{project_dir}/*.yml.template')\n for template_path in templates:\n # Replace env vars in template\n with open(template_path, 'r', encoding='utf-8') as f_template:\n yaml = f_template.read()\n\n # Detect if every env var configured for the template\n template = os.path.basename(template_path)\n yaml_path = template_path.replace('.template', '')\n env_connectors = self._find_env_conn_by_template_name(template)\n is_configured = self._is_env_connector_configured(env_connectors)\n\n # \"Render\" the template and save to file if env vars configured\n if is_configured:\n template_vars = set(re.findall(r'\\$\\{(.+?)\\}', yaml))\n for var in template_vars:\n yaml = yaml.replace(\n f'${{{var}}}', self._all_env_vars_to_dict().get(var)\n )\n\n # Write the template replaced YAML file\n with open(yaml_path, 'w+', encoding='utf-8') as f_render:\n f_render.write(yaml)\n\n # Delete if exists but not configured\n else:\n try:\n os.remove(yaml_path)\n except OSError:\n pass", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def fixture_fixtures_dir() -> Path:\n _dir_path = Path(\"tests/fixtures\")\n return _dir_path", "def setUpBeforeMigration(self, apps):\n pass", "def import_realia(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('git pull')\n run('cd import_scripts;../bin/python import_realia.py load_fixture')\n run('bin/django update_index dasa.Realia')", "def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "def fixture(fname, label=\"reference\"):\n return os.path.join(FIXTURES, label, fname)", "def test_load_fixture(caplog):\n caplog.set_level('INFO')\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n\n assert response.status_code == status.HTTP_201_CREATED\n\n adviser = Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n assert adviser.email == ADVISER_FIXTURE['fields']['email']\n assert adviser.first_name == ADVISER_FIXTURE['fields']['first_name']\n assert adviser.last_name == ADVISER_FIXTURE['fields']['last_name']\n assert str(adviser.dit_team_id) == ADVISER_FIXTURE['fields']['dit_team']\n\n fixture_info = [\n 'Loading fixture: [',\n ' {',\n ' \"fields\": {',\n f' \"dit_team\": \"{ADVISER_FIXTURE[\"fields\"][\"dit_team\"]}\",',\n f' \"email\": \"{ADVISER_FIXTURE[\"fields\"][\"email\"]}\",',\n f' \"first_name\": \"{ADVISER_FIXTURE[\"fields\"][\"first_name\"]}\",',\n f' \"last_name\": \"{ADVISER_FIXTURE[\"fields\"][\"last_name\"]}\"',\n ' },',\n ' \"model\": \"company.advisor\",',\n f' \"pk\": \"{ADVISER_FIXTURE[\"pk\"]}\"',\n ' }',\n ']',\n ]\n assert caplog.messages == ['\\n'.join(fixture_info)]", 
"def _prepare_fixtures_dir(path):\n if not(os.path.exists(path)):\n os.mkdir(path)", "def fixturize(app=\"All\"):\n\n if app == \"All\":\n local('python manage.py dumpdata resources > resources/fixtures/resources.json')\n local('python manage.py dumpdata military > military/fixtures/military.json')\n local('python manage.py dumpdata arenas > arenas/fixtures/arena.json')\n local('python manage.py dumpdata sciences > sciences/fixtures/technologies.json')\n local('python manage.py dumpdata auth.Group > fixtures/groups.json')\n elif app == \"resource\":\n local('python manage.py dumpdata resources > resources/fixtures/resources.json')\n elif app == \"military\":\n local('python manage.py dumpdata military > military/fixtures/military.json')\n elif app == \"arena\":\n local('python manage.py dumpdata arenas > arenas/fixtures/arena.json')\n elif app == \"sciences\":\n local('python manage.py dumpdata sciences > sciences/fixtures/technologies.json')\n elif app == \"groups\":\n local('python manage.py dumpdata auth.Group > fixtures/groups.json')", "def setUpTestData(cls):\n cls.emulate_off_api_manager_categories()\n cls.emulate_off_api_manager_products()\n cls.db_manager = Command()", "def setUp_extra(self):\n [self.testproject,\n self.root,\n self.projectadmin,\n self.participant,\n self.signedup_user] = self._create_dummy_project(\"test-project\")\n \n self.participant2 = self._create_random_user(\"participant2_\")\n self._register(self.participant2,self.testproject)\n \n \n from django.core.files.storage import default_storage\n #this fake file is included on test pages later to test rendering\n default_storage.add_fake_file(\"fakeinclude.html\",\"This is some fake include content:\" \n \"here is the content of fakecss\" \n \"<somecss>{% insert_file \"+default_storage.FAKE_DIRS[1]+\"/fakecss.css %} </somecss>and a \"\n \"non-existant include: <nonexistant>{% insert_file nothing/nonexistant.txt %}</nonexistant> Also\"\n \" try to include scary file path <scary>{% insert_file ../../../allyoursecrets.log %}</scary>\")", "def setUp(self):\n self.app = Flask(__name__)\n db.init_app(self.app)\n with self.app.app_context():\n db.create_all()\n self.populate_db() # Your function that adds test data.", "def setUp(self):\n with open('test/0a6a357e.json') as read_file:\n self.tx_json_0a6a357e = json.load(read_file)\n with open('test/bip69-synth.json') as read_file:\n self.bip69_synth = json.load(read_file)", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def install(clean=False):\n\n # Drop tables\n if clean:\n app.logger.info(\"Dropping all tables...\")\n from utils import drop_all_tables\n drop_all_tables(app)\n\n # Create tables\n app.logger.info(\"Creating all tables...\")\n import cavavin.models # NOQA: to register all Model\n db.create_all()\n\n # Import fixture data\n app.logger.info(\"Installing default data...\")\n charlatan_manager = charlatan.FixturesManager(db_session=db.session, 
use_unicode=True)\n charlatan_manager.load('data/countries.yaml', models_package='cavavin.models')\n charlatan_manager.load('data/users.yaml', models_package='cavavin.models')\n charlatan_manager.install_all_fixtures()", "def orlov_fixture(request, workspace, minicap):\n logger.info('Orlov Fixture : setup minicap service and other.')\n request.cls.workspace = workspace\n request.cls.minicap = minicap\n request.cls.evidence_dir = request.cls.workspace.mkdir('tmp\\\\evidence')\n request.cls.video_dir = request.cls.workspace.mkdir('tmp\\\\video')\n yield\n logger.info('Olorv Fixture : teardown minicap service and other.')", "def setup_module():\n DB.init('tweets_test')\n seed_collection_with_csv(utils.get_env('DATA_FILENAME'))", "def restore_base_data():\n\n db_dirname = os.path.dirname(os.path.realpath(__file__))\n shutil.copyfile(src=os.path.join(db_dirname, 'consolemini.base.json'),\n dst=os.path.join(db_dirname, 'consolemini.test.json'))", "def create_testing_fixtures(project_path):\n functional_testing_tables = [\n \"fedora_software.FeaturedApp\",\n \"fedora_software.Component\",\n \"fedora_software.ComponentIcon\"\n ]\n app_path = project_path.child(\"fedora_software\")\n fixtures_path = app_path.child(\"fixtures\")\n functional_testing_fixture = \"fedora_software_testing.json\"\n return create_fixtures(\n functional_testing_tables,\n project_path,\n fixtures_path,\n functional_testing_fixture\n )", "def setUp(self):\r\n\r\n \r\n self.client = app.test_client()\r\n # Show Flask errors that happen during tests\r\n app.config['TESTING'] = True\r\n \r\n connect_to_db(server.app)\r\n db.create_all()\r\n test_seed.create_test_data()", "def setUp(self):\n fixtures_dir = os.path.abspath(os.path.join(\n os.path.dirname(__file__), 'fixtures'))\n\n config = get_collector_config('NagiosPerfdataCollector', {\n 'perfdata_dir': fixtures_dir\n })\n\n self.collector = NagiosPerfdataCollector(config, None)\n self.fixtures = os.listdir(fixtures_dir)", "def setUp(self):\n\n app.config.from_object(config['testing'])\n db.create_all()", "def setUp(self):\n self.fixtureFile = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixtureList = [\"my\", \"written\", \"text\"]\n self.fixtureListEmptyStrings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixtureListTrailingEmptyString = [\"my\", \"written\", \"text\", \"\", \"\"]", "def fixture(filename):\n abs_filename = os.path.join(\n os.path.dirname(__file__),\n 'fixtures',\n filename,\n )\n\n with open(abs_filename, 'r') as fixture_file:\n return fixture_file.read()", "def setup(base_dir, fixture_prefixes=None):\n global test_dir\n test_dir = base_dir", "def setUp(self, cloud_config_fixture='clouds.yaml'):\n\n super(TestCase, self).setUp()\n\n # Sleeps are for real testing, but unit tests shouldn't need them\n realsleep = time.sleep\n\n def _nosleep(seconds):\n return realsleep(seconds * 0.0001)\n\n self.sleep_fixture = self.useFixture(\n fixtures.MonkeyPatch('time.sleep', _nosleep)\n )\n self.fixtures_directory = 'openstack/tests/unit/fixtures'\n self.os_fixture = self.useFixture(\n os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID)\n )\n\n # Isolate openstack.config from test environment\n self.os_cloud_fixture = self.useFixture(\n fixtures.EnvironmentVariable('OS_CLOUD'),\n )\n config = tempfile.NamedTemporaryFile(delete=False)\n cloud_path = os.path.join(\n self.fixtures_directory,\n 'clouds',\n cloud_config_fixture,\n )\n with open(cloud_path, 'rb') as f:\n content = f.read()\n config.write(content)\n config.close()\n\n vendor 
= tempfile.NamedTemporaryFile(delete=False)\n vendor.write(b'{}')\n vendor.close()\n\n self.config = occ.OpenStackConfig(\n config_files=[config.name],\n vendor_files=[vendor.name],\n secure_files=['non-existant'],\n )\n\n self.oslo_config_dict = {\n # All defaults for nova\n 'nova': {},\n # monasca-api not in the service catalog\n 'monasca-api': {},\n # Overrides for heat\n 'heat': {\n 'region_name': 'SpecialRegion',\n 'interface': 'internal',\n 'endpoint_override': 'https://example.org:8888/heat/v2',\n },\n # test a service with dashes\n 'ironic_inspector': {\n 'endpoint_override': 'https://example.org:5050',\n },\n }\n\n # FIXME(notmorgan): Convert the uri_registry, discovery.json, and\n # use of keystone_v3/v2 to a proper fixtures.Fixture. For now this\n # is acceptable, but eventually this should become it's own fixture\n # that encapsulates the registry, registering the URIs, and\n # assert_calls (and calling assert_calls every test case that uses\n # it on cleanup). Subclassing here could be 100% eliminated in the\n # future allowing any class to simply\n # self.useFixture(openstack.cloud.RequestsMockFixture) and get all\n # the benefits.\n\n # NOTE(notmorgan): use an ordered dict here to ensure we preserve the\n # order in which items are added to the uri_registry. This makes\n # the behavior more consistent when dealing with ensuring the\n # requests_mock uri/query_string matchers are ordered and parse the\n # request in the correct orders.\n self._uri_registry = collections.OrderedDict()\n self.discovery_json = os.path.join(\n self.fixtures_directory, 'discovery.json'\n )\n self.use_keystone_v3()\n self.__register_uris_called = False", "def _find_fixtures(self, start_dir):\r\n fixtures = []\r\n def _find(arg, dirname, names):\r\n if (dirname.endswith('fixtures')) and (dirname.find('unit_test')==-1):\r\n for name in names:\r\n if (name.endswith(FIXTUERS_EXT)) and (name.find('initial_data')==-1):\r\n fixtures.append(name.replace(FIXTUERS_EXT, ''))\r\n os.path.walk(start_dir, _find, None)\r\n \r\n return fixtures", "def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))", "def setUp(self):\n self.fixture_file = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixture_list = [\"my\", \"written\", \"text\"]\n self.fixture_list_empty_strings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixture_list_trailing_empty_strings = [\"my\", \"written\", \"text\", \"\", \"\"]", "def app_settings_fixture(app, config_file):\n settings = AppSettings(app.app_id)\n settings.settings[SETTINGS_INSTANCE_ID] = config_file[CONF_INSTANCE_ID]\n return settings", "def app_fixture(hass, config_file):\n app = AppEntity(Mock())\n app.apply_data({\n 'appName': APP_NAME_PREFIX + str(uuid4()),\n 'appId': str(uuid4()),\n 'appType': 'WEBHOOK_SMART_APP',\n 'classifications': [CLASSIFICATION_AUTOMATION],\n 'displayName': 'Home Assistant',\n 'description':\n hass.config.location_name + \" at \" + hass.config.api.base_url,\n 'singleInstance': True,\n 'webhookSmartApp': {\n 'targetUrl': webhook.async_generate_url(\n hass, hass.data[DOMAIN][CONF_WEBHOOK_ID]),\n 'publicKey': ''}\n })\n app.refresh = Mock()\n app.refresh.return_value = mock_coro()\n app.save = Mock()\n app.save.return_value = mock_coro()\n settings = AppSettings(app.app_id)\n settings.settings[SETTINGS_INSTANCE_ID] = config_file[CONF_INSTANCE_ID]\n app.settings = Mock()\n app.settings.return_value = mock_coro(return_value=settings)\n return app", "def setUpClass(cls):\n 
super(ExistingDataSourceTest, cls).setUpClass()\n django.setup()", "def setUp(self):\n self.setUpPyfakefs()\n\n self.fs.create_file(f\"{BASE_PATH}/_info.yml\", contents=CATEGORY_INFO)\n self.fs.create_file(f\"{BASE_PATH}/root.md\", contents=MARKDOWN_WITH_METADATA)\n self.fs.create_file(\n f\"{BASE_PATH}/root_without_metadata.md\", contents=MARKDOWN_WITHOUT_METADATA\n )\n self.fs.create_file(f\"{BASE_PATH}/not_a_page.md/_info.yml\", contents=CATEGORY_INFO)\n self.fs.create_file(f\"{BASE_PATH}/category/_info.yml\", contents=CATEGORY_INFO)\n self.fs.create_file(\n f\"{BASE_PATH}/category/with_metadata.md\", contents=MARKDOWN_WITH_METADATA\n )\n self.fs.create_file(f\"{BASE_PATH}/category/subcategory/_info.yml\", contents=CATEGORY_INFO)\n self.fs.create_file(\n f\"{BASE_PATH}/category/subcategory/with_metadata.md\", contents=MARKDOWN_WITH_METADATA\n )\n self.fs.create_file(\n f\"{BASE_PATH}/category/subcategory/without_metadata.md\",\n contents=MARKDOWN_WITHOUT_METADATA\n )\n\n temp = f\"{BASE_PATH}/tmp\" # noqa: S108\n self.fs.create_file(f\"{temp}/_info.yml\", contents=CATEGORY_INFO)\n self.fs.create_file(f\"{temp}.md\", contents=MARKDOWN_WITH_METADATA)\n self.fs.create_file(f\"{temp}/category/_info.yml\", contents=CATEGORY_INFO)\n self.fs.create_dir(f\"{temp}/category/subcategory_without_info\")", "def setUp(self):\n if os.path.exists('file.json'):\n os.remove(\"file.json\")", "def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)", "def setUp(self):\n self.setUpPyfakefs()", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def create_db():\n database.db.create_all()\n get_ulm()\n for fixture_file in glob.glob(config.DevelopmentConfig.FIXTURES_DIRS + '/*.json'):\n fixtures = JSONLoader().load(fixture_file)\n load_fixtures(database.db, fixtures)\n MigrationManager().stamp_db()", "def setUp(self):\n self.test_data = self.read_data('test_data/clients.txt')", "def test_00_setup(self):\n with mock_api(magento_base_responses):\n import_batch(self.session, 'magento.website', self.backend_id)\n import_batch(self.session, 'magento.store', self.backend_id)\n import_batch(self.session, 'magento.storeview', self.backend_id)\n import_record(self.session, 'magento.res.partner.category',\n self.backend_id, 1)", "def setUp(self):\n self.test_data = MockPyMySqlDataSource().load()", "def create_fake_data():\n User.create_fake_users()", "def testResetFixture(self):\n self.users.TESTAPI_resetFixture()\n self.users.add(\"katie\", \"password\")\n self.assertEqual(len(models.UsersModel.objects.all()), 1)\n self.users.TESTAPI_resetFixture()\n 
self.assertEqual(len(models.UsersModel.objects.all()), 0)", "def ignore_test_load_and_persist_without_train(self):\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n # interpreter = trainer.train(train_data)\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse(\"hello\") is not None\n assert interpreter_loaded.parse(\"Hello today is Monday, again!\") is not None\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=False)", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def fixture(filename):\n\n absolute_filename = os.path.join(\n os.path.dirname(__file__),\n 'fixtures',\n filename,\n )\n\n with open(absolute_filename, 'r') as fixture_file:\n\n return fixture_file.read()", "def setUp(self):\n super().setUp()\n self.file_path = 'file.json'", "def fixture_runner():\n return CliRunner()", "def tearDown(self):\n try:\n os.remove(self.fixture_file)\n except OSError:\n pass", "def collect_articles():\n # Create a json fixture for all articles which don't exist in the current\n # fixture file\n # TODO: Parse content before writing content to fixture\n for article in get_new_local_articles():\n with open(article_root + \"/\" + article, 'r') as f:\n #extension = article.split(\".\")[1]\n content = f.read()\n fixture = construct_fixture(pk=None, title=filesystem_to_pretty(article.split(\".\")[0]),\n content=content)\n with open(fixture_dir + \"/{}.json\".format(article.split(\".\")[0]), 'w+') as f:\n f.write(fixture)", "def setUp(self):\n db.create_all()", "def setUp(self):\n\n self.client = app.test_client()\n app.config['Testing'] = True\n app.config['SECRET_KEY'] = 'test'\n connect_to_db(app, db_uri='postgresql:///testdb', echo=False)\n db.create_all()\n\n example_data() # Need to expand!", "def test_export_datasources_original(app_context, fs):\n # pylint: disable=reimported, redefined-outer-name\n import superset.cli.importexport # noqa: F811\n\n # reload to define export_dashboards correctly based on the\n # feature flags\n importlib.reload(superset.cli.importexport)\n\n runner = app.test_cli_runner()\n response = runner.invoke(\n superset.cli.importexport.export_datasources, (\"-f\", \"datasources.yaml\")\n )\n\n assert response.exit_code == 0\n\n assert Path(\"datasources.yaml\").exists()\n\n # check that file is valid JSON\n with open(\"datasources.yaml\") as fp:\n contents = fp.read()\n yaml.safe_load(contents)", "def fixture_file(*args):\n with open(os.path.join(FIXTURE_DATA, *args)) as file:\n yield file.read()", "def config_file_fixture():\n return {\n CONF_INSTANCE_ID: str(uuid4()),\n CONF_WEBHOOK_ID: webhook.generate_secret()\n }", "def setUp(self):\n super().setUp()\n Tenant.objects.get_or_create(schema_name=\"public\")", "def load_static():\n\n for i, row in 
enumerate(open(\"seed_data/homepage_feature.static\")):\n row = row.rstrip()\n title, body, img_path_xs, img_path_sm, img_path_md, img_path_lg, is_active = row.split(\"|\")\n homepage_feature = HomepageFeatureModel(title=title,\n body=body,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg,\n is_active=is_active)\n db.session.add(homepage_feature)\n\n for i, row in enumerate(open(\"seed_data/help_article.static\")):\n row = row.rstrip()\n title, description, body = row.split(\"|\")\n help_article = HelpArticleModel(title=title, \n description=description, \n body=body)\n db.session.add(help_article)\n\n db.session.commit()", "def setUp(self):\n self.app = create_app(\"configmodule.TestingConfig\")\n self.app.testing = True\n\n self.client = self.app.test_client()\n\n with self.app.app_context():\n db.drop_all()\n db.create_all()", "def setUp(self):\n self.path = os.path.dirname(os.path.abspath(__file__))", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # create new tables\n self.app = app.test_client()", "def tearDown(self):\n try:\n os.remove(self.fixtureFile)\n except OSError:\n pass", "def load_label(self, fixture_label):\n show_progress = self.verbosity >= 3\n for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):\n _, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))\n open_method, mode = self.compression_formats[cmp_fmt]\n fixture = open_method(fixture_file, mode)\n try:\n self.fixture_count += 1\n objects_in_fixture = 0\n loaded_objects_in_fixture = 0\n if self.verbosity >= 2:\n self.stdout.write(\"Installing %s fixture '%s' from %s.\" %\n (ser_fmt, fixture_name, humanize(fixture_dir)))\n\n objects = serializers.deserialize(ser_fmt, fixture,\n using=self.using, ignorenonexistent=self.ignore)\n\n create_dict = OrderedDict()\n\n for object in objects:\n obj = object.object\n objects_in_fixture += 1\n model = obj.__class__\n if router.allow_migrate_model(self.using, model):\n self.models.add(model)\n if model in create_dict.keys():\n create_dict[model].append(obj)\n else:\n create_dict[model] = [obj]\n for model in create_dict.keys():\n objs = create_dict[model]\n loaded_objects_in_fixture += len(objs)\n try:\n model.objects.using(self.using).bulk_create(objs)\n if show_progress:\n self.stdout.write(\n '\\rProcessed %i object(s).' % loaded_objects_in_fixture,\n ending=''\n )\n except (DatabaseError, IntegrityError) as e:\n e.args = (\"Could not load %(app_label)s.%(object_name)s: %(error_msg)s\" % {\n 'app_label': model._meta.app_label,\n 'object_name': model._meta.object_name,\n 'error_msg': force_text(e)\n },)\n raise\n if objects and show_progress:\n self.stdout.write('') # add a newline after progress indicator\n self.loaded_object_count += loaded_objects_in_fixture\n self.fixture_object_count += objects_in_fixture\n except Exception as e:\n if not isinstance(e, CommandError):\n e.args = (\"Problem installing fixture '%s': %s\" % (fixture_file, e),)\n raise\n finally:\n fixture.close()\n\n # Warn if the fixture we loaded contains 0 objects.\n if objects_in_fixture == 0:\n warnings.warn(\n \"No fixture data found for '%s'. 
(File format may be \"\n \"invalid.)\" % fixture_name,\n RuntimeWarning\n )", "def test_sync_incorrect_user_yaml_file(syncer, monkeypatch, db_session):\n path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"data/yaml/incorrect_user.yaml\"\n )\n monkeypatch.setattr(syncer, \"sync_from_local_yaml_file\", path)\n with pytest.raises(AssertionError):\n syncer.sync()\n assert syncer.arborist_client.create_resource.not_called()\n assert syncer.arborist_client.create_role.not_called()\n assert syncer.arborist_client.create_policy.not_called()" ]
[ "0.7610767", "0.71214706", "0.70407146", "0.6833973", "0.6788867", "0.6743615", "0.6529511", "0.650409", "0.64089555", "0.6364059", "0.6342745", "0.6301757", "0.6232977", "0.6109557", "0.61017966", "0.6088769", "0.58966875", "0.5851127", "0.5831919", "0.58071595", "0.57650894", "0.572647", "0.5715614", "0.5715208", "0.57128143", "0.57074803", "0.5701131", "0.56845826", "0.5684132", "0.5661477", "0.5637598", "0.5635623", "0.5610248", "0.5607635", "0.55996245", "0.55979824", "0.55977494", "0.55973256", "0.5588321", "0.55852664", "0.5566494", "0.55662715", "0.5562481", "0.5557076", "0.55403626", "0.5539312", "0.55392176", "0.55389243", "0.55156076", "0.5512505", "0.54853415", "0.5484043", "0.5478358", "0.5475268", "0.54705656", "0.5460056", "0.5458086", "0.5458063", "0.54525495", "0.5447585", "0.5440028", "0.5437123", "0.5434328", "0.54236966", "0.54222536", "0.54134804", "0.5410532", "0.5407699", "0.53971803", "0.53945893", "0.5389827", "0.5384568", "0.53779125", "0.5371905", "0.5371905", "0.53560674", "0.5354102", "0.5350095", "0.5345053", "0.53420293", "0.53418326", "0.534047", "0.5335163", "0.53332883", "0.533142", "0.5328085", "0.5325256", "0.53229755", "0.5313303", "0.5312311", "0.53037477", "0.5302447", "0.52920306", "0.5290998", "0.5280967", "0.52733886", "0.5273289", "0.52711904", "0.5256907", "0.52496594", "0.5246231" ]
0.0
-1
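
Several of the snippets in the negatives above reduce to the same pattern: load a serialized fixture into a fresh test database before the tests run. A minimal sketch of that pattern for Django, assuming a hypothetical initial_data.json fixture and using the auth User model only as a stand-in:

from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase

class FixtureLoadingTest(TestCase):
    def setUp(self):
        # Same effect as `manage.py loaddata initial_data.json`: deserialize the
        # JSON fixture and insert its objects into the per-test database.
        call_command("loaddata", "initial_data.json", verbosity=0)

    def test_fixture_rows_present(self):
        # Stand-in assertion; check a model that the fixture actually populates.
        self.assertTrue(User.objects.exists())
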
Used with optparser for multiple arguments of the same type.
def key_callback(option,opt_str,value,parser):
    if "--epi-key" in opt_str:
        parser.values.epi_keys.append(value)
    elif "--exclude" in opt_str:
        parser.values.exclude_paths.append(value)
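
A minimal sketch of how the callback above is typically registered with optparse so the same option can be given several times and every value is collected; the empty-list defaults, dest names, and sample command line are assumptions made for illustration:

from optparse import OptionParser

# Wire the callback above to two options; with action="callback" optparse does
# not store the value itself, so the callback appends it to the lists instead.
parser = OptionParser()
parser.set_defaults(epi_keys=[], exclude_paths=[])  # lists the callback appends to
parser.add_option("--epi-key", action="callback", callback=key_callback,
                  type="string", dest="epi_keys")
parser.add_option("--exclude", action="callback", callback=key_callback,
                  type="string", dest="exclude_paths")

options, _ = parser.parse_args(
    ["--epi-key", "TR", "--epi-key", "TE", "--exclude", "/tmp/skip"])
print(options.epi_keys)       # ['TR', 'TE']
print(options.exclude_paths)  # ['/tmp/skip']
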
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Args(parser):", "def consume_options(cls, data, hittype, args):\n opt_position = 0\n data[\"t\"] = hittype # integrate hit type parameter\n if hittype in cls.option_sequence:\n for expected_type, optname in cls.option_sequence[hittype]:\n if opt_position < len(args) and isinstance(args[opt_position],\n expected_type):\n data[optname] = args[opt_position]\n opt_position += 1", "def parse_arguments(args):", "def _process_args(self, largs, rargs, values):\n while rargs:\n arg = rargs[0]\n try:\n if arg[0:2] == \"--\" and len(arg) > 2:\n # process a single long option (possibly with value(s))\n # the superclass code pops the arg off rargs\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n # process a cluster of short options (possibly with\n # value(s) for the last one only)\n # the superclass code pops the arg off rargs\n self._process_short_opts(rargs, values)\n else:\n # it's either a non-default option or an arg\n # either way, add it to the args list so we can keep\n # dealing with options\n del rargs[0]\n raise Exception\n except:\n largs.append(arg)", "def _process_args(self, largs, rargs, values):\n while rargs:\n arg = rargs[0]\n try:\n if arg[0:2] == \"--\" and len(arg) > 2:\n # process a single long option (possibly with value(s))\n # the superclass code pops the arg off rargs\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n # process a cluster of short options (possibly with\n # value(s) for the last one only)\n # the superclass code pops the arg off rargs\n self._process_short_opts(rargs, values)\n else:\n # it's either a non-default option or an arg\n # either way, add it to the args list so we can keep\n # dealing with options\n del rargs[0]\n raise Exception\n except:\n largs.append(arg) # pylint: disable-msg=W0702", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def add_arguments(self, parser):", "def options(self, a: str) -> typing.Any:", "def getopt(self, longs, argv) :\n\t\t# analyse the arguments to extract those which may be lists\n\t\t# from those which must be single values\n\t\t# we put them into lists, since not so many options\n\t\t# will generally be used, this should be at least as fast\n\t\t# as putting them in mappings\n\t\tsingle = []\n\t\tmultiple = []\n\t\tfor l in range(len(longs)) :\n\t\t\tlong = longs[l]\n\t\t\tif long[-1] == '+' :\n\t\t\t\t# this option may be specified multiple times\n\t\t\t\t# but we must modify it for getopt.getopt to\n\t\t\t\t# work correctly\n\t\t\t\tlongs[l] = long[:-1] + '='\n\t\t\t\tmultiple.append(long[:-1])\n\t\t\telif long[-1] == '=' :\n\t\t\t\tsingle.append(long[:-1])\n\t\t\telse 
:\n\t\t\t\tsingle.append(long)\n\t\ttry :\n\t\t\tresult = {}\n\t\t\toptions,args = getopt.getopt(argv, '', longs)\n\t\t\tif options :\n\t\t\t\tfor (o, v) in options :\n\t\t\t\t\to = o[2:]\n\t\t\t\t\tif o in single :\n\t\t\t\t\t\tif not result.has_key(o) :\n\t\t\t\t\t\t\tresult[o] = v\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\traise getopt.error, \"Option --%s can't be specified more than one time on this command line\" % o\n\t\t\t\t\telif o in multiple :\n\t\t\t\t\t\tif not result.has_key(o) :\n\t\t\t\t\t\t\tresult[o] = []\n\t\t\t\t\t\tresult[o].append(v)\n\t\t\t\t\telse :\n\t\t\t\t\t\t# there's a very big problem !\n\t\t\t\t\t\traise getopt.error, \"ZShell internal error while parsing command line arguments\"\n\t\t\telif not args :\n\t\t\t\targs = argv\t# no option and no argument, return argv inchanged\n\t\t\treturn (result, args)\n\t\texcept getopt.error, msg :\n\t\t\tself.errormessage(\"%s\" % msg)\n\t\t\treturn (None, None)", "def one_or_more_options(option, opt_str, value, parser):\r\n args = []\r\n for arg in parser.rargs:\r\n if arg[0] != \"-\":\r\n args.append(arg)\r\n else:\r\n del parser.rargs[:len(args)]\r\n break\r\n if getattr(parser.values, option.dest):\r\n args.extend(getattr(parser.values, option.dest))\r\n setattr(parser.values, option.dest, args)", "def parse_args():\n\n product_help = ('The type of output products to process. Can be '\n '\"individual\", \"composite\", or \"both\".')\n\n parser = argparse.ArgumentParser()\n parser.add_argument('product_type', action='store', type=str, help=product_help)\n args = parser.parse_args()\n\n # Make sure the argument is a valid option\n valid_options = ['individual', 'composite', 'both']\n explanation = '{} is not a valid option. Please choose \"individual\", \"composite\", or \"both\".'.format(args.product_type)\n assert args.product_type in valid_options, explanation\n\n return args", "def parse_args(args, optinfos):\n\n for opt_identifier, optinfo in optinfos:\n try:\n options, arguments = getopt.gnu_getopt(args, optinfo)\n return opt_identifier, options, arguments\n except getopt.GetoptError:\n # That version doesn't work, so try the next one\n continue\n \n # If we got this far, they both failed (read: syntax error)\n error(2, \"Syntax Error: Incorrect option passed. See the man page for more information.\\nA common cause is using old LPRng syntax.\\nValid options: %s\\n\" % \n (string.replace(re.sub(r'([a-zA-Z])', r'-\\1 ',\n optinfos[SYSTEM_CUPS][1]), ':', '[arg] ')))", "def __add_arguments__(cls, parser):", "def _parse_args():\n parser = optparse.OptionParser(usage=\"\", description=\"\")\n parser.add_option(\"-o\", dest=\"outfile\", default=None, help=\"File to write to\")\n parser.add_option(\"-w\", dest=\"write_format\", default=\"pidgin\", help=\"Write format. [default: %default]\")\n parser.add_option(\"-r\", dest=\"read_format\", default=\"adium\", help=\"Read format. 
[default: %default]\")\n\n return parser.parse_args()", "def value_options(*args):\n\n @with_pattern(r\"|\".join(args))\n def parse_options(text):\n return text\n\n return parse_options", "def parse_args ( self , dataset = None , *args , **kwargs ) :\n _args = []\n for a in args :\n if not isinstance ( a , ROOT.RooCmdArg ) :\n self.error ( 'parse_args: unknown argument type %s/%s, skip' % ( a , type ( a ) ) )\n else : _args.append ( a ) \n\n from ostap.plotting.fit_draw import keys as drawing_options\n\n silent = None\n verbose = None\n \n for k , a in items_loop ( kwargs ) :\n \n klow = k.lower ().replace('_','')\n kup = k.upper ().replace('_','')\n \n ## skip \"drawing\" options \n if klow in drawing_options : continue \n if klow in ( 'draw' ,\n 'drawoption' ,\n 'drawoptions' ) : continue \n \n if isinstance ( a , ROOT.RooCmdArg ) : _args.append ( a )\n \n elif kup in ( 'VERBOSE' , ) and isinstance ( a , bool ) :\n \n if not verbose is None :\n if a != verbose : \n logger.warning ( 'parse_args: Redefine VERBOSE to %s' % a ) \n verbose = a \n if not silent is None :\n if a == silent :\n logger.warning ( 'parse_args: confusing VERBOSE/SILENT %s/%s' % ( a , silent ) )\n silent = not a \n _args.append ( ROOT.RooFit.Verbose ( a ) )\n elif kup in ( 'SILENT' ,\n 'SILENCE' ) and isinstance ( a , bool ) :\n if not silent is None :\n if a != silent : \n logger.warning ( 'parse_args: Redefine SILENT to %s' % a ) \n verbose = a \n if not verbose is None :\n if a == verbose :\n logger.warning ( 'parse_args: confusing SILENT/VERBOSE %s/%s' % ( a , verbose ) )\n verbose = not a\n _args.append ( ROOT.RooFit.Verbose ( not a ) ) \n elif kup in ( 'STRATEGY' , \n 'MINUITSTRATEGY' ,\n 'STRATEGYMINUIT' ) and isinstance ( a , integer_types ) and 0 <= a <= 2 : \n _args.append ( ROOT.RooFit.Strategy ( a ) ) \n elif kup in ( 'PRINTLEVEL' ,\n 'MINUITPRINT' ,\n 'MINUITLEVEL' ) and isinstance ( a , integer_types ) and -1 <= a <= 3 :\n _args.append ( ROOT.RooFit.PrintLevel ( a ) ) \n elif kup in ( 'PRINTEVALERRORS' ,\n 'PRINTERRORS' ,\n 'ERRORSPRINT' ) and isinstance ( a , integer_types ) and -1 <= a :\n _args.append ( ROOT.RooFit.PrintEvalErrors ( a ) ) \n elif kup in ( 'TIMER' ,\n 'TIMING' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Timer ( a ) ) \n elif kup in ( 'WARNING' ,\n 'WARNINGS' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Warnings ( a ) ) \n \n elif kup in ( 'SUMW2' ,\n 'SUMW2ERR' ,\n 'SUMW2ERROR' ,\n 'SUMW2ERRORS' ) and isinstance ( a , bool ) :\n \n if a and dataset and dataset.isWeighted() : pass \n elif a and dataset and not dataset.isWeighted() :\n self.warning ('parse_args: SumW2-flag is True for non-weighted dataset')\n elif dataset and not dataset.isWeighted() and not a : pass \n elif dataset and dataset.isWeighted() and not a :\n self.warning ('parse_args: SumW2-flag is False for weighted dataset') \n\n _args.append ( ROOT.RooFit.SumW2Error( a ) )\n \n elif kup in ( 'ASYMPTOTIC' ,\n 'ASYMPTOTICERR' ,\n 'ASYMPTOTICERROR' ,\n 'ASYMPTOTICERRORS' ) and isinstance ( a , bool ) and 61900 <= root_version_int :\n \n if a and dataset and dataset.isWeighted() : pass \n elif a and dataset and not dataset.isWeighted() :\n self.warning ('parse_args: AsymptoticError-flag is True for non-weighted dataset')\n elif dataset and not dataset.isWeighted() and not a : pass \n elif dataset and dataset.isWeighted() and not a :\n self.warning ('parse_args: AsymptoticError-flag is False for weighted dataset') \n\n if a and root_version_int < 62006 :\n self.warning (\"``Asymptotic=True'' 
will crash if Title!=Name (ROOT-10668)\")\n \n _args.append ( ROOT.RooFit.AsymptoticError ( a ) )\n \n elif kup in ( 'BATCH' ,\n 'BATCHMODE' ) and isinstance ( a , bool ) and 62000 <= root_version_int :\n _args.append ( ROOT.RooFit.BatchMode ( a ) ) \n elif kup in ( 'EXTENDED' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Extended ( a ) ) \n elif kup in ( 'CPU' ,\n 'CPUS' ,\n 'NCPU' ,\n 'NCPUS' ,\n 'NUMCPU' ,\n 'NUMCPUS' ) and isinstance ( a , int ) and 1<= a : \n _args.append ( ROOT.RooFit.NumCPU( a ) ) \n elif kup in ( 'CPU' ,\n 'CPUS' ,\n 'NCPU' ,\n 'NCPUS' ,\n 'NUMCPU' ,\n 'NUMCPUS' ) and \\\n isinstance ( a , list_types ) and 2 == len ( a ) and \\\n isinstance ( a[0] , integer_types ) and 1 <= a[1] and \\\n isinstance ( a[1] , integer_types ) and 0 <= a[1] <=3 :\n _args.append ( ROOT.RooFit.NumCPU( a[0] , a[1] ) ) \n \n elif kup in ( 'RANGE' ,\n 'FITRANGE' ,\n 'RANGES' ,\n 'FITRANGES' ) and isinstance ( a , string_types ) :\n _args.append ( ROOT.RooFit.Range ( a ) ) \n elif kup in ( 'RANGE' ,\n 'FITRANGE' ) and isinstance ( a , list_types ) \\\n and isinstance ( a[0] , num_types ) \\\n and isinstance ( a[1] , num_types ) \\\n and a[0] < a[1] : \n _args.append ( ROOT.RooFit.Range ( a[0] , a[1] ) )\n elif kup in ( 'MINIMIZER' , ) and isinstance ( a , list_types ) \\\n and isinstance ( a[0] , string_types ) \\\n and isinstance ( a[1] , string_types ) :\n _args.append ( ROOT.RooFit.Minimizer ( a[0] , a[1] ) ) \n elif kup in ( 'HESSE' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Hesse ( a ) )\n elif kup in ( 'INITIALHESSE' ,\n 'INITHESSE' ,\n 'HESSEINIT' ,\n 'HESSEINITIAL' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.InitialHesse ( a ) )\n elif kup in ( 'OPTIMIZE' ,\n 'OPTIMISE' ) and isinstance ( a , integer_types ) :\n _args.append ( ROOT.RooFit.Optimize ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Minos ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , ROOT.RooArgSet ) :\n _args.append ( ROOT.RooFit.Minos ( a ) )\n elif kup in ( 'MINOS' , ) and isinstance ( a , string_types ) \\\n and hasattr ( self , 'params' ) and a in self.params ( dataset ) : \n _v = self.params()[ a ]\n _s = ROOT.RooArgSet ( _v )\n self.aux_keep.append ( _s ) \n _args.append ( ROOT.RooFit.Minos ( _s ) ) \n elif kup in ( 'MINOS' , ) and not isinstance ( a , string_types ) :\n\n _s = ROOT.RooArgSet()\n _pars = self.params ( dataset ) if hasattr ( self , 'params' ) else ROOT.RooArgSet() \n for v in a :\n if v in _pars and isinstance ( v , string_types ):\n _v = _pars [ v ] \n _s.add ( _v )\n elif v in _pars and isinstance ( v , ROOT.RooAbsArg ) :\n _s.add ( v )\n else :\n self.error ( \"Can not find %s in parameetrs\" % v )\n\n self.aux_keep.append ( _s ) \n _args.append ( ROOT.RooFit.Minos ( _s ) )\n \n elif kup in ( 'SAVE' , ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Save ( a ) )\n elif kup in ( 'CLONE' ,\n 'CLONEDATA' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.CloneData ( a ) )\n elif kup in ( 'OFFSET' ) and isinstance ( a , bool ) :\n _args.append ( ROOT.RooFit.Offset ( a ) )\n elif kup in ( 'FITOPTIONS' ,\n 'FITOPTION' ) and isinstance ( a , string_types ) :\n _args.append ( ROOT.RooFit.FitOptions ( a ) )\n \n elif kup in ( 'CONSTRAINT' ,\n 'CONSTRAINTS' ,\n 'PARS' ,\n 'PARAMS' ,\n 'PARAMETER' ,\n 'PARAMETERS' ) :\n c = self.parse_constraints ( a )\n if c is None : self.error ('parse_args: Invalid constraint specification: %s/%s' % ( a , type ( a ) ) )\n else : 
_args.append ( c ) \n \n else :\n \n self.error ( 'parse_args: Unknown/illegal keyword argument: %s/%s, skip it ' % ( k , type ( a ) ) )\n \n \n if not check_arg ( 'numcpu' , *_args ) :\n if dataset and not isinstance ( dataset , ROOT.RooDataHist ) :\n _args.append ( ncpu ( len ( dataset ) ) )\n else :\n nc = numcpu()\n if 1 < nc : _args.append ( ROOT.RooFit.NumCPU ( nc ) )\n\n \n # =============================================================\n ## check options for the weighted datasets \n if dataset :\n \n weighted = dataset.isWeighted () \n sw2 = check_arg ( 'SumW2Error' , *_args )\n aer = check_arg ( 'AsymptoticError' , *_args )\n\n if sw2 and aer :\n logger.warning ( \"parse_args: Both ``SumW2Error'' and ``AsymptoticError'' are specified\" ) \n if weighted and sw2 :\n value = bool ( sw2.getInt( 0 ) )\n if not value : logger.warning (\"parse_args: 'SumW2=False' is specified for the weighted dataset!\")\n elif weighted and aer : \n value = bool ( aer.getInt( 0 ) )\n if not value : logger.warning (\"parse_args: 'AsymptoticError=False' is specified for the weighted dataset!\")\n ## elif weighted : \n ## logger.warning ( \"parse_args: Neither ``SumW2Error'' and ``AsymptoticError'' are specified for weighted dataset! ``SumW2=True'' is added\" )\n ## _args.append ( ROOT.RooFit.SumW2Error ( True ) ) \n elif not weighted and sw2 :\n logger.warning ( \"parse_args:``SumW2Error'' is specified for non-weighted dataset\" )\n elif not weighted and aer :\n logger.warning ( \"parse_args:``AsymptoticError'' is specified for non-weighted dataset\" )\n\n keys = [ str ( a ) for a in _args ]\n keys.sort ()\n \n ## check presence of \"non-trivial\" keys\n kset = set( keys ) \n kset.discard ( 'Save' ) ## trivial\n kset.discard ( 'NumCPU' ) ## trivial\n kset.discard ( 'Verbose' ) ## trivial \n kset.discard ( 'Timer' ) ## trivial \n kset.discard ( 'PrintLevel' ) ## trivial\n\n ## duplicates? 
\n if len ( kset ) != len ( keys ) :\n self.warning (\"duplicated options!\") \n #\n if kset : self.debug ( 'parse_args: Parsed arguments %s' % keys )\n else : self.debug ( 'parse_args: Parsed arguments %s' % keys )\n\n\n ## store them \n self.aux_keep.append ( _args ) \n \n return self.merge_args ( 5 , *_args )", "def _disallow_repeated_options(self, attrs):\n # General strategy:\n #\n # Make this option a \"multiple option\" (``multiple=True``) and use a\n # callback to unpack the stored values and assert that only a single\n # value was supplied.\n\n # Use the user-supplied callback or define a passthrough callback if\n # one wasn't supplied.\n if 'callback' in attrs:\n callback = attrs['callback']\n else:\n def callback(ctx, param, value):\n return value\n\n # Wrap the callback to intercept stored values so that they can be\n # unpacked and validated.\n def callback_wrapper(ctx, param, value):\n # When `multiple=True` Click will use an empty tuple to represent\n # the absence of the option instead of `None`.\n if value == ():\n value = None\n if not value or ctx.resilient_parsing:\n return callback(ctx, param, value)\n\n # Empty/null case is handled above, so attempt to unpack the value.\n try:\n value, = value\n except ValueError:\n click.echo(ctx.get_usage() + '\\n', err=True)\n click.secho(\n \"Error: Option --%s was specified multiple times in the \"\n \"command.\" % q2cli.util.to_cli_name(param.name),\n err=True, fg='red', bold=True)\n ctx.exit(1)\n\n return callback(ctx, param, value)\n\n # Promote this option to a \"multiple option\" and use the callback\n # wrapper to make it behave like a regular \"single\" option.\n attrs['callback'] = callback_wrapper\n attrs['multiple'] = True\n\n # If the user set a default, promote it to a \"multiple option\" default\n # by putting it in a list. 
A default of `None` is a special case that\n # can't be promoted.\n if 'default' in attrs and attrs['default'] is not None:\n attrs['default'] = [attrs['default']]", "def Args(parser):\n parser.add_argument('name', help='Type name.')\n parser.add_argument('--provider',\n help='Type provider name or its self-link.',\n required=True)", "def parse_args(self, args):\n (options, args) = optparse.OptionParser.parse_args(self, args)\n return options", "def add_args(self, parser):", "def multi(self, *argv, **kwargs):\n pass", "def _merge_args_opts(args_opts_dict, **kwargs):\n merged = []\n\n if not args_opts_dict:\n return merged\n\n for arg, opt in args_opts_dict.items():\n if not _is_sequence(opt):\n opt = shlex.split(opt or \"\")\n merged += opt\n\n if not arg:\n continue\n\n if \"add_input_option\" in kwargs:\n merged.append(\"-i\")\n\n merged.append(arg)\n\n return merged", "def parse_options(self,arg_str,opt_str,*long_opts,**kw):\n\n mode = kw.get('mode','string')\n list_all = kw.get('list_all',0)\n\n opts,args = getopt(arg_str.split(),opt_str,*long_opts)\n odict = {}\n for o,a in opts:\n if o.startswith('--'):\n o = o[2:]\n else:\n o = o[1:]\n try:\n odict[o].append(a)\n except AttributeError:\n odict[o] = [odict[o],a]\n except KeyError:\n if list_all:\n odict[o] = [a]\n else:\n odict[o] = a\n opts = Struct(odict)\n\n if mode == 'string':\n args = ' '.join(args)\n elif mode == 'list':\n pass\n else:\n raise ValueError,'incorrect mode given:'+`mode`\n return opts,args", "def test_arg_option_mix_short_long(self):\n optional_mix = [\n arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith(\"-\")\n ]\n for arg in optional_mix:\n assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f\"{arg.flags[0]} is not match\"\n assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f\"{arg.flags[1]} is not match\"", "def parse_args(self, unknown_args):\n arg_list = list()\n for arg in unknown_args:\n if arg.startswith((\"-\", \"--\")):\n if \".\" not in arg:\n raise Exception(\"All arguments must have a '.' 
in their name, like 'Robot.setting'\")\n arg_list.append(arg[2:])\n parser.add_argument(arg, type=str)\n opt_args = parser.parse_args(unknown_args)\n for arg in arg_list:\n section, setting = arg.split(\".\")\n self.logger.debug(\"Adding %s, %s from cmd line\" % (section, setting))\n self._add_setting(section, setting, opt_args.__getattribute__(arg))", "def test_parse_kwargs_nargsplus(self):\n parser = ParlaiParser(False, False)\n parser.add_argument('--example', nargs='+', choices=['a', 'b', 'c'])\n opt = parser.parse_args(['--example', 'a', 'b'])\n assert opt['example'] == ['a', 'b']\n\n parser = ParlaiParser(False, False)\n parser.add_argument('--example', nargs='+', choices=['a', 'b', 'c'])\n opt = parser.parse_kwargs(example=['a', 'b'])\n assert opt['example'] == ['a', 'b']\n\n parser = ParlaiParser(False, False)\n parser.add_argument('--example', nargs='+')\n opt = parser.parse_kwargs(example=['x', 'y'])\n assert opt['example'] == ['x', 'y']", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def parse_options(self, options):\n pass", "def add_arguments(parser):\n for arg, properties in AgentArgs.OPTIONS.items():\n parser.add_argument('--' + arg, **properties)\n verbosity = parser.add_mutually_exclusive_group()\n for arg, properties in AgentArgs.EXCLUSIVE_OPTIONS_1.items():\n verbosity.add_argument('--' + arg, **properties)\n progress_reporting = parser.add_mutually_exclusive_group()\n for arg, properties in AgentArgs.EXCLUSIVE_OPTIONS_2.items():\n progress_reporting.add_argument('--' + arg, **properties)", "def add_arguments(parser):\n return", "def add_arguments(self, parser):\n parser.add_argument('asins', nargs='+', type=str)", "def _parse_args():\n usage = \"usage: %prog [options] arg1 arg2\"\n parser = optparse.OptionParser()\n parser.add_option(\n '--platform', dest='platform', default=\"\", type = \"string\",\n help='platform name: UC 360 baidu etc.')\n parser.add_option(\n '--workspace', dest='workspace', default=\"./\", type = \"string\",\n help='project directory.')\n parser.add_option(\n '--project', dest='projectDir', default=\"./destProject\", type = \"string\",\n help='project directory.')\n # parser.add_option(\n # \"-t\", dest=\"test\", action=\"store_const\", const=lambda:_test, default=_test2, help=\"////////////\"\n # )\n options, args = parser.parse_args()\n # positional arguments are ignored\n return options, args", "def parse_options(type):\n # TODO: conflict_handler='resolve' is really required ??\n parser = ArgumentParser(conflict_handler='resolve')\n if type == 'backup':\n for name, description in _get_parameters_backup().items():\n parser.add_argument('--{}'.format(name),\n help=description, required=True)\n elif type == 'restore':\n for name, description in _get_parameters_restore().items():\n if name in _get_parameters_restore_optional().keys():\n \tparser.add_argument('--{}'.format(name), help=description, required=False)\n else:\n parser.add_argument('--{}'.format(name), help=description, required=True)\n elif type == 'blob_operation':\n for name, description in _get_parameters_blob_operation().items():\n parser.add_argument('--{}'.format(name),\n help=description, required=True)\n else:\n raise Exception('Use either \\'backup\\' or \\'restore\\' as type.')\n\n for key, credentials in _get_parameters_credentials().items():\n for name, description in credentials.items():\n parser.add_argument('--{}'.format(name), help=description)\n configuration = vars(parser.parse_args())\n assert configuration['type'] == 'online' or 
configuration['type'] == 'offline', \\\n '--type must be \\'online\\' or \\'offline\\''\n return configuration", "def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! 
(\" + argument + \")\")", "def parse_arguments(self,parser):\r\n return parser.parse_args()", "def parse_arguments():\n\tparser = optparse.OptionParser(\n\t\tusage=\"%prog [options] --command=COMMAND (--nexopia-userid|--remote-ip)\",\n\t\tversion=\"%prog r\" + re.sub(\"[^0-9]\", \"\", __version__)\n\t)\n\tparser.add_option(\n\t\t\"--command\",\n\t\thelp=\"execute this command when the rate limit is exceeded (replacements available: $UID$ and $IP$)\"\n\t)\n\tparser.add_option(\n\t\t\"--debug\",\n\t\taction=\"store_true\",\n\t\tdefault=False,\n\t\thelp=\"enable display of verbose debugging information\"\n\t)\n\tparser.add_option(\n\t\t\"--nexopia-userid\",\n\t\taction=\"store_true\",\n\t\tdefault=False,\n\t\tdest=\"nexopia_userid\",\n\t\thelp=\"rate-limit based on aggregation by nexopia user id\"\n\t)\n\tparser.add_option(\n\t\t\"--rate\",\n\t\tdefault=20,\n\t\thelp=\"trigger the rate-limit if the aggregated data shows more than this many hits within a WINDOW_SIZE period (measured in seconds)\",\n\t\ttype=\"int\"\n\t)\n\tparser.add_option(\n\t\t\"--remote-ip\",\n\t\taction=\"store_true\",\n\t\tdefault=False,\n\t\tdest=\"remote_ip\",\n\t\thelp=\"rate-limit based on aggregation by remote ip address\"\n\t)\n\tparser.add_option(\n\t\t\"--repeat-command\",\n\t\taction=\"store_true\",\n\t\tdefault=False,\n\t\tdest=\"repeat_command\",\n\t\thelp=\"trigger the command for EACH request that exceeds the rate-limit, rather than only once per data aggregation key\"\n\t)\n\tparser.add_option(\n\t\t\"--whitelist\",\n\t\taction=\"append\",\n\t\thelp=\"whitelist an aggregation key (remote ip address or nexopia user id) so that it will not trigger COMMAND\"\n\t)\n\tparser.add_option(\n\t\t\"--window-size\",\n\t\tdefault=60,\n\t\tdest=\"window_size\",\n\t\thelp=\"trigger the rate-limit if the aggregated data shows more than RATE hits within this many seconds\",\n\t\ttype=\"int\"\n\t)\n\t\n\t(options, args) = parser.parse_args()\n\t\n\tif options.rate <= 0:\n\t\tparser.error(\"option --rate: must be larger than zero\")\n\tif options.window_size <= 0:\n\t\tparser.error(\"option --window-size: must be larger than zero\")\n\tif not options.nexopia_userid and not options.remote_ip:\n\t\tparser.error(\"must aggregate over at least one identifier, use either --nexopia-userid or --remote-ip (or both)\")\n\tif not options.whitelist:\n\t\toptions.whitelist = []\n\toptions.whitelist = set(options.whitelist)\n\n\treturn options", "def cmd_type(args):", "def set_options(args):\n\n (options, args) = parser.parse_args(args)\n return options", "def cmd(*options):\n return dosagelib.cmd.main(('--allow-multiple',) + options)", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def _parse_args(self, prepared_args):\n pass", "def test_options2args():\n args, kwargs = util.options2args([\"--arg1\", \"-arg2\", \"--arg3=10\"])\n assert all([\"--arg1\" in args, \"-arg2\" in args, \"arg3\" in kwargs.keys()])", "def _parse_args():\n parser = optparse.OptionParser(usage=\"%prog imagefile+number.suffix\", description=\"Opens up a sequence of pictures in a web browser.\")\n\n return parser.parse_args()", "def options(self, parser):\n pass", "def add_arguments(self, parser):\n pass", "def add_arguments(self, parser):\n pass", "def _get_argparse_kwargs(self, group, 
**kwargs):\n kwargs = super(StrOpt, self)._get_argparse_kwargs(group)\n\n if getattr(self.type, 'choices', None):\n choices_text = ', '.join([self._get_choice_text(choice)\n for choice in self.type.choices])\n if kwargs['help'] is None:\n kwargs['help'] = ''\n\n kwargs['help'].rstrip('\\n')\n kwargs['help'] += '\\n Allowed values: %s\\n' % choices_text\n\n return kwargs", "def CommandArgs(args):\n if len(args) > 1:\n if args[1].startswith('--'):\n option = args[1] [2:]\n if len(args) > 2:\n content = args[2]\n return option, content\n return True, None\n return False, None", "def set_subparser_for(cls, command, method, subparser):\n\n\t\tdef add_pos_argument(sub, label, arg):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\traise CommandTypeError(\"bool type not supported as positional argument\")\n\t\t\tif \"value\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"value\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] in [str, int, float]:\n\t\t\t\t\tsub.add_argument(label, nargs='?', default=arg[\"values\"][0], choices=arg[\"values\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, nargs='+', default=arg[\"values\"][0], choices=arg[\"values\"], help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tsub.add_argument(label, type=arg[\"type\"], help=arg[\"help_line\"])\n\n\t\tdef add_opt_argument(sub, label, arg, add_alias=True):\n\t\t\tif arg[\"type\"] == bool:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], action=\"store_true\", default=False, help=arg[\"help_line\"])\n\n\t\t\telif arg[\"type\"] in [str, int, float] and \"value\" in arg:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], default=arg[\"value\"], help=arg[\"help_line\"])\n\t\t\telif arg[\"type\"] == list and \"values\" not in arg:\n\t\t\t\tsub.add_argument(label, nargs=\"*\", help=arg[\"help_line\"])\n\t\t\telif \"values\" in arg:\n\t\t\t\tif arg[\"type\"] == list:\n\t\t\t\t\tsub.add_argument(label, choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"*\", help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(label, type=arg[\"type\"], choices=arg[\"values\"], default=arg[\"values\"][0], nargs=\"?\", help=arg[\"help_line\"])\n\t\t\telse:\n\t\t\t\tif add_alias:\n\t\t\t\t\tsub.add_argument(arg[\"alias\"], arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\t\telse:\n\t\t\t\t\tsub.add_argument(arg[\"name\"], type=arg[\"type\"], help=arg[\"help_line\"])\n\t\t\n\t\tfunc = getattr(cls, method)\n\n\t\targs_info = cls.__parse_docstring(func.__doc__)\n\t\tif args_info == {}:\n\t\t\treturn\n\n\t\tc = subparser.add_parser(command, help=args_info[\"help_line\"])\n\n\t\tif \"arguments\" in args_info:\n\t\t\tfor label, arg in args_info[\"arguments\"].items():\n\t\t\t\tif arg[\"pos\"]:\n\t\t\t\t\tadd_pos_argument(c, label, arg)\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=True)\n\t\t\t\t\texcept ArgumentError as e:\n\t\t\t\t\t\tadd_opt_argument(c, label, arg, add_alias=False)", "def parseOptions(self):\n\n\t\tparser = OptionParser()\n parser.add_option(\n 
\"-u\",\n \"--user\",\n dest=\"user\",\n help=\"enter a user or 'all'\"\n )\n\n parser.add_option(\n \"-p\",\n \"--projects\",\n dest=\"projects\",\n help=\"enter a project or 'all'\"\n )\n (self.options, self.args) = parser.parse_args()", "def test_add_common_arguments():\n parser = argparse.ArgumentParser()\n add_common_arguments(parser)\n\n options = parser.parse_args([])\n assert hasattr(options, 'config')\n assert hasattr(options, 'configdir')\n assert options.config == 'default'\n assert options.configdir == config.DEFAULT_HOMEDIR\n\n options = parser.parse_args(['-c', 'test-short'])\n assert options.config == 'test-short'\n\n options = parser.parse_args(['--config', 'test-long'])\n assert options.config == 'test-long'\n\n options = parser.parse_args(['--config-dir', 'test-long'])\n assert options.configdir == 'test-long'\n\n options = parser.parse_args(\n ['-c', 'test-short', '--config-dir', 'test-long-dir'])\n assert options.config == 'test-short'\n assert options.configdir == 'test-long-dir'\n\n options = parser.parse_args(\n ['--config', 'test-long', '--config-dir', 'test-long-dir'])\n assert options.config == 'test-long'\n assert options.configdir == 'test-long-dir'", "def add_arguments(cls, arg_parser: ArgParser) -> None:", "def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))", "def processCmdLineArgs(expectedTypes, usage):\n\targs = []\n\tnumComLineArgs = len(sys.argv)\n\tnumExpected = len(expectedTypes)\n\tif (numComLineArgs - 1 == len(expectedTypes)):\n\t\ttry:\n\t\t\tfor i in range(0, numExpected):\n\t\t\t\tif (expectedTypes[i] == typeInt):\n\t\t\t\t\targs.append(int(sys.argv[i+1]))\n\t\t\t\telif (expectedTypes[i] == typeFloat):\n\t\t\t\t\targs.append(float(sys.argv[i+1]))\n\t\t\t\telif (expectedTypes[i] == typeString):\n\t\t\t\t\targs.append(sys.argv[i+1])\n\t\texcept ValueError:\n\t\t\tprint (\"expected number of command line arguments found but there is type mis match\")\n\t\t\tsys.exit(1)\n\telse:\n\t\tprint (\"expected number of command line arguments not found\")\n\t\tprint (usage)\n\t\tsys.exit(1)\n\treturn args", "def add_parser_options(cls, parser):\n for arg in cls.configurables():\n getattr(cls, arg).add_argument(parser)", "def Args(parser):\n flags.AddRegion(parser)\n flags.AddCluster(parser)", "def command_line_arguments():\n _parser.add_argument('-l', '--list', nargs='+',\n help='<Required> Set flag', required=True)\n _parser.add_argument(\"-A\", \"--access\", required=True,\n help=\"access to host => grant/revoke\")", "def get_options(args):\n parser = ArgumentParser()\n for arg in args:\n parser.add_argument(arg.pop('val'), **arg)\n\n options = parser.parse_args()\n\n return options", "def getOptions():\n\tdescription=\"\"\"This script takes an input fasta file of fusions and identifies all of the identical fusions.\"\"\"\n\tparser = argparse.ArgumentParser(description=description)\n\tparser.add_argument(\"-bowtie\", \"--bowtie_log_names\", dest=\"bowtie\", action='store', required=False, nargs = '*', 
help=\"bowtie log file names [Optional]\")\n\tparser.add_argument(\"-last\", \"--last_log_names\", dest=\"last\", action='store', required=False, help=\"LAST log file names [Optional]\")\n\tparser.add_argument(\"-treatment\",\"--treatment_name\",dest=\"treatment\",action='store',required=True,nargs= '*', help=\"Treatment variables [Required]\")\n\tparser.add_argument(\"-o\",\"--output_file\",dest=\"output\",action='store',required=True,help=\"Output file name [Required]\")\n\targs = parser.parse_args()\n\tif not args.bowtie and not args.last: #The user should give at least one bowtie or last log argument; otherwise the program does nothing\n\t parser.error('No input logs given; add -bowtie or -last')\n\treturn(args)", "def parse_options():\n global parser\n parser.add_option(\"-r\", \"--regions\", dest=\"input_brain_regions\",\n help=\"Input file for brain region data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-g\", \"--gray\", dest=\"input_gray_levels\",\n help=\"Input file for gray level data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-n\", \"--nissl\", dest=\"input_nissl\",\n help=\"Input file for nissl data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-o\", \"--output\", dest=\"output_folder_path\",\n help=\"Output folder for extracted data files\",\n action=\"store\", type='string')\n\n return parser.parse_args()", "def positionals(self, *args, **kwargs):\n return self.parser.add_argument(*args, **kwargs, nargs=\"+\")", "def test_duplicate_flags():\n parser = CmdParser([noArgs, onearg])\n with pytest.raises(CmdParseError):\n out = parser.parse(\"onearg -a -a\")", "def command_option(self, *args, **kwargs):\r\n def register_option(function):\r\n added_option = self._get_option_from_args(args, kwargs)\r\n if not hasattr(function, self.OPTIONS_ATTR):\r\n setattr(function, self.OPTIONS_ATTR, deque())\r\n getattr(function, self.OPTIONS_ATTR).appendleft(added_option)\r\n return function\r\n return register_option", "def setup_parser(cls, option_group, args, mkflag):", "def setup_parser(cls, option_group, args, mkflag):", "async def options(arg):\n match = command_pattern.match(arg)\n assert match\n assert not match.group(1).lower() == match.group(2).lower(), \"**The choices cannot be the same.**\"\n\n return match.group(1), match.group(2)", "def setup_options_parser(self, argparser):\n pass", "def parse_args(self):\n parsed, _ = self.parser.parse_args()\n final = {}\n append = getattr(parsed, self.append_option)\n subtract = getattr(parsed, self.subtract_option)\n for option in self.all_options():\n name = option.dest\n if name is not None:\n value = getattr(parsed, name)\n default = self.defaults.get(name)\n if append and option.get_opt_string() in self.appendable:\n value = self.append(option, value)\n elif subtract and option.get_opt_string() in self.appendable:\n value = self.subtract(option, value)\n if value is None:\n value = default\n if value is None:\n value = raw_input(\"Please enter '%s': \" % option.help)\n self[name] = value\n return self", "def options_by_name(self):\n pass", "def parse_user_arguments(*args, **kwds):\n\n parser = argparse.ArgumentParser(\n description = \"Predict if a pair of drugs is a drug combination\",\n epilog = \"@oliva's lab 2017\")\n parser.add_argument('-d1','--drug_name1',dest='drug_name1',action = 'store',\n help = \"\"\" Name of the drug number 1. 
If you do not provide targets for this drug or the number of targets is not large enough,\n the program will use this name to search for targets in BIANA database. If targets are provided, this field will be only used\n for naming purposes and will be completely optional.\n If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between \n double quotes. \"\"\")\n parser.add_argument('-d2','--drug_name2',dest='drug_name2',action = 'store',\n help = \"\"\" Name of the drug number 2. If you do not provide targets for this drug or the number of targets is not large enough,\n the program will use this name to search for targets in BIANA database. If targets are provided, this field will be only used\n for naming purposes and will be completely optional.\n If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between \n double quotes. \"\"\")\n parser.add_argument('-t1','--targets1',dest='targets1',action = 'store',\n help = 'Input file with the targets of the drug 1. Each target must be separated by a newline character.')\n parser.add_argument('-t2','--targets2',dest='targets2',action = 'store',\n help = 'Input file with the targets of the drug 2. Each target must be separated by a newline character.')\n parser.add_argument('-pt','--proteins_type_id',dest='proteins_type_id',action = 'store', default='geneid',\n help = 'Input the type of ID of the targets introduced / proteins of the network. It must be the same! (default is geneid).')\n parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',\n help = \"\"\"List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:\n - Different numbers that will be the threshold values separated by newline characters. \n For example, a file called \"top_threshold.list\" containing:\n 0.1\n 0.5\n 1\n 5\n 10\n \"\"\")\n parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),\n help = \"\"\"Define the workspace directory where the data directory and the results directory will be created\"\"\")\n\n options=parser.parse_args()\n\n return options", "def options():\n parser = ArgumentParser()\n logging = parser.add_argument_group(\"log\")\n logging.add_argument(\n \"--log\",\n dest=\"loglevel\",\n default=\"WARNING\",\n choices=[\"WARNING\", \"INFO\", \"DEBUG\", \"ERROR\"],\n help=\"Set the log level\",\n )\n monitoring = parser.add_argument_group(\"monitoring\")\n monitoring.add_argument(\n \"--monitoring\", action=\"store_true\", help=\"Set the monitoring\"\n )\n mpi = parser.add_argument_group(\"mpi splitting\")\n mpi.add_argument(\n \"-npx\",\n dest=\"npx\",\n default=1,\n type=int,\n help=\"Set the number of processes in x direction\",\n )\n mpi.add_argument(\n \"-npy\",\n dest=\"npy\",\n default=1,\n type=int,\n help=\"Set the number of processes in y direction\",\n )\n mpi.add_argument(\n \"-npz\",\n dest=\"npz\",\n default=1,\n type=int,\n help=\"Set the number of processes in z direction\",\n )\n args, _ = parser.parse_known_args()\n return args", "def split_option(option, length):\n length = list(length)\n args = option.split(',')\n if len(args) not in length:\n sys.stderr.write('mpl-graph: Argument expected length {}. 
'\n 'Actual length of \"{}\" is {}\\n'.format(length, option, len(args)))\n sys.exit(ERR_NUM_OPTIONS)\n return args", "def parser_arguments():\n parser = argparse.ArgumentParser(prog = 'OIDv6_ToolKit',\n usage = 'python3 %(prog)s [command] --classe [option] --limit [option] --location [option]',\n description='This programm allows to download images from OIDv6')\n parser.add_argument(\"command\",\n metavar= \"<command>: 'getURL', 'downloader' or 'listClasses'.\",\n help = \"'getURL' or 'listClasses'.\")\n parser.add_argument('--classes', required=False, nargs='+',\n metavar=\"list of classes\",\n help=\"Sequence of 'strings' of the wanted classes\")\n parser.add_argument('--limit', required=False, type=int, default=None,\n metavar=\"integer number\",\n help='Optional limit on number of images to download')\n parser.add_argument('--location',required=False, nargs='+',\n metavar='where to download',\n help=\"where to download: local repository or Minio serveur\")\n\n args = parser.parse_args()\n return args", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def parseargs():\n parser = argparse.ArgumentParser(description=\\\n \"Combine multiple linker json files into one\")\n parser.add_argument('configs',\n help=\"list of linker files to combine\", nargs='+')\n parser.add_argument('-o', '--outfile',\n help=\"name of combined linker json file\", default='Linker.json')\n parser.add_argument('-v', '--verbose', action='store_true', \n help=\"verbose output\")\n args = parser.parse_args()\n return (args.configs, args.outfile, args.verbose)", "def parse_args():\n global flag_num_classes, flag_num_instances\n\n try:\n optlist, args = getopt.getopt(sys.argv[1:], \"dc:I:\")\n except getopt.GetoptError as err:\n # unrecognized option\n usage(str(err))\n\n if args:\n usage(\"uknown extra args\")\n for opt, arg in optlist:\n if opt == \"-d\":\n u.increment_verbosity()\n elif opt == \"-c\":\n flag_num_classes = int(arg)\n elif opt == \"-I\":\n flag_num_instances = int(arg)", "def comma_separated_type(choices):\n def split_arg(arg):\n values = arg.replace(' ', '').lower().split(',')\n for value in values:\n if value not in choices:\n raise argparse.ArgumentTypeError(\n f'invalid choice: {value!r} (choose from {\", \".join([repr(choice) for choice in choices])})')\n return values\n return split_arg", "def parse_args():\n parser = argparse.ArgumentParser(description='VCF to Typhi genotypes')\n parser.add_argument('--mode', choices=('vcf', 'bam', 'vcf_parsnp'), default='bam',\n help='Mode to run in based on input files (vcf, bam, or vcf_parsnp)')\n parser.add_argument('--vcf', nargs='+', type=str, required=False,\n help='VCF file(s) to genotype (Mapping MUST have been done using CT18 as a reference sequence)')\n parser.add_argument('--bam', nargs='+', type=str, required=False,\n help='BAM file(s) to genotype (Mapping MUST have been done using CT18 as a reference sequence)')\n parser.add_argument('--ref_id', type=str, required=False,\n help='Name of the reference in the VCF file (#CHROM column) or fasta file. Note that CT18 has '\n 'genotype 3.2.1. 
If all your strains return this genotype, it is likely you have '\n 'specified the name of the refrence sequence incorrectly; please check your VCFs.')\n parser.add_argument('--phred', type=int, required=False, default=20,\n help='Minimum phred quality to count a variant call vs CT18 as a true SNP (default 20)')\n parser.add_argument('--min_prop', type=float, required=False, default=0.1,\n help='Minimum proportion of reads required to call a SNP (default 0.1)')\n parser.add_argument('--ref', type=str, required=False,\n help='Reference sequence in fasta format. Required if bam files provided.')\n parser.add_argument('--output', type=str, required=False, default=None,\n help='Location and name for output file (default=stdout)')\n return parser.parse_args()", "def cli_options():\n\n parser = argparse.ArgumentParser(\n description='c[apirca]grep',\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument('-d', '--def', dest='defs',\n help='Network Definitions directory location. \\n',\n default='./def')\n\n # -i and -t can be used together, but not with any other option.\n ip_group = parser.add_argument_group()\n # take 1 or more IPs\n ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip,\n help='Return list of definitions containing the '\n 'IP(s).\\nMultiple IPs permitted.')\n\n ip_group.add_argument('-t', '--token', dest='token',\n help=('See if an IP is contained within the given '\n 'token.\\nMust be used in conjunction with '\n '-i/--ip [addr].'))\n\n exclusive_group = parser.add_mutually_exclusive_group()\n # the rest of the arguments are mutually exclusive with each other,\n # and -i / -t\n exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2,\n metavar=('OBJ', 'OBJ'),\n help=('Compare the two given network '\n 'definition tokens'))\n\n exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,\n type=is_valid_ip, metavar=('IP', 'IP'),\n help=('Diff the network objects to'\n ' which the given IP(s) belong'))\n\n exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',\n help=('Return list of IP(s) contained within '\n 'the given token(s)'))\n\n exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',\n help=('Return list of port(s) contained '\n 'within given token(s)'))\n\n exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,\n metavar=('PORT', 'PROTO'),\n help=('Returns a list of tokens containing '\n 'the given port and protocol'))\n\n return parser", "def add_command_line_arguments(self, parser):\n # parser.add_option(...)\n pass", "def add_positional_arguments(self,*iterables):\n for iterable in iterables:\n self.parser.add_argument(\n iterable[0],\n type=iterable[1],\n help=iterable[2],\n default='Null',\n )", "def parseArgs():\n\t\n\tparser = argparse.ArgumentParser(\n\t\tprog='device_sniffer',\n\t\tdescription='spot nearby devices using a wireless sniffer.',\n\t\tusage='%(prog)s <mode> [options]')\n\t\n\tparser.add_argument('--interface','-i', help=\"interface to scan on.\", dest=\"inf\", type=str)\n\tparser.add_argument('--verbse','-v', help=\"show extra output.\", dest=\"verbosity\", action=\"count\")\n\tparser.add_argument('types', help=\"type of devices to scan for.\", choices=['connected', 'scanning','both'], default='both', nargs='?')\n\targs = parser.parse_args()\n\treturn args", "def add_optional_arguments(self,*iterables):\n for iterable in iterables:\n self.parser.add_argument(\n iterable[0],\n metavar=iterable[1],\n help=iterable[2],\n dest=iterable[3],\n default='Null',\n )", 
"def Args(parser):\n flags.AddTagOrDigestPositional(parser, verb='untag', tags_only=True)", "def parser_add_options(parser, options):\n for option, properties in options.items():\n parser.add_argument(option, **properties)", "def _parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'list_of_files', type=str,\n help='Input ASCII file with a list of files to be downloaded')\n\n return parser.parse_args()", "def _interface(cls, argp: ArgumentParserWithKeywordHooks) -> ArgumentParserWithKeywordHooks:\n base = argp.add_argument_group('generic options')\n\n base.set_defaults(reverse=False, squeeze=False)\n base.add_argument('-h', '--help', action='help', help='Show this help message and exit.')\n base.add_argument('-L', '--lenient', action='count', default=0, help='Allow partial results as output.')\n base.add_argument('-Q', '--quiet', action='store_true', help='Disables all log output.')\n base.add_argument('-0', '--devnull', action='store_true', help='Do not produce any output.')\n base.add_argument('-v', '--verbose', action='count', default=0,\n help='Specify up to two times to increase log level.')\n\n if cls.is_reversible:\n base.add_argument('-R', '--reverse', action='count', default=0,\n help='Use the reverse operation; Specify twice to normalize (first decode, then encode).')\n\n groups = {None: argp}\n\n for argument in reversed(cls._argument_specification.values()):\n gp = argument.group\n if gp not in groups:\n groups[gp] = argp.add_mutually_exclusive_group()\n groups[gp].add_argument @ argument\n\n return argp", "def parse_params(params):\n def isoption(x):\n return x.startswith('-')\n solo_flags = []\n arg_flags = dict()\n i = 0\n while i < len(params):\n if not isoption(params[i]):\n raise ValueError('\"' + params[i] + '\" does not look like an option.')\n if i == len(params) - 1 or isoption(params[i+1]):\n solo_flags.append(params[i])\n i += 1\n continue\n else:\n arg_flags[params[i]] = process_arg(params[i+1])\n i += 2\n continue\n return solo_flags, arg_flags", "def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:\n parser = ArgumentParser(parents=[parent_parser], add_help=False,)\n\n blacklist = ['kwargs']\n depr_arg_names = cls.get_deprecated_arg_names() + blacklist\n\n allowed_types = (str, int, float, bool)\n\n args_help = parse_args_from_docstring(cls.__init__.__doc__ or cls.__doc__)\n for arg, arg_types, arg_default in (\n at for at in get_init_arguments_and_types(cls) if at[0] not in depr_arg_names\n ):\n arg_types = [at for at in allowed_types if at in arg_types]\n if not arg_types:\n # skip argument with not supported type\n continue\n arg_kwargs = {}\n if bool in arg_types:\n arg_kwargs.update(nargs=\"?\", const=True)\n # if the only arg type is bool\n if len(arg_types) == 1:\n use_type = parsing.str_to_bool\n elif str in arg_types:\n use_type = parsing.str_to_bool_or_str\n else:\n # filter out the bool as we need to use more general\n use_type = [at for at in arg_types if at is not bool][0]\n else:\n use_type = arg_types[0]\n\n if arg == 'gpus' or arg == 'tpu_cores':\n use_type = _gpus_allowed_type\n arg_default = _gpus_arg_default\n\n # hack for types in (int, float)\n if len(arg_types) == 2 and int in set(arg_types) and float in set(arg_types):\n use_type = _int_or_float_type\n\n # hack for track_grad_norm\n if arg == 'track_grad_norm':\n use_type = float\n\n parser.add_argument(\n f'--{arg}',\n dest=arg,\n default=arg_default,\n type=use_type,\n help=args_help.get(arg),\n 
**arg_kwargs,\n )\n\n return parser", "def optargs(args):\n parser = OptionParser()\n parser.add_option(\"-a\", \"--abandon\", dest=\"abandon_current\", default=False, action=\"store_true\",\n help=\"Abandon outstanding changes when updating to migration\")\n parser.add_option(\"-d\", \"--dry\", dest=\"dry_run\", default=False, action=\"store_true\",\n help=\"Just update the revision number, don't perform updates\")\n (options, args) = parser.parse_args(args)\n return (options, args)", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def options(self, parser, env):\n pass", "def parse_user_arguments(*args, **kwds):\n\n parser = argparse.ArgumentParser(\n description = \"Generates the profiles of the input drug by using a network of expansion created expanding the protein-protein interactions from the targets of the drug\",\n epilog = \"@oliva's lab 2017\")\n parser.add_argument('-d','--drug_name',dest='drug_name',action = 'store',\n help = \"\"\" Name of the drug. If you do not provide targets for this drug or the number of targets is not large enough,\n the program will use this name to search for targets in BIANA database. If targets are provided, this field will be only used\n for naming purposes and will be completely optional.\n If the name of the drug has more than one word or special characters (parentheses, single quotes), introduce the name between \n double quotes. \"\"\")\n parser.add_argument('-t','--targets',dest='targets',action = 'store',\n help = 'Input file with the targets of the drug. Each target must be separated by a newline character.')\n parser.add_argument('-pt','--proteins_type_id',dest='proteins_type_id',action = 'store', default='geneid',\n help = 'Input the type of ID of the targets introduced / proteins of the network. It must be the same! (default is geneid).')\n parser.add_argument('-rad','--radius',dest='radius',action = 'store',default='3',\n help = \"\"\" Define the radius of expansion for the creation of the network from targets (default is 3). \"\"\")\n parser.add_argument('-tax','--taxid',dest='taxid',action = 'store',default='9606',\n help = \"\"\"Define the restriction of species for the creation of the network from targets using a Taxonomy ID (default is '9606' (human))\"\"\")\n parser.add_argument('-res','--restriction',dest='restriction',action = 'store',\n help = \"\"\"Define an experiment restriction for the creation of the network from targets.\\n\n Options:\\n\n - AFF: Use interactions at least described by affinity methods (i.e. Tandem Affinity Purification)\\n\n - Y2H: Use interactions at least described by yeast two hybrid methods (Y2H)\\n\n - eAFF: Use all interactions except those described by affinity methods (i.e. 
Tandem Affinity Purification)\\n\n - eY2H: Use all interactions except those described by yeast two hybrid methods (Y2H)\\n\n - None: Not use experiment restrictions\n \"\"\")\n parser.add_argument('-th','--threshold_list',dest='threshold_list',action = 'store',\n help = \"\"\"List of percentages that will be used as cut-offs to define the profiles of the drugs. It has to be a file containing:\n - Different numbers that will be the threshold values separated by newline characters. \n For example, a file called \"top_threshold.list\" containing:\n 0.1\n 0.5\n 1\n 5\n 10\n \"\"\")\n parser.add_argument('-ws','--workspace',dest='workspace',action = 'store',default=os.path.join(os.path.join(os.path.dirname(__file__), '..'), 'workspace'),\n help = \"\"\"Define the workspace directory where the data directory and the results directory will be created\"\"\")\n\n options=parser.parse_args()\n\n return options", "def options(argv=[]):\r\n parser = HendrixOptionParser\r\n return vars(parser.parse_args(argv)[0])", "def _arg_parse(self, **options) -> Dict[str, Any]:\n extra_options = dict()\n for key, value in options.items():\n private_key = f\"__{key}\"\n if hasattr(self, private_key):\n setattr(self, private_key, value)\n else:\n extra_options[key] = value\n\n return extra_options", "def _parse_cli_options(func):\n options = []\n for param in inspect.signature(func).parameters.values():\n if param.kind not in {param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY}:\n # Only keyword arguments are currently supported\n continue\n\n option_name = '--' + param.name.lower().replace('_', '-').strip('-')\n kwargs = {}\n if param.annotation in {str, int, float, bool}:\n # Only basic types are currently supported\n kwargs['type'] = param.annotation\n\n if param.default != param.empty:\n kwargs['default'] = param.default\n else:\n # If the param doesn't have a default, then it's required\n kwargs['required'] = True\n\n if param.annotation == bool or isinstance(param.default, bool):\n if param.default is True:\n # If the default of a boolean option is ``True``, then add a\n # ``--no-x` off switch\n option_name += '/--no-' + option_name.lstrip('-')\n else:\n # If the default is ``False``, just make it a basic flag\n kwargs['is_flag'] = True\n\n args = (option_name, param.name)\n\n options.append((args, kwargs))\n\n # Reverse it so the decorators are applied in the correct order\n return options[::-1]", "def parse_argumentos():\n parser = argparse.ArgumentParser()\n parser.add_argument('--n_clientes',\n help='Numero de Clientes. [3]',\n type=int,\n default=3)\n parser.add_argument('--n_garcons',\n help='Numero de garcons. [2]',\n type=int,\n default=2)\n parser.add_argument('--cap_garcons',\n help='Capacidade dos garcons. [2]',\n type=int,\n default=2)\n parser.add_argument('--n_rodadas',\n help='Numero de rodadas. [4]',\n type=int,\n default=4)\n return parser.parse_args()", "def _get_argparse_kwargs(self, group, **kwargs):\n kwargs = super(MultiOpt, self)._get_argparse_kwargs(group)\n if not self.positional:\n kwargs['action'] = 'append'\n else:\n kwargs['nargs'] = '*'\n return kwargs", "def handle_options( args ):\n parser = optparse.OptionParser( version = \"BitTest Bitbake Testing Tool version %s\" % __version__,\n usage = \"\"\"%prog [options] [test ...]\nExecutes the specified tests or if none are given all tests. 
The result is printed\non the cmd line\n\"\"\" )\n\n parser.add_option( \"-o\", \"--output\", help = \"print the output to the file\",\n action = \"store\", dest = \"output\", default = None )\n parser.add_option( \"-f\", \"--format\", help = \"print the output in the specified format\",\n action = \"store\", dest = \"format\", default = None )\n\n options, args = parser.parse_args( args )\n return options, args[1:]", "def Args(parser):\n flags.AddNetworkToParser(parser, positional=True)\n base.ASYNC_FLAG.AddToParser(parser)\n base.ASYNC_FLAG.SetDefault(parser, True)" ]
[ "0.66007906", "0.6587666", "0.6514029", "0.6472643", "0.64082843", "0.6338237", "0.6190322", "0.6178333", "0.61771864", "0.61676735", "0.61545366", "0.6126562", "0.61167157", "0.60940254", "0.60646427", "0.605548", "0.60479504", "0.60187566", "0.5961234", "0.59607315", "0.59420663", "0.5937053", "0.5932809", "0.5925599", "0.5923097", "0.5909557", "0.59088343", "0.5900713", "0.5897627", "0.58921576", "0.58611035", "0.58449286", "0.583607", "0.58212745", "0.5820005", "0.58148324", "0.58057696", "0.5796548", "0.5794716", "0.57645875", "0.57369035", "0.5729692", "0.571742", "0.5716327", "0.5715329", "0.5713661", "0.5713661", "0.56957996", "0.56955504", "0.56866306", "0.5683426", "0.56825006", "0.56775814", "0.5663415", "0.5653772", "0.56287616", "0.5624113", "0.5623386", "0.5621537", "0.5617502", "0.56152374", "0.5614232", "0.560837", "0.56015205", "0.5597608", "0.5597608", "0.55970985", "0.55929303", "0.5592507", "0.5587776", "0.55718654", "0.5571298", "0.557045", "0.5560029", "0.5558363", "0.55506617", "0.5548942", "0.5538938", "0.5536489", "0.55333215", "0.55241734", "0.5523497", "0.5522935", "0.5513594", "0.55053765", "0.54992545", "0.5497794", "0.54958206", "0.54946274", "0.5493195", "0.5493141", "0.54930717", "0.54926443", "0.5490493", "0.5488571", "0.5476182", "0.54754525", "0.54743856", "0.54713947", "0.54664433", "0.546513" ]
0.0
-1
Email summary of results to user.
def EmailResults(recipient, error_mesg, topdir, dumpfile, logfile, motcor_summary):
#*********************************************************************************
    if recipient is None:
        return
    elif 'noname' in recipient:
        return
    sender = 'preprocess'
    if 'Abnormal' in error_mesg > 0:
        subject = 'Problem while preprocessing %s' % topdir
    else:
        subject = 'Preprocessing complete for %s' % topdir
    mssg = error_mesg
    if logfile is not None and isinstance(logfile, str):
        f = open(logfile, 'r')
        lines = f.readlines()
        f.close()
        logged_errors = ''
        for i in xrange(len(lines)):
            if 'rror' in lines[i]:
                mssg += ''.join(lines[i-1:])
                break
    mssg += motcor_summary
    if dumpfile is not None:
        f = open(dumpfile, 'r')
        mssg += '\nSummary of processing:\n'
        mssg += f.read()
        f.close()
    send_email(recipient, subject, mssg, sender)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def publish_summary(self, jobs):\n pass", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()", "def message_user_results(self, request, successes, failures, action):\n\n self.message_user_success(request, successes, action)\n self.message_user_failure(request, failures, action)", "def composeSummaryEmail(self):\r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: boost@lists.boost.org\r\nReply-To: boost@lists.boost.org\r\nSubject: [Report] \"\"\"\r\n message += str(self.numFailures()) + \" failures on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nBoost regression test failures\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists all regression test failures on high-priority platforms.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n\\n'\r\n\r\n if self.numFailures() == 0:\r\n message += \"No failures! Yay!\\n\"\r\n return message\r\n \r\n # List the platforms that are broken\r\n any_broken_platforms = self.numReportableFailures() < self.numFailures()\r\n if any_broken_platforms:\r\n message += \"\"\"The following platforms have a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n message += \"\"\"\r\nFailures on these \"broken\" platforms will be omitted from the results below.\r\nPlease see the full report for information about these failures.\r\n\r\n\"\"\"\r\n \r\n # Display the number of failures\r\n message += (str(self.numReportableFailures()) + ' failures in ' + \r\n str(len(self.libraries)) + ' libraries')\r\n if any_broken_platforms:\r\n message += (' (plus ' + str(self.numFailures() - self.numReportableFailures())\r\n + ' from broken platforms)')\r\n \r\n message += '\\n'\r\n\r\n # Display the number of failures per library\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n num_failures = library.numFailures()\r\n message += ' ' + library.name + ' ('\r\n \r\n if library.numReportableFailures() > 0:\r\n message += (str(library.numReportableFailures())\r\n + \" failures\")\r\n \r\n if library.numReportableFailures() < num_failures:\r\n if library.numReportableFailures() > 0:\r\n message += ', plus '\r\n \r\n message += (str(num_failures-library.numReportableFailures()) \r\n + ' failures on broken platforms')\r\n message += ')\\n'\r\n pass\r\n\r\n message += '\\n'\r\n\r\n # Provide the details for the failures in each library.\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n if library.numReportableFailures() > 0:\r\n message += '\\n|' + library.name + '|\\n'\r\n for test in library.tests:\r\n if test.numReportableFailures() > 0:\r\n message += ' ' + test.name + ':'\r\n for failure in test.failures:\r\n platform = failure.platform\r\n if not platform.isBroken():\r\n message += ' ' + platform.name\r\n message += '\\n'\r\n\r\n return message", "def 
email_article_summary(to_address, summary_filename, start_year, start_month, end_year, end_month, num_articles):\n \n host = HOST\n from_address = FROM_ADDRESS\n body = \"\"\"\n Good morning,\n \n There were %i peer-reviewed papers produced by researchers at this institute between %i/%i and %i/%i. A summary file containing the front page from each article is attached with this email. Please print out these summary pages, highlight the author(s) on each article and pin them to the monthly papers noticeboard.\n \n Thanks a bunch,\n \n Skynet.\n \n \"\"\" % (num_articles, start_month, start_year, end_month, end_year, )\n \n recipients = [to_address, ADMIN_ADDRESS]\n \n logging.info(\"Preparing summary email report for %s\" % (', '.join(recipients), ))\n \n successful = True\n for recipient in recipients:\n \n message = MIMEMultipart()\n message[\"From\"] = from_address\n message[\"To\"] = recipient\n message[\"Subject\"] = \"Refereed papers summary between %i/%i and %i/%i\" % (start_month, start_year, end_month, end_year, )\n message[\"Date\"] = formatdate(localtime=True)\n \n message.attach(MIMEText(textwrap.dedent(body).lstrip()))\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(summary_filename, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(summary_filename))\n message.attach(part)\n \n server = smtplib.SMTP(host)\n \n try:\n failed = server.sendmail(from_address, to_address, message.as_string())\n server.close()\n \n except Exception as e:\n logging.critical(\"Unable to send email to %s. Error: %s\" % (recipient, str(e), ))\n successful = False\n \n else:\n logging.info(\"Email successfully sent to %s\" % recipient)\n \n \n return successful", "def SendResultTask(job_id):\n job = Job.objects.get(pk=job_id)\n owner = job.owner\n msg_plain = render_to_string('wordscraper/email.txt',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n msg_html = render_to_string('wordscraper/email.html',\n {'first_name': owner.first_name, 'last_name': owner.last_name,\n 'result_id': job.result_id})\n send_mail('Your CULTR web scraper results', msg_plain, 'no-reply@cultrtoolkit.com',\n [job.email], html_message=msg_html, fail_silently=False)\n logger.info(\"Sent result email to owner of job %d.\" % job_id)", "def post(self, request, *args, **kwargs):\n self.form = self.get_form()\n self.form.full_clean()\n results = self.get_queryset()\n nb_results = results.count()\n first_results = results[:10]\n site = get_current_site(self.request)\n querystring = self.get_form_data().urlencode()\n scheme = 'https'\n search_url = reverse('search_view')\n full_url = '{scheme}://{domain}{search_url}?{querystring}'.format(\n scheme=scheme,\n domain=site.domain,\n search_url=search_url,\n querystring=querystring)\n results_body = render_to_string('emails/search_results.txt', {\n 'user_name': self.request.user.full_name,\n 'aids': first_results,\n 'nb_results': nb_results,\n 'full_url': full_url,\n 'scheme': scheme,\n 'domain': site.domain,\n })\n send_mail(\n self.EMAIL_SUBJECT,\n results_body,\n settings.DEFAULT_FROM_EMAIL,\n [self.request.user.email],\n fail_silently=False)\n return HttpResponse('')", "def send_results(self, collected_results: list):\n\n for scan in collected_results:\n raw_scan = scan.original_results\n scan_time = raw_scan.scan_info.scan_start_time.ToJsonString()\n logger.info('Scan: ' + raw_scan.tool_name + ' run at ' + scan_time)\n for issue in 
raw_scan.issues:\n logger.info('Issue: ' + str(issue))", "def summary_print(self):\r\n self.ensure_one()\r\n self.sent = True\r\n #return self.env['ir.actions.report'].report_action(self, 'proandsys_purchase_14.summary_landed_report')\r\n return self.env.ref('proandsys_purchase_14.summary_landedcost').report_action(self)", "def printSummary(self):\n pass", "def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)", "def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')", "def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()", "def display_summary(self, *args):\n logger.debug(u\"{} Summary\".format(self.joueur))\n yield(self.remote.callRemote(\n \"display_summary\", self.currentperiod.todict()))\n self.joueur.info(\"Ok\")\n self.joueur.remove_waitmode()", "def trigger_result_email(\n self, project_id: str, topic_name: str,\n operation_counts_dict: Mapping[str, operation_counts.OperationCounts]\n ) -> None:\n topic = f'projects/{project_id}/topics/{topic_name}'\n message = {\n 'attributes': {\n 'content_api_results':\n json.dumps(\n operation_counts_dict,\n default=_convert_operation_counts_into_json)\n }\n }\n try:\n self._client.publish(topic, json.dumps(message).encode('utf-8'))\n except exceptions.GoogleCloudError as cloud_error:\n logging.exception('PubSub to mailer publish failed: %s', cloud_error)", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome 
== Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")", "def send_status_mail(self):\n from django.core.mail import send_mail\n subject = \"App Load Status | %s - %s | S%02d/-R%02d\" % (\n self.publication.account, self.title, self.load_count, self.reject_count)\n body = \"Account: %s\\nPublication: %s\\nStatus: %s\\nTime:%s - %s\\nLoaded: %02d\\nRejected: %02d\\n\\nComments\\n:%s\\n\" % (\n self.publication.account, self.publication, self.load_status, self.start_time, self.end_time, self.load_count, self.reject_count, self.comments)\n body = body + settings.EMAIL_DEFAULT_SIGNATURE\n if self.publication.id in [2,3,60,61,62,63,370,39]:\n return send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.ADMIN_EMAIL, 'anand.kumar@contify.com', 'rajesh.swain@contify.com', 'tapan.puhan@contify.com'], fail_silently=True)\n else:\n return send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.ADMIN_EMAIL], fail_silently=True)", "def results_summary(self, num_models=10, sort_metric=None):\n if self.state.dry_run:\n info(\"Dry-Run - no results to report.\")\n return\n\n # FIXME API documentation\n _results_summary(input_dir=self.state.host.results_dir,\n project=self.state.project,\n architecture=self.state.architecture,\n num_models=num_models,\n sort_metric=sort_metric)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def display_results():\n pass", "def report(self, result):\n raise NotImplementedError", "def _print_aggregate_results(\n task: Task, task_results: Dict[Task, List[List[Dict[str, Any]]]]\n) -> None:\n aggregate_task_result = aggregate_nvs_results(task_results[task])\n print(\"\")\n print(f\"Aggregate results for task={task}:\")\n pretty_print_nvs_metrics(aggregate_task_result)\n print(\"\")", "def show_results(self):\n print(\"Survey results:\")\n for response in self.responses:\n print('- ' + response)", "def print_results(self):\n pass", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n 
self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def report(self, **options):\n pass", "def dump_total_results(statistic_entries):\n individual_tests = sum([entry['correct answers'] + entry['wrong answers']\n for entry in statistic_entries])\n average_per_test = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(individual_tests)\n average_per_run = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(len(statistic_entries))\n\n best_time = min([entry['best time (s)'] for entry in statistic_entries])\n worst_time = max([entry['worst time (s)'] for entry in statistic_entries])\n\n print(\"\\nSummary for all done tests:\")\n print(\" %5d total test runs\" % len(statistic_entries))\n print(\" %5d individual tests\" % individual_tests)\n print(\" %5.1f individual tests per run\" % (individual_tests / float(len(statistic_entries))))\n print(\" %5.2f seconds per answer (average)\" % average_per_test)\n print(\" %5.2f seconds per run (average)\" % average_per_run)\n print(\" %5.2f seconds was best time.\" % best_time)\n print(\" %5.2f seconds was worst time.\" % worst_time)", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def generate_email(mail, env):\n race, results, standings = get_last_results_and_standings()\n next_race = get_next_race()\n\n subject = f\"Race digest - F1 2021 | Round {race.round} | {race.name}\"\n body = (f\"Results:\\n{results}\\n\\nCurrent standings:\\n\"\n f\"{standings}\\n\\nNext race: {next_race}\")\n\n login_info = env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']\n\n subs = update_db_and_get_subs(mail, (env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']))\n\n for sub in subs:\n send_email(subject, body, sub, login_info)", "def sales_rep():\n utils.messageSalesRep(request.form['name'], request.form['email'], request.form['message'])\n return redirect('/')", "def delegate_about_event():\n\n regs = Registration.objects.all()\n\n template = 'notifications/sprints_about_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Details of the individual events'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='madhusudancs@gmail.com')", "def recs():\n click.echo(\"Emailing recommendations to destination...\")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def show_results():\n\n if request.args.get('cheery'):\n msg_type = \"cheery\"\n message = \"\"\"\n Life’s like a movie. 
Write your own ending.\n \"\"\"\n auth = \"— Kermit the Frog\"\n elif request.args.get('dreary'):\n msg_type = \"dreary\"\n message = \"\"\"\n Bad days happen to everyone, but when one happens to you, just\n keep doing your best and never let a bad day make you feel bad\n about yourself.\n \"\"\"\n author = \"— Big Bird\"\n elif request.args.get('honest'):\n msg_type = \"honest\"\n message = \"\"\"\n Who care if me eat carrot or collard greens? Me also like broccoli\n and lettuce and lima beans. Me still Cookie Monster. That not a sham.\n \"\"\"\n author = \"— Cookie Monster\"\n\n return render_template(\"results.html\",\n message=message,\n author=author,\n msg_type=msg_type)", "def sendMail(self):\n content = '<table><thead><tr><th>IP</th><th>DOWN</th></tr></thead><tbody>'\n for f in self.failedList:\n content += '<tr><td style=\"padding: 8px;line-height: 20px;vertical-align: top;border-top: 1px solid #ddd;\">'\n content += f + '</td>'\n content += '<td style=\"color: red;padding: 8px;line-height: 20px;vertical-align: top;border-top: 1px solid #ddd;\">yes</td>'\n content += '</tbody></table>'\n mailConfig = settings.get_mail()\n sendemail.send(mailConfig.get('FromAddr'), mailConfig.get('ToAddr'), mailConfig.get('SMTPServer'), content)", "def _summarize_as_html_helper( ret, diagnostics=True):\n summary = []\n summary.append(\"<table>\\n\")\n for i in range(0, len(ret)):\n result = ret[i]\n f = result.function_name\n summary.append(\"<tr>\")\n summary.append(\"<td>Question\")\n summary.append(str(i+1))\n summary.append(\":</td>\")\n summary.append(\"<td colspan='2'>Called function \")\n summary.append(escapeHTML(f) + \"()\")\n summary.append(\"</td>\")\n summary.append(\"</tr>\\n\")\n if result.internal_error:\n summary.append(\"<tr><td></td><td></td><td class='error'>\");\n summary.append(\"Server error. Please retry and if the problem persists report a bug.\\n\");\n summary.append(result.internal_error);\n summary.append(\"</td></tr>\\n\")\n elif result.error:\n summary.append(\"<tr><td></td><td></td><td class='error'>Student error. 
\")\n summary.append(escapeHTML(result.error) + \"</td></tr>\\n\")\n else:\n summary.append(\"<tr><td></td><td colspan='2'>Return values:</td></tr>\\n\")\n ret_values = result.ret\n for j in range(0, len(ret_values)):\n summary.append(\"<tr>\")\n summary.append(\"<td></td>\")\n summary.append(\"<td>Return value \" + str(j+1) + \"=</td>\")\n summary.append(\"<td>\" + str(ret_values[j]) + \"</td>\")\n summary.append(\"</tr>\\n\")\n if diagnostics:\n if result.internal_error:\n summary.append(\"<tr><td></td><td></td><td>Diagnostics</td></tr>\")\n else:\n summary.append(\"<tr><td></td><td></td><td>MATLAB Output</td></tr>\")\n summary.append(\"<tr><td></td><td></td><td><pre>\")\n if result.internal_error:\n summary.append( result.internal_error )\n else:\n summary.append( result.output )\n summary.append(\"</pre></td></tr>\")\n summary.append(\"</table>\\n\")\n summary = \"\".join(summary)\n return summary", "def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')", "def summary_page() :\r\n logger.debug(\"\")\r\n model = session_info.get_user_model(session)\r\n return render_template( \"summary_page.html\" , model=model ,\r\n stat_types=param_stats.StatTypes )", "def display_results(summary):\n print ('Total running time %.2f secs (includes DB checks)'\n % summary.total_time)\n\n print 'OK:', summary.ok\n print 'Errors:', summary.errors\n\n # Display stats\n print 'Changes stats:'\n for var, s in summary.stats.iteritems():\n print '\\t%s:' % var,\n for x in s.iteritems():\n print '%s=%.2f' % x,\n print\n\n # Display profiling data\n print 'Profiling data:'\n for name, data in summary.profile.iteritems():\n print '\\t%s: %d calls, %.2fms' % (name, data['callcount'],\n data['time'] * 1000)", "def sendNotification(self):\n if not(self.errors or self.accounting):\n return S_OK()\n\n emailBody = \"\"\n rows = []\n for instanceName, val in self.accounting.iteritems():\n rows.append([[instanceName],\n [val.get('Treatment', 'No Treatment')],\n [str(val.get('LogAge', 'Not Relevant'))]])\n\n if rows:\n columns = [\"Instance\", \"Treatment\", \"Log File Age (Minutes)\"]\n emailBody += printTable(columns, rows, printOut=False, numbering=False, columnSeparator=' | ')\n\n if self.errors:\n emailBody += \"\\n\\nErrors:\"\n emailBody += \"\\n\".join(self.errors)\n\n self.log.notice(\"Sending Email:\\n\" + emailBody)\n for address in self.addressTo:\n res = self.nClient.sendMail(address, self.emailSubject, emailBody, self.addressFrom, localAttempt=False)\n if not res['OK']:\n self.log.error(\"Failure to send Email notification to \", address)\n continue\n\n self.errors = []\n self.accounting.clear()\n\n return S_OK()", "def log_summary(self, no_run_list):\n self.log_message('Entries not run' ,step='summary',status='start',name='config_file_reader')\n for name in no_run_list.keys():\n self.log_message('Did not run: '+name+', '+no_run_list[name],status='running')\n \n ret_total = 0\n for x in xrange(2):\n for ent in self.entries[x]:\n ret_total = ret_total + 0 if ent.return_val == None else ent.return_val\n self.log_message('Summary Complete, Run Time = 
('+str(self.total_time)+')',status='complete')\n return ret_total", "def summary(self) -> str:\n pass", "def main(argv):\n data = load_data(\"../car_sales.json\")\n summary = process_data(data)\n print(summary)\n summary_with_brakes = \"\"\n summary_with_lines = \"\"\n for item in summary:\n summary_with_brakes += item + '<br/>'\n summary_with_lines += item + '\\n'\n print(summary_with_brakes)\n # TODO: turn this into a PDF report\n table_data = cars_dict_to_table(data)\n reports.generate(\"/tmp/cars.pdf\", \"Cars\", summary_with_brakes, table_data)\n # TODO: send the PDF report as an email attachment\n recipient = \"{}@example.com\".format(os.environ.get('USER'))\n message = emails.generate('automation@example.com', recipient, 'Sales summary for last month', summary_with_lines, \"/tmp/cars.pdf\")\n emails.send(message)", "def account_summary(self):\n pass", "def text_summary_message(self):\n failed = [e for e in self.evaluations if not e.passes]\n if failed == []:\n return \"SUCCESS - all constraints evaluations pass\"\n else:\n return \"FAILURE: %d constraints evaluations failed\" % len(failed)", "def show_results(bill, tip, pct):\n \n total = tip + bill\n\n print(\"Bill amount: $\" + str(bill))\n print(\"Tip percentage: \" + str(pct) + \"%\")\n print(\"Tip amount due: $\" + str(tip))\n print(\"Total with tip: $\" + str(total))\n\n print(\"\"\"\n-----------------------------------\n GOOD BYE \n-----------------------------------\n\"\"\")", "def report(self):\n if self.integration.channels.n_mapping_channels > 0:\n msg = f'({self.get_mean_point_response()})'\n else:\n msg = '(---)'\n self.integration.comments.append(msg)", "def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))", "def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')", "def print_results(results):\n print(f\"Intial Entries: {results[0]}\")\n print(f\"Added Entries: {results[1]}\")\n print(f\"Final Entries: {results[2]}\")\n print(f\"Total Run Time: {results[3]}\")\n print(\"\\n\")", "def test_summary_with_results(self):\n add_constituency_result_line('X, 10, C')\n r = self.client.get('/summary')\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.data, 'html.parser')\n self.assertIs(soup.find(id='no-results'), None)\n self.assertIsNot(soup.find(id='results-table'), None)", "def test_rr_summary(results):\n # pylint: disable=unidiomatic-typecheck\n test_result = results.summary()\n assert type(test_result).__name__ == \"Summary\"\n assert type(test_result.tables) == list\n assert len(test_result.tables) == 3\n assert len(test_result.extra_txt) > 0", "def admin_post():\n try:\n total_sent = send_bulk_emails()\n except:\n return render_template('bulkfail.html')\n return render_template('bulksent.html', total_sent=total_sent)", "def createReport(query):\n sentiments = get_sentiments(query)\n print(\"Based on the query, %s has an average sentiment value of %d\", query, sentiments)", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, 
DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def composeTestingSummaryEmail(self):\r\n brokenPlatforms = 0\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n brokenPlatforms = brokenPlatforms + 1\r\n\r\n if brokenPlatforms == 0:\r\n return None;\r\n \r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: boost-testing@lists.boost.org\r\nReply-To: boost-testing@lists.boost.org\r\nSubject: [Report] \"\"\"\r\n message += str(brokenPlatforms) + \" potentially broken platforms on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nPotentially broken platforms for Boost regression testing\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists the high-priority platforms that are exhibiting a\r\nlarge number of regression test failures, which might indicate a problem\r\nwith the test machines or testing harness.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n'\r\n\r\n message += \"\"\"\r\nPlatforms with a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n return message", "def summary():\r\n\r\n average_age, counted = _find_average_age()\r\n male, female = _find_male_female_percentage()\r\n headings = [\"Total Number of Patients\", \"Average Age\",\r\n \"Patients Involved In Average Age\", \"Percentage of Male\",\r\n \"Percentage of Female\"]\r\n data = [len(resources), average_age, counted, male, female]\r\n return render_template(\"summary.html\", headings=headings, data=data)", "def send_report(done, failed, scraper, op=\"scrape\"):\n if done > 0:\n msg_success = \"👍 Successfully {}d {}\".format(op, _symbol_str(done))\n else:\n msg_success = \"No symbols where successfully scraped 😭\"\n if len(failed) > 0:\n msg_fail = \"⚠️️ Failed to {} {}: {}\".format(op,\n _symbol_str(len(failed)),\n \", \".join(failed))\n else:\n msg_fail = \"No symbols failed to scrape! 🤩\"\n\n msg = msg_success + '\\n' + msg_fail\n slack_notification(msg, scraper, status=Status.Warning)", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... 
', self.app.IGNORE_EXIT_FLAG)", "def print_acts_summary(master_results_data,\n master_results_pass,\n master_results_fail,\n master_results_unknown,\n pass_counter,\n fail_counter,\n unknown_counter,\n split_results=False,\n ):\n widths = [max(map(len, col)) for col in zip(*master_results_data)]\n if not split_results:\n for row in master_results_data:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('')\n print('Pass: %s '\n 'Fail: %s '\n 'Unknown: %s '\n 'Total: %s' % (pass_counter,\n fail_counter,\n unknown_counter,\n pass_counter+fail_counter+unknown_counter))\n else:\n print('')\n for row in master_results_pass:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('Pass: %s' % pass_counter)\n\n print('')\n for row in master_results_fail:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('Fail: %s' % fail_counter)\n if unknown_counter is not 0:\n print('')\n for row in master_results_unknown:\n print(' '.join((val.ljust(width)\n for val, width in zip(row, widths))))\n print('Unknown: %s' % unknown_counter)", "def send_results_output(**kwargs):\n logging.info(\"Printing query results to output\")\n print(kwargs[\"results_dataset_json\"])\n return True", "def perform(self):\n emails.notify(\n event=self.event_type,\n user=self.user,\n node=self.node,\n timestamp=self.timestamp,\n message=self.html_message,\n profile_image_url=self.profile_image_url,\n url=self.url\n )", "def report(self, stream):\n from collections import OrderedDict\n self.stats['total'] = sum(self.stats.values())\n for group in self.report_data.values():\n group.stats['total'] = sum(group.stats.values())\n self.report_file.write(self.jinja.get_template('report.html').render(\n report=OrderedDict(sorted(self.report_data.items())),\n stats=self.stats,\n ))\n self.report_file.close()\n if self.config.verbosity > 1:\n stream.writeln(\"-\" * 70)\n stream.writeln(\"HTML: %s\" % self.report_file.name)", "def print_results(self, out_file):\n extra_results = [\n # Total test methods processed, excluding reruns.\n [\"Test Methods\", len(self.result_events)],\n [\"Reruns\", self.test_method_rerun_count]]\n\n # Output each of the test result entries.\n categories = [\n # result id, printed name, print matching tests?, detail label\n [EventBuilder.STATUS_SUCCESS,\n \"Success\", False, None],\n [EventBuilder.STATUS_EXPECTED_FAILURE,\n \"Expected Failure\", False, None],\n [EventBuilder.STATUS_FAILURE,\n \"Failure\", True, \"FAIL\"],\n [EventBuilder.STATUS_ERROR,\n \"Error\", True, \"ERROR\"],\n [EventBuilder.STATUS_EXCEPTIONAL_EXIT,\n \"Exceptional Exit\", True, \"ERROR\"],\n [EventBuilder.STATUS_UNEXPECTED_SUCCESS,\n \"Unexpected Success\", True, \"UNEXPECTED SUCCESS\"],\n [EventBuilder.STATUS_SKIP, \"Skip\", False, None],\n [EventBuilder.STATUS_TIMEOUT,\n \"Timeout\", True, \"TIMEOUT\"],\n [EventBuilder.STATUS_EXPECTED_TIMEOUT,\n # Intentionally using the unusual hyphenation in TIME-OUT to\n # prevent buildbots from thinking it is an issue when scanning\n # for TIMEOUT.\n \"Expected Timeout\", True, \"EXPECTED TIME-OUT\"]\n ]\n\n # Partition all the events by test result status\n result_events_by_status = self._partition_results_by_status(\n categories)\n\n # Print the details\n have_details = self._has_printable_details(\n categories, result_events_by_status)\n if have_details:\n self._print_banner(out_file, \"Issue Details\")\n for category in categories:\n self._report_category_details(\n out_file, category, 
result_events_by_status)\n\n # Print the summary\n self._print_summary_counts(\n out_file, categories, result_events_by_status, extra_results)\n\n if self.options.dump_results:\n # Debug dump of the key/result info for all categories.\n self._print_banner(out_file, \"Results Dump\")\n for status, events_by_key in result_events_by_status.items():\n out_file.write(\"\\nSTATUS: {}\\n\".format(status))\n for key, event in events_by_key:\n out_file.write(\"key: {}\\n\".format(key))\n out_file.write(\"event: {}\\n\".format(event))", "def get_summary():\n callback = bottle.request.query.get('callback')\n db_conn = tools.get_db_conn('%s.db' % bottle.request.query.test_run_id)\n sql = 'SELECT code, reason, COUNT(*) FROM recs GROUP BY reason'\n result = tools.db_query(db_conn, sql)[1]\n results = [{'reason': item[1] or 'Incompleted (still running or aborted)',\n 'count': item[2], 'code': str(item[0])} for item in result if item[2]]\n return '{0}({1})'.format(callback, results)", "def PrintReport(self):\n print('=== Summary of Baidu Real-time Bidding test ===')\n print('Requests sent: %d' % self._requests_sent)\n print('Responses with a 200/OK HTTP response code: %d' % self._responses_ok)\n print('Responses with a non-200 HTTP response code: %d' % len(self._error))\n print('Good responses (no problems found): %d' % len(self._good))\n print('Invalid (unparseable) with a 200/OK HTTP response code: %d' % len( self._invalid))\n print('Parseable responses with problems: %d' % len(self._problematic))\n if self._responses_successful_without_bids == self._requests_sent:\n print('ERROR: None of the responses had bids!')", "def summary(self, fromdt, todt):\r\n totalSaved = self.miser.totalSaved(fromdt, todt) \r\n sumStr = \"%s: %s to %s\\n\" % (self.miser.name, fromdt, todt)\r\n sumStr += \"Total saved: %.2f\" % totalSaved\r\n\r\n sumStr += \"\\n\\nGoals:\\n\"\r\n sumStr += self._goalsMetStr(fromdt, todt, totalSaved)\r\n\r\n return sumStr", "def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def main():\n summary = process_text()\n # TODO: turn this into a PDF report\n paragraph = \"<br/>\".join(summary)\n title = \"Processed Update on {}\".format(date.today().strftime('%B %d, %Y'))\n attachment = f'{path}/processed.pdf'\n reports.generate_report(attachment, title, paragraph)\n\n # TODO: send the PDF report as an email attachment\n sender = \"automation@example.com\"\n receiver = \"{}@example.com\".format(os.environ.get('USER'))\n subject = \"Upload Completed - Online Fruit Store\"\n body = \"All fruits are uploaded to our website successfully. 
A detailed list is attached to this email.\"\n message = emails.generate_email(sender, receiver, subject, body, attachment)\n emails.send_email(message)", "def summary_str(self):\n if not self.results:\n return self.summary.empty() or ''\n elif self.state == Ok:\n return self.summary.ok(self.results) or ''\n return self.summary.problem(self.results) or ''", "def send_email_to_reporting_user(subject, template):\n send_connect_email(subject=subject,\n template=template,\n recipient=logged_by,\n logged_against=logged_against,\n site=site,\n comments=comments)", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def add_daily_summary(self):\n auth_date = self.report_date.strftime(\"%b %-d, %Y\")\n now = datetime.now().strftime(\"%x %X\")\n report_title = ' '.join([\n f'Report for {self.origin_value} participant consents authored on: {auth_date} 12:00AM-11:59PM UTC',\n f'(generated on {now} Central)'\n ])\n\n report_notes = [\n ['Notes:'],\n [f'Validation details on this sheet for {self.origin_value} participants only'],\n ['Checkbox validation currently only performed on GROR consents'],\n ['Total Errors can exceed Consents with Errors if any consents had multiple validation errors']\n ]\n\n self._add_text_rows(text_rows=[[report_title]], format_spec=self.format_specs.get('bold_text'))\n # Add any explanatory text / details about the report that have been included in the layout\n self._add_text_rows(text_rows=report_notes, format_spec=self.format_specs.get('legend_text'),\n row_pos=self.row_pos + 1)\n\n if not self._has_needs_correcting(self.consent_df):\n self._add_text_rows(text_rows=[['No consent validation errors detected']],\n format_spec=self.format_specs.get('italic_text'), row_pos=self.row_pos+1)\n\n # Daily summary counts for all the recently authored consents that were processed (regardless of errors)\n self._add_text_rows([['Total Consent Validation Counts']],\n format_spec=self.format_specs.get('bold_text'), row_pos=self.row_pos+1)\n self._add_consent_issue_count_header_section(hpo='All Entities')\n self._add_consent_issue_counts(self.consent_df, show_all_counts=True)", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def resultset_output(self, key, mail):\n\t\thandle = sys.stdout\n\t\tif key is not None:\n\t\t\thandle = open(os.path.normpath(self.output + \"/\" + key + \".mbox\"), \"a\")\n\t\tself.output_mail(handle, mail)", "def proceedings_detail():\n\n talks = Talk.objects.all()\n\n template = 'notifications/proceedings_detail_mail.html'\n\n for talk in talks:\n subject = 'SciPy.in 2011 Proceedings'\n message = loader.render_to_string(\n template, dictionary={'name': talk.speaker.username})\n\n talk.speaker.email_user(subject=subject, message=message,\n from_email='admin@scipy.in')", "def print_results(self, data: SimData) -> None:\n pass", "def send_email(self,to, subj):\r\n\r\n \"\"\" Currently not implemented. 
\"\"\"\r\n print(to+'-'+subj)\r\n print(self.body)\r\n # Send the finalized email here.\r", "def summary(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "async def summarise(self, ctx, start=None, end=None):\n if ctx.message.author.bot:\n return\n\n if not start or not end:\n await ctx.send(\n \"Insufficient arguments!\\n Arguements: <start ID> <end ID>\"\n )\n return\n\n summary, keywords, clean_messages = await convert_to_summary(\n ctx, start, end\n )\n\n if summary:\n summary = \"```\\n\" + summary + \"```\"\n await ctx.send(summary)\n else:\n await ctx.send(\"```Not enough messages to generate summary```\")\n\n if keywords:\n keyword_str = \"Keywords: \"\n for word in keywords:\n keyword_str += f\"{word}, \"\n\n keyword_str = \"```\\n\" + keyword_str + \"```\"\n await ctx.send(keyword_str)\n else:\n await ctx.send(\"```Not enough messages to generate keywords```\")", "def print_summary(self):\n self.model.summary()", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def run_mailshot(subject):\n\n users = get_users()\n articles = get_articles()\n users_articles = get_articles_by_user(users, articles)\n users_emails = get_emails_by_user(users_articles)\n mailshot_data = get_mailshot_data(subject, users_emails)\n send_mailshot(mailshot_data)", "def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')", "def summary(self, yname=None, xname=None, title=None, alpha=.05):\n # TODO: Make this raise upstream instead of just \"pass\"\n raise NotImplementedError # pragma: no cover\n # TODO: move the GenericLikelihoodModelResults implementation here?", "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')", "def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')", "def pytest_terminal_summary(self, terminalreporter, exitstatus):\n # pylint: disable=unused-argument\n terminalreporter.section(\"Test Information\")\n for test, info in self._info.items():\n for datum in info:\n terminalreporter.write(\"{}: {}\\n\".format(test, datum))", "def summary(self, yname=None, xname=None, title=None, alpha=.05):\n #TODO: add a summary text for options that have been used\n\n jvalue, jpvalue, jdf = self.jtest()\n\n top_left = [('Dep. Variable:', None),\n ('Model:', None),\n ('Method:', ['GMM']),\n ('Date:', None),\n ('Time:', None),\n ('No. 
Observations:', None),\n #('Df Residuals:', None), #[self.df_resid]), #TODO: spelling\n #('Df Model:', None), #[self.df_model])\n ]\n\n top_right = [#('R-squared:', [\"%#8.3f\" % self.rsquared]),\n #('Adj. R-squared:', [\"%#8.3f\" % self.rsquared_adj]),\n ('Hansen J:', [\"%#8.4g\" % jvalue] ),\n ('Prob (Hansen J):', [\"%#6.3g\" % jpvalue]),\n #('F-statistic:', [\"%#8.4g\" % self.fvalue] ),\n #('Prob (F-statistic):', [\"%#6.3g\" % self.f_pvalue]),\n #('Log-Likelihood:', None), #[\"%#6.4g\" % self.llf]),\n #('AIC:', [\"%#8.4g\" % self.aic]),\n #('BIC:', [\"%#8.4g\" % self.bic])\n ]\n\n if title is None:\n title = self.model.__class__.__name__ + ' ' + \"Results\"\n\n # create summary table instance\n from statsmodels.iolib.summary import Summary\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n yname=yname, xname=xname, title=title)\n smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,\n use_t=self.use_t)\n\n return smry", "def post(self):\n return send_email(request.args)", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def results_aggregator(self, names):\n\t\tfor name in names:\n\t\t\tresult = self.main(name)\n\t\t\tself.results.append(result)\n\t\t\tprint(\"'%s' has been written to the file.\" % result[0])\n\t\t\t\"\"\"result is formatted name, number, rating, review count\"\"\"", "async def report(cls, description, **kwargs):\n return await cls.message(description, color = discord.Color(0xbfbfbf))", "def test_summary_data(self):\n self.driver.get('http://psl-outbreak.herokuapp.com/report')\n self.driver.find_element_by_id('summary_find_out').click()" ]
[ "0.6340017", "0.6205421", "0.6195181", "0.61352056", "0.60605377", "0.60505825", "0.5933892", "0.5929879", "0.5912047", "0.57762545", "0.57740766", "0.57343775", "0.56606674", "0.56550497", "0.5646071", "0.56351393", "0.5634105", "0.5610652", "0.5604074", "0.56035924", "0.55907", "0.55873597", "0.55791926", "0.5572864", "0.5547156", "0.5494798", "0.5490097", "0.5475123", "0.54725397", "0.5471057", "0.5440959", "0.5439942", "0.5436555", "0.54311216", "0.5430835", "0.54285735", "0.54164773", "0.5403305", "0.540098", "0.5392949", "0.539172", "0.5388187", "0.53874654", "0.53807104", "0.53588605", "0.5344139", "0.53386813", "0.53304565", "0.5321506", "0.5320567", "0.5320064", "0.53190494", "0.5304972", "0.529483", "0.5291778", "0.52893", "0.5276401", "0.526915", "0.5269114", "0.52524513", "0.5249328", "0.5239888", "0.5238089", "0.5229264", "0.52268136", "0.52205575", "0.5213511", "0.521221", "0.5204114", "0.52008045", "0.5198663", "0.51971704", "0.5192273", "0.519011", "0.5187011", "0.5187011", "0.5187011", "0.5180724", "0.51682395", "0.51584756", "0.5156633", "0.5151443", "0.5145014", "0.5141146", "0.5140102", "0.51321507", "0.51314366", "0.5128212", "0.51256144", "0.512434", "0.5120689", "0.51170737", "0.5111711", "0.51115394", "0.51010036", "0.50999665", "0.5099547", "0.5090939", "0.50852334", "0.5084948" ]
0.648858
0
Walk through directories and categorize the data. This method builds the "info" attribute, a dictionary that characterizes each data series and defines the options and input/output filenames for each stage of processing.
def FindStuffToDo(self): while self.topdir.endswith('/'): self.topdir = self.topdir[:-1] if hasattr(self, 'LogProcess'): self.LogProcess() # Look for data to process. self.WalkPath(self.topdir) if os.path.islink('%s/anatomicals' % self.topdir): # os.walk won't follow links, so do this one manually. if not os.path.exists('%s/dicoms' % self.topdir): # Don't do a duplicate search. pathname = os.readlink('%s/anatomicals' % self.topdir) self.WalkPath(pathname) # Pair-up fieldmaps with EPI's self._SetFmapInfo() # Pair fieldmaps with strucural images. self._SetAnatTgts() # Assocate a ref.dat file with each EPI. self._GetRefdat() self._MakeEpiScratchDir() # Order the EPIs so the names are correct. self._GetEpiOrder() # Associate each EPI with an anatomical, determine if it was # acquired before or after the epi self._SetBaseEpi() self.motcor_summary = self.SummarizeMotionTargets() f = open('%s/motion_corr.txt' % self.logdir, 'w') f.write(self.motcor_summary) f.close() if self.verbose: print self.motcor_summary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)", "def categorize (self):\n\n fout = defaultdict(list)\n\n # Flat lists of files to collect keyed by platform,category\n collect_files = dict()\n for platform in wanted_files:\n for category, flist in wanted_files[platform].items():\n for f in flist:\n collect_files[(platform,category,f)] = list()\n\n for a in self.artifacts:\n try:\n with zfile.ZFile(a.lpath, 'r') as zf:\n if os.path.splitext(a.lpath)[-1] == '.rpm':\n a.info['plat'] = 'rhel'\n\n platform = a.info['plat']\n if platform not in platforms:\n continue\n\n zfiles = zf.getnames()\n if len(zfiles) == 0:\n print('No files in %s?' % a)\n for category, flist in wanted_files[platform].items():\n for f in flist:\n matches = [(a,x) for x in zfiles if os.path.basename(x) == f]\n if len(matches) > 0:\n collect_files[(platform,category,f)] += matches\n fout[category] += matches\n\n except zfile.tarfile.ReadError as e:\n print('ignoring artifact: %s: %s' % (a.lpath, str(e)))\n\n # Verify that all wanted combinations were matched\n errors = 0\n for missing in [x for x in collect_files if len(collect_files[x]) == 0]:\n errors += 1\n print('ERROR: No matching artifact files for', missing)\n\n if errors > 0:\n raise Exception('Not all wanted files found in artifacts, see above.')\n return fout", "def get_data(self, injparam=None, trueordering=None,\n systematic=None, direction=None):\n data_sets = OrderedDict()\n minimiser_info = OrderedDict()\n if injparam is not None:\n content = nsort(os.listdir(self.scandir))\n elif trueordering is not None:\n content = nsort(os.listdir(self.systdir))\n else:\n content = nsort(os.listdir(self.logdir))\n for basename in content:\n if injparam is not None:\n m = self.labels[injparam].subdir_re.match(basename)\n wanted_labels = self.labels[injparam]\n elif trueordering is not None:\n if direction is not None:\n m = self.labels[trueordering][systematic][\n direction].subdir_re.match(basename)\n wanted_labels = self.labels[trueordering][\n systematic][direction]\n else:\n m = self.labels[trueordering][\n systematic].subdir_re.match(basename)\n wanted_labels = self.labels[trueordering][systematic]\n else:\n m = self.labels.subdir_re.match(basename)\n wanted_labels = self.labels\n if m is None or 'pckl' in basename:\n continue\n\n if self.fluctuate_data:\n data_ind = int(m.groupdict()['data_ind'])\n dset_label = data_ind\n else:\n dset_label = wanted_labels.data_prefix\n if not wanted_labels.data_name in [None, '']:\n dset_label += '_' + wanted_labels.data_name\n if not wanted_labels.data_suffix in [None, '']:\n dset_label += '_' + wanted_labels.data_suffix\n\n lvl2_fits = OrderedDict()\n lvl2_fits['h0_fit_to_data'] = None\n lvl2_fits['h1_fit_to_data'] = None\n minim_info = OrderedDict()\n minim_info['h0_fit_to_data'] = None\n minim_info['h1_fit_to_data'] = None\n\n if injparam is not None:\n subdir = os.path.join(self.scandir, 
basename)\n elif trueordering is not None:\n subdir = os.path.join(self.systdir, basename)\n else:\n subdir = os.path.join(self.logdir, basename)\n\n # Account for failed jobs. Get the set of file numbers that\n # exist for all h0 an h1 combinations\n self.get_set_file_nums(\n filedir=subdir,\n injparam=injparam,\n trueordering=trueordering,\n systematic=systematic,\n direction=direction\n )\n fnum = None\n \n for fnum, fname in enumerate(nsort(os.listdir(subdir))):\n fpath = os.path.join(subdir, fname)\n for x in ['0', '1']:\n k = 'h{x}_fit_to_data'.format(x=x)\n if fname == wanted_labels.dict[k]:\n lvl2_fits[k] = self.extract_fit(fpath, 'metric_val')\n break\n # Also extract fiducial fits if needed\n if 'toy' in dset_label:\n ftest = ('hypo_%s_fit_to_%s'\n %(wanted_labels.dict['h{x}_name'.format(x=x)],\n dset_label))\n elif dset_label == 'data':\n ftest = ('hypo_%s_fit_to_data'\n %(wanted_labels.dict['h{x}_name'.format(x=x)]))\n if ftest in fname:\n k = 'h{x}_fit_to_{y}'.format(x=x, y=dset_label)\n lvl2_fits[k] = self.extract_fit(\n fpath,\n ['metric_val', 'params']\n )\n break\n k = 'h{x}_fit_to_{y}'.format(x=x, y=dset_label)\n for y in ['0', '1']:\n k = 'h{x}_fit_to_h{y}_fid'.format(x=x, y=y)\n r = wanted_labels.dict[k + '_re']\n m = r.match(fname)\n if m is None:\n continue\n if self.fluctuate_fid:\n fid_label = int(m.groupdict()['fid_ind'])\n else:\n fid_label = wanted_labels.fid\n if k not in lvl2_fits:\n lvl2_fits[k] = OrderedDict()\n minim_info[k] = OrderedDict()\n if fid_label in self.set_file_nums:\n lvl2_fits[k][fid_label] = self.extract_fit(\n fpath,\n ['metric', 'metric_val', 'params']\n )\n minim_info[k][fid_label] = self.extract_fit(\n fpath,\n ['minimizer_metadata', 'minimizer_time']\n )\n break\n\n if fnum is None:\n raise ValueError('No files?')\n\n data_sets[dset_label] = lvl2_fits\n minimiser_info[dset_label] = minim_info\n data_sets[dset_label]['params'] = self.extract_fit(\n fpath,\n ['params']\n )['params']\n\n if injparam is not None:\n self.data_sets[injparam] = data_sets\n self.minimiser_info[injparam] = minimiser_info\n elif trueordering is not None:\n if direction is not None:\n self.data_sets[trueordering][systematic][direction] = data_sets\n else:\n self.data_sets[trueordering][systematic]= data_sets\n else:\n self.data_sets = data_sets\n self.minimiser_info = minimiser_info", "def _analyze(self):\n for _, self.subdirs, files in os.walk(self.path):\n if self.p.sort:\n self.subdirs.sort()\n files.sort()\n for f in files:\n self._analyze_file(fileextlow(f), f)\n break # stop walk() from entering subdirectories\n\n self.p.nr_dirs += 1\n if self.lossless or self.compressed or self.videos:\n if self.lossless or self.compressed:\n if not self.images:\n if self.p.warn_covers:\n print(f\"{W}{self.path}{R}: no cover file\")\n self.p.nr_no_cover += 1\n elif not have_valid_cover_name(self.images):\n if self.p.warn_covers:\n print(f\"{W}{self.path}{R}: wrong cover names\")\n self.p.nr_wrong_cover_name += 1\n if self.lossless:\n if self.compressed:\n self.p.nr_mixed_lossless_compressed += 1\n else:\n self.p.nr_lossless_dirs += 1\n\n if self.cue:\n if not self.lossless:\n if self.p.warn_cue:\n print(f\"{W}{self.path}{R}: cue but no lossless files\")\n self.p.nr_lossy_cue += 1\n elif not self.compressed:\n if len(self.cue) == 1:\n self.p.nr_cue += 1\n else:\n if self.p.warn_cue:\n print(f\"{W}{self.path}{R}: {len(self.cue)} cue files\")\n self.p.nr_multiple_cue += 1\n\n self.p.nr_media_dirs += 1\n self.p.nr_lossless += len(self.lossless)\n self.p.nr_compressed += 
len(self.compressed)\n self.p.nr_video_files += len(self.videos)\n self.p.nr_ignored += self.ignored\n self.p.unknown.update(self.unknown)\n else:\n if self.images and not self.subdirs:\n self.p.nr_only_images += 1\n else:\n self.p.nr_no_media_dirs += 1", "def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)", "def categorize_classifier_files(out_dir):\n\n #sort all of the classifier files into a dictionary\n class_files = glob.glob(\"feature_extraction_m*\")\n class_file_dict = {\"positive\":[], \"negative\":[]}\n class_cand_dict = {\"m1\":class_file_dict, \"m2\":class_file_dict, \"m3\":class_file_dict, \"m4\":class_file_dict, \"m5\":class_file_dict}\n\n for filename in class_files:\n split_name = filename.split(\"_\")[-1].split(\".\")\n model_num = split_name[0]\n det = split_name[-1]\n class_cand_dict[model_num][det].append(filename)\n\n #get all of the pfd files into a list\n class_file_m1 = glob.glob(\"feature_extraction_m1*\")\n pfd_files = []\n for afile in class_file_m1:\n f = open(afile, \"r\")\n for line in f.readlines():\n pfd_files.append(line)\n f.close()\n\n #fill a dictionary with pfds and a value for how many positive IDs each pfd has\n pulsar_pfds={}\n for key in pfd_files:\n pulsar_pfds[key]=0\n for model_num in class_cand_dict.keys():\n if class_cand_dict[model_num][\"positive\"]:\n print(class_cand_dict[model_num][\"positive\"])\n f = open(class_cand_dict[model_num][\"positive\"][0], \"r\")\n for line in f.readlines():\n pulsar_pfds[line]+=1\n f.close()\n\n #For each pfd with >=3 positive IDs, write that pfd to 'positive' file, else write to 'negative' file\n pos_f = open(os.path.join(out_dir, \"LOTAAS_positive_detections.txt\"), \"w+\")\n neg_f = open(os.path.join(out_dir, \"LOTAAS_negative_detections.txt\"), \"w+\")\n for pfd_key in pulsar_pfds.keys():\n if pulsar_pfds[pfd_key]>=3:\n print(\"detected pulsar: {}\".format(pfd_key))\n pos_f.write(pfd_key.split(\"/\")[-1])\n else:\n neg_f.write(pfd_key.split(\"/\")[-1])\n pos_f.close()\n neg_f.close()", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n for charge_selection in self.charge_selections:\n key_dir = getKey(process_name, charge_selection)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.configDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, 
DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.configDir, dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n ##print \"self.dirs = \", self.dirs\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_name, sample_info, self.max_files_per_job, self.debug)\n \n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name)) \n\n is_mc = (sample_info[\"type\"] == \"mc\")\n lumi_scale = 1. if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"]\n apply_genWeight = sample_info[\"apply_genWeight\"] if (is_mc and \"apply_genWeight\" in sample_info.keys()) else False\n sample_category = sample_info[\"sample_category\"]\n triggers = sample_info[\"triggers\"]\n apply_trigger_bits = (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n\n inputFileList = inputFileLists[sample_name]\n for jobId in inputFileList.keys():\n if central_or_shift != \"central\" and not is_mc:\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttH\") and sample_category != \"signal\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttW\") and sample_category != \"TTW\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttZ\") and sample_category != \"TTZ\":\n continue\n\n # build config files for executing analysis code\n key_dir = getKey(process_name, charge_selection)\n key_analyze_job = getKey(process_name, charge_selection, central_or_shift, jobId)\n\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n print \"Warning: ntupleFiles['%s'] = %s --> skipping job !!\" % (key_job, ntupleFiles)\n continue\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : os.path.join(self.dirs[key_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%s_%i_cfg.py\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'histogramFile' : os.path.join(self.dirs[key_dir][DKEY_HIST], \"%s_%s_%s_%i.root\" % \\\n (process_name, charge_selection, central_or_shift, jobId)),\n 'logFile' : os.path.join(self.dirs[key_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%s_%i.log\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'sample_category' : sample_category,\n 'triggers' : sample_info[\"triggers\"],\n 'charge_selection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 
'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_denominator' : self.hadTau_selection_denominator,\n 'hadTau_selections_numerator' : self.hadTau_selections_numerator,\n 'absEtaBins' : self.absEtaBins,\n ##'use_HIP_mitigation_mediumMuonId' : sample_info[\"use_HIP_mitigation_mediumMuonId\"],\n 'use_HIP_mitigation_mediumMuonId' : True,\n 'is_mc' : is_mc,\n 'central_or_shift' : central_or_shift,\n 'lumi_scale' : 1. if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"],\n 'apply_genWeight' : sample_info[\"genWeight\"] if (is_mc and \"genWeight\" in sample_info.keys()) else False,\n 'apply_trigger_bits' : (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job])\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1 = getKey(process_name, charge_selection)\n if not key_hadd_stage1 in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage1_%s_%s_%s.root\" % \\\n (self.channel, process_name, charge_selection))\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1 = getKey(process_name, charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n if not key_hadd_stage2 in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2].append(self.outputFile_hadd_stage1[key_hadd_stage1])\n self.outputFile_hadd_stage2[key_hadd_stage2] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage2_%s_%s.root\" % \\\n (self.channel, charge_selection))\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n key_comp_jetToTauFakeRate_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_cfg.py\" % charge_selection),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s.log\" % charge_selection),\n 'looseRegion' : \"jetToTauFakeRate_%s/denominator/\" % charge_selection,\n 'tightRegion' : \"jetToTauFakeRate_%s/numerator/\" % charge_selection,\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n self.targets.append(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile'])\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_makePlots_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : 
os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"denominator\")\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_denominator_%s_cfg.py\" % (self.channel, charge_selection, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/denominator/%s\" % (charge_selection, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"numerator\", hadTau_selection_numerator)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_numerator_%s_%s_cfg.py\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_numerator_%s_%s.png\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/numerator/%s/%s\" % (charge_selection, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile)\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n \n logging.info(\"Done\")", "def collect_data():\n mapping = {'nginx': Nginx,\n 'apache': Apache,\n 'server': Server,\n 'buildout': Buildout}\n with utils.cd(utils.displayer_dir()):\n for dirpath, dirnames, filenames in 
os.walk('.'):\n # server_id = dirpath\n for json_file in [f for f in filenames if f.endswith('.json')]:\n kind = json_file.split('___')[0]\n filepath = os.path.join(dirpath, json_file)\n logger.debug(\"Loading info from %s\",\n os.path.abspath(filepath))\n json_content = open(filepath).read()\n klass = mapping[kind]\n obj = klass(json_content)\n data[kind][obj.id.lower()] = obj\n # Link buildouts and nginx sites.\n for nginx in data['nginx'].values():\n buildout_id = nginx.data.get('buildout_id')\n if buildout_id is not None:\n buildout = data['buildout'].get(buildout_id)\n if buildout is not None:\n nginx.buildout = buildout\n buildout.site = nginx\n # Link buildouts and apache sites.\n for apache in data['apache'].values():\n buildout_id = apache.data.get('buildout_id')\n if buildout_id is not None:\n buildout = data['buildout'].get(buildout_id)\n if buildout is not None:\n apache.buildout = buildout\n buildout.site = apache\n # Link buildouts+sites with servers.\n for kind in ['nginx', 'apache', 'buildout']:\n for obj in data[kind].values():\n hostname = obj.data.get('hostname')\n if hostname is not None:\n hostname = hostname.lower()\n server = data['server'].get(hostname)\n if server is None:\n logger.error(\"Server with hostname %s not found.\",\n hostname)\n else:\n obj.server = server\n if kind == 'nginx' or kind == 'apache':\n server.sites.append(obj)\n elif kind == 'buildout':\n server.buildouts.append(obj)\n # Link nginx gunicorn ports with servers.\n for kind in ['nginx']:\n for obj in data[kind].values():\n hostname = obj.data.get('hostname')\n port = obj.data.get('proxy_port')\n try:\n port = int(port)\n except:\n pass\n if hostname is not None and port is not None:\n hostname = hostname.lower()\n server = data['server'].get(hostname)\n if server is None:\n logger.error(\"Server with hostname %s not found.\",\n hostname)\n continue\n server.ports[port] = obj", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif", "def parse_directory_of_series_files(self):\n if self.series_base_dir is None or len(self.series_file_list) < 1:\n self.logger.warn('Fatal: Base Directory not set %s')\n raise Exception('Error Base Directory not set')\n\n self.logger.info('Parsing dir of files from %s' % self.series_base_dir)\n\n self.ref_series_df = pd.DataFrame([], columns=['SERIES_ID', 'SERIES_SEQ_ID', 
'CONTEXT',\n 'FRAG', 'MOL_ID', 'ACTIVITY'])\n\n required_col = ['SERIES_ID', 'SERIES_SEQ_ID', 'CONTEXT', 'FRAG', 'MOL_ID', 'ACTIVITY']\n max_series_id = 0\n\n for series_file in self.series_file_list:\n\n # print series_file\n temp_df = pd.read_csv(series_file) # , index_col=False)\n # print temp_df.columns\n\n # sanity check the data table for the columns we need\n for col in required_col:\n if col not in temp_df.columns:\n raise Exception(\"Input CSV %s does not have required columns: %s\" % (series_file, col))\n\n # re-sequence the series ID's\n if max_series_id == 0:\n max_series_id = temp_df['SERIES_ID'].max()\n else:\n max_series_id = self.ref_series_df['SERIES_ID'].max()\n # print max_series_id\n\n temp_df['SERIES_ID'] = temp_df['SERIES_ID'] + max_series_id\n temp_df['SOURCE_FILE'] = os.path.basename(series_file)\n\n # py2>3 explicit sort=False added\n self.ref_series_df = self.ref_series_df.append(temp_df, sort=False)\n self.logger.info('Appended dataframe shape %s to master dataframe %s' %\n (str(temp_df.shape), str(self.ref_series_df.shape)))\n # print ('Appended dataframe shape %s to master dataframe %s' % (str(temp_df.shape),\n # str(self.ref_series_df.shape)))\n # print self.ref_series_df['SERIES_ID'].max()\n\n self.series_comparison_df = self.ref_series_df", "def before_process(self,data,labels):\n # JM: if integer labels are given, then create different output\n # directories for each new label\n if all(isinstance(lbl,int) for lbl in labels):\n self.batch_dirs = \\\n [os.path.join(self.output_dir,str(lbl)) for lbl in labels]\n # JM: otherwise create the same output directory for each image\n else:\n self.batch_dirs = [self.output_dir] * len(data)\n\n # create output directories if they don't already exist\n uniques = set(self.batch_dirs)\n for out_dir in uniques:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n self.batch_index = 0", "def main(self, verbose=0):\n indepdict=self.scan_for_loop(self.indeploop)\n pegdict1 = self.scan_for_loop(self.pegloop1)\n pegdict2 = self.scan_for_loop(self.pegloop2)\n if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:\n return dict()\n alldict = dict(indepdict)\n alldict.update(pegdict1)\n alldict.update(pegdict2)\n indepcomb=self.get_combo_list(indepdict, 0)\n pegcomb1=self.get_combo_list(pegdict1, 1)\n pegcomb2=self.get_combo_list(pegdict2, 1)\n allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)\n datasets = self.prepare_looped_datasets(alldict, allcombs)\n createdfiles = self.create_input_files(datasets)\n if verbose == 1:\n self.print_list(indepcomb)\n self.print_list(pegcomb1)\n self.print_list(pegcomb2)\n self.print_list(allcombs)\n for datakey in datasets:\n self.print_list(datasets[datakey])\n return createdfiles", "def _preprocess(self):\n for f in self._variables:\n self._path.joinpath(f).mkdir(parents=True, exist_ok=True)\n\n for i in tqdm(range(self._size)):\n linear, w = self._get_spectrograms(i)\n self._store_entry(i, linear, w)", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n 
os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])", "def extractSeriesInfo(self, inputdir):\n self.m_status.SetLabelText(\"Detecting DICOM data ... please wait\")\n allfiles = [y for x in walk(inputdir) for y in iglob(join(x[0], '*.IMA'))]\n self.controller.parseDicom(self, allfiles)\n # n = 1\n # for filename in allfiles:\n # try:\n # if not self.db.hasFile(filename):\n # dcm = dicom.read_file(filename)\n # updatemsg = \"Detecting DICOM data ... 
%d of %d\" % (n, len(allfiles))\n # self.m_status.SetLabelText(updatemsg)\n # n += 1\n #\n # # Check DICOM header info\n # series_num = str(dcm.SeriesInstanceUID)\n # uuid = self.generateuid(series_num)\n # imagetype = str(dcm.ImageType[2])\n # dicomdata = {'uuid': uuid,\n # 'patientid': str(dcm.PatientID),\n # 'patientname': str(dcm.PatientName),\n # 'seriesnum': series_num,\n # 'sequence': str(dcm.SequenceName),\n # 'protocol': str(dcm.ProtocolName),\n # 'imagetype': imagetype\n # }\n #\n # if not self.db.hasUuid(uuid):\n # self.db.addDicomdata(dicomdata)\n # if not self.db.hasFile(filename):\n # self.db.addDicomfile(uuid, filename)\n # except InvalidDicomError:\n # print(\"Not DICOM - skipping: \", filename)\n # continue\n # Load for selection\n # Columns: Toggle Select\n # Text PatientID\n # Text Sequence\n # Text Protocol\n # Text Image Type\n # Text Num Files\n # Text Series ID\n\n # for suid in db.getNewUuids():\n # numfiles = db.getNumberFiles(suid)\n # self.m_dataViewListCtrl1.AppendItem(\n # [True, self.controller.db.getDicomdata(suid, 'patientname'),\n # self.controller.db.getDicomdata(suid, 'sequence'),\n # self.controller.db.getDicomdata(suid, 'protocol'),\n # self.controller.db.getDicomdata(suid, 'imagetype'), str(numfiles),\n # self.controller.db.getDicomdata(suid, 'seriesnum')])\n #\n # msg = \"Total Series loaded: %d\" % self.m_dataViewListCtrl1.GetItemCount()\n # self.m_status.SetLabelText(msg)", "def run(self):\n super().run()\n date_subdirs = sorted(self.list_directory(self.input_location,\n self.input_location_type))\n for date_subdir in date_subdirs:\n if not re.search(\"^([\\d]{4}-[\\d]{2}-[\\d]{2})\", date_subdir):\n print(\"{}: Directory name {} not in YYYY-MM-DD format\"\\\n .format(self.name, date_subdir))\n continue\n date_path = os.path.join(self.input_location, date_subdir, \"RAW\")\n if len(self.list_directory(date_path, self.input_location_type)) == 0:\n continue\n processed_ok = self.process_single_date(date_path)\n if not processed_ok:\n continue", "def prepare_series(self, result_dir):\n output = {}\n output['title'] = self.title\n output['x'] = self.x\n output['y'] = self.y\n output['series'] = []\n for series in self.series:\n idfile = os.path.join(result_dir, 'benchmark_' + str(series['id']) +\n '.json')\n rows = json_from_file(idfile)\n # it is assumed each row has the same names of columns\n keys = rows[0].keys()\n # skip the series if it does not have required keys\n if self.x not in keys or self.y not in keys:\n continue\n points = [[row[self.x], row[self.y]] for row in rows]\n output['series'].append({'label': series['label'], 'points': points})\n # save the series to a file\n series_path = self._series_file(result_dir)\n if os.path.exists(series_path):\n figures = json_from_file(series_path)\n else:\n figures = {}\n figures[self.key] = output\n with open(series_path, 'w') as file:\n json.dump(figures, file, indent=4)\n # mark as done\n self.output['done'] = True", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = 
(\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def processData(self,data_file,directory,comment = '',way='mpcc',opt=[1024]):\n self.dirs = [name for name in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, name))]\n\n # directory names are names of instruments\n #self.dirs =\n # ['cel', 'cla', 'flu', 'gac', 'gel', 'org', 'pia', 'sax', 'tru', 'vio', 'voi']\n self.dirs = [name for name in os.listdir(directory)\n if os.path.isdir(os.path.join(directory, name))]\n \n # example: self.files['sax'] =\n # IRMAS-TrainingData\\sax\\006__[sax][nod][cla]1686__1.wav\n self.files = {}\n for d in self.dirs:\n self.files[d] = [] \n sub_dir = os.path.join(directory, d)\n for filename in glob.glob(os.path.join(sub_dir, '*.wav')):\n self.files[d].append(filename)\n\n # Ex) self.output['cel']: np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n i = 0\n for name in self.dirs:\n temp = []\n for j in range(len(self.dirs)):\n if i == j:\n temp.append(1)\n else:\n temp.append(0)\n self.output[name] = np.array(temp)\n i +=1\n\n #self.X = [] # list of input vectors\n #self.Y = [] # list of output vectors\n t0 = time.time()\n for name in self.dirs:\n t1 = time.time()\n for file in self.files[name]:\n #input_vector = processFile(file,length=length1,q=q1,fs_in=fs_in1,divide=divide1,plot = False)\n if way == 'mpcc':\n input_vector = processMPCC(file,*opt)\n elif way == 'fft':\n input_vector = processFFT(file,*opt)\n else:\n raise ValueError('Invalid Way, valid types include: \\'mpcc\\' or \\'fft\\'')\n if input_vector != 'failed':\n self.X.append(input_vector)\n self.Y.append(self.output[name])\n print('Time take to process '+str(name)+ ': ' + str((time.time()-t1)/60)[0:4] + ' min.')\n print('Total Processing Time: ' + str((time.time()-t0)/60)[0:4] + ' min.')\n\n # Now we can store all of the data in a json\n # Need to store self.X, self.Y, self.dirs,self.output,self.files,self.data\n # self.dirs is a list of strings -> fine\n # self.files is a dict() with string:string -> fine\n # self.output is a dict() with string:np.array\n output = {}\n for d 
in self.output:\n out_list = []\n for value in self.output[d]:\n out_list.append(int(value))\n output[d] = out_list # -> fine\n #self.X is a list of np.arrays\n X = []\n for i in range(len(self.X)):\n x = []\n for ele in self.X[i]:\n x.append(float(ele))\n X.append(x) # -> fine\n #self.Y is a list of np.arrays\n Y = []\n for i in range(len(self.Y)):\n y = []\n for ele in self.Y[i]:\n y.append(float(ele))\n Y.append(y) # -> fine\n \n store = {}\n store['dirs'] = self.dirs # good\n store['output'] = output # good\n store['files'] = self.files # good\n store['X'] = X # good\n store['Y'] = Y # good\n store['comment'] = comment\n with open(data_file, 'w') as outfile:\n json.dump(store, outfile)\n print('Preprocessed data stored in ' + str(data_file))\n return", "def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "def process_patients(self):\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n for patient in os.listdir(self.root_dir):\n if \".csv\" in patient or \".md\" in patient:\n continue\n patient_pth = os.path.join(self.root_dir, patient)\n out_patient_pth = os.path.join(self.out_dir, patient)\n num_imgs = len(os.listdir(patient_pth)) // 2 # Half the length to exclude mask counts\n img_stack, msk_stack = self._stack_images_masks_flair(patient_pth, patient, num_imgs)\n if not os.path.exists(out_patient_pth):\n os.mkdir(out_patient_pth)\n self._make_slices(img_stack, msk_stack, patient, out_patient_pth)", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def run(self):\n for lof in self.data_files:\n if lof[0]:\n base = getattr(self, 'install_' + lof[0])\n else:\n base = getattr(self, 'install_base')\n dir = convert_path(lof[1])\n if not os.path.isabs(dir):\n dir = os.path.join(base, dir)\n elif self.root:\n dir = change_root(self.root, dir)\n self.mkpath(dir)\n\n files = lof[2]\n if len(files) == 0:\n # If there are no files listed, the user must be\n # trying to create an empty directory, so add the\n # directory to the list of output files.\n self.outfiles.append(dir)\n else:\n # Copy files, adding them to the list of output files.\n for f in files:\n f = 
convert_path(f)\n (out, _) = self.copy_file(f, dir)\n #print \"DEBUG: \", out # dbg\n self.outfiles.append(out)\n \n\n return self.outfiles", "def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)", "def process_samples(self, itr, paths):\n samples_data = dict()\n critic_rewards = self.critic.critique(itr, paths)\n for level in self.hierarchy:\n samples_data[level.depth] = level.process_samples(itr, paths, critic_rewards)\n return samples_data", "def FeaturesGen(ChopChopresults, outputDir, sgRNA_type):\n \n #make output Directory if it does not already exist\n if not os.path.isdir(outputDir):\n os.makedirs(outputDir)\n \n #list the directory contents \n for i,j,k in os.walk(ChopChopresults): #use walk to go through and find all directories\n \n if j == []: #no subdirectories\n saveDF = pd.DataFrame() #initiate dataframe\n for target in k: #loop through to find the sgRNA sequences\n if target.endswith('.offtargets'):\n with open(os.path.join(i,target), 'r+') as f:\n guide = f.readlines()\n #add them to a dataframe\n temp = pd.Series()\n temp['guideNo'] = target.split('.')[0] + sgRNA_type\n temp['guideSeq'] = guide.pop(0).rstrip()\n \n saveDF = saveDF.append(temp.to_frame().transpose())\n saveDF['type'] = 'sgRNA'\n \n if sgRNA_type == 'General' or sgRNA_type == None:\n saveDF['fwd'] = 'pink'\n saveDF['rev'] = 'green'\n elif sgRNA_type == 'GG':\n saveDF['fwd'] = 'yellow'\n saveDF['rev'] = 'plum'\n elif sgRNA_type == 'GA':\n saveDF['fwd'] = 'cyan'\n saveDF['rev'] = 'cornflower blue'\n \n \n #save to txt file with tab delimiter\n saveDF.to_csv(os.path.join(outputDir, os.path.basename(i) + '_features.txt'),\\\n index = False, header = False, sep = '\\t')\n \n del saveDF", "def tree_construct(self, *args, **kwargs):\n l_files = []\n d_constructCallback = {}\n fn_constructCallback = None\n d_probe = {}\n l_range = []\n\n for k, v in kwargs.items():\n if k == 'l_files': l_files = v\n if k == 'constructCallback': fn_constructCallback = v\n if k == 'd_probe': d_probe = v\n\n if d_probe: l_files = d_probe['l_files']\n index = 0\n total = len(l_files)\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(l_files, desc = ' Constructing tree')\n else:\n l_range = l_files\n for l_series in l_range:\n if len(l_series):\n str_path = os.path.dirname(l_series[0])\n l_series = [ os.path.basename(i) for i in l_series]\n # self.simpleProgress_show(index, total)\n self.d_inputTree[str_path] = l_series\n if fn_constructCallback:\n kwargs['path'] = str_path\n d_constructCallback = fn_constructCallback(l_series, **kwargs)\n self.d_inputTreeCallback[str_path] = d_constructCallback\n self.d_outputTree[str_path] = \"\"\n index += 1\n return {\n 'status': True,\n 'd_constructCallback': d_constructCallback,\n 'totalNumberOfAllSeries': index,\n 'd_probe': d_probe\n }", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if 
self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def do_dir(arguments):\n #print(\"Outputting in directory: \" + dsum)\n \n if not os.path.exists(arguments.file_pathout): \n os.mkdir(arguments.file_pathout)\n\n num = 0\n detected = 0\n fileCount = 0\n zero_image = 0\n bad_image = 0\n bad_image_paths = []\n\n # debug/verbose\n if arguments.v:\n print('DEBUG: shape=%g area=%g contour=%g' % (arguments.shape,arguments.area,arguments.contour))\n \n ffs = glob.glob(arguments.file_pathin+'/*.FIT') + glob.glob(arguments.file_pathin+'/*.fit') + \\\n glob.glob(arguments.file_pathin+'/*.FTS') + glob.glob(arguments.file_pathin+'/*.fts') + \\\n glob.glob(arguments.file_pathin+'/*.FITS') + glob.glob(arguments.file_pathin+'/*.fits')\n ffs = list(set(ffs)) # needed for dos\n ffs.sort() # on linux wasn't sorted, on dos it was \n f = open(arguments.file_pathout+'/summary.txt','w') # Creates summary text file\n f.write('Streaks found in files: \\n') #Creates first line for summary file\n\n sf = arguments.start_frame\n ef = arguments.end_frame\n \n if sf <= 0:\n sf = 1\n \n if ef <= 0 or ef > len(ffs):\n ef = len(ffs)\n \n if ef < sf:\n temp = sf\n sf = ef\n ef = temp\n\n print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))\n for ff in ffs[sf-1:ef]:\n # creates directory one directory back from the folder which contains fits files\n \n num = do_one(ff,arguments.file_pathout+'/'+ff[ff.rfind(os.sep)+1:ff.rfind('.')],arguments.shape,arguments.area,arguments.contour)\n \n \n if num == 0:\n zero_image += 1\n elif num < 0:\n bad_image += 1\n bad_image_paths.append(ff)\n else:\n detected += int(num) #Counter of how many streaks detected\n f.write(ff + '\\n') \n fileCount += 1 #Counter for how many files analyzed \n print(\"\\n\")\n # Produce and write summary file \n f.write('\\n' 'Files analyzed: ' + str(fileCount)+ '\\n' )\n f.write('Streaks detected: ' + str(detected) + '\\n' )\n f.write('Files with no detections: ' + str(zero_image) + '\\n')\n f.write('Bad files: ' + str(bad_image)+ '\\n')\n \n temp_string = \"\\n\"\n temp_string = temp_string.join(bad_image_paths)\n f.write(temp_string)\n \n f.write('\\n\\n')\n\n if arguments.diff:\n f.write('Streaks found in Files: \\n')\n num = 0\n detected = 0\n fileCount = 0\n zero_image = 0\n bad_image = 0\n bad_image_paths = []\n dfs = []\n# print('Computing %d differences' % (ef-sf+1))\n for i in range(len(ffs)-1):\n dfs.append(arguments.file_pathout+'/'+ffs[i+1][len(arguments.file_pathin):]+'DIFF')\n# mk_diff(ffs[i],ffs[i+1],dfs[i],v)\n \n if sf <= 0:\n sf = 1\n\n if ef <= 0 or ef > len(dfs):\n ef = len(dfs)\n \n if ef <= sf:\n temp = sf\n sf = ef\n ef = temp\n\n print('Processing %d files from %d to %d' % ((ef-sf+1), sf, ef))\n i = sf-1\n for df in dfs[sf-1:ef]:\n try:\n mk_diff(ffs[i],ffs[i+1],dfs[i],arguments.v)\n # num = do_one(df,dsum+'/'+df[df.rfind(os.sep)+1:df.rfind('.')],shape,area,contour)\n #diff_file = dsum+'/'+df[df.rfind(os.sep)+1:df.find('.')]+'DIFF'\n \n #directory one directory back\n new_dir = arguments.file_pathout+'/'+df[df.rfind(os.sep)+1:df.rfind('.')]+'DIFF'\n num = do_one(df,new_dir,arguments.shape,arguments.area,arguments.contour)\n 
os.remove(df)\n \n except:\n num=-1\n sys.stdout.write('X')\n \n\n\n if num == 0:\n zero_image += 1\n elif num < 0:\n bad_image += 1\n bad_image_paths.append(df)\n else:\n detected += int(num) #Counter of how many streaks detected\n f.write(df + '\\n') \n fileCount += 1 #Counter for how many files analyzed \n i += 1\n print(\"\\n\")\n # Produce and write summary file \n f.write('\\n' 'Files analyzed: ' + str(fileCount)+ '\\n' )\n f.write('Streaks detected: ' + str(detected) + '\\n' )\n f.write('Files with no detections: ' + str(zero_image) + '\\n')\n f.write('Bad files: ' + str(bad_image)+ '\\n')\n\n temp_string = \"\\n\"\n temp_string = temp_string.join(bad_image_paths)\n f.write(temp_string)\n\n f.close()\n else:\n f.close()", "def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover", "def process_group(directory: str, files: dict, channel: str, year: str) -> 
dict:\n if len(files) == 0:\n raise Exception('empty file list for directory {}'.format(directory)) + 1\n\n dataframes = {}\n for name, ifile in files.items():\n # equivalent of hadding\n update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree')\n current_dfs = []\n for update_df in update_dfs:\n update_df.fillna(-999, inplace=True)\n current_dfs.append(update_df)\n \n if len(current_dfs) > 0:\n dataframes[name] = pd.concat(current_dfs)\n\n dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]})\n return dataframes", "def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n #use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)", "def multiple(folder_name: str,\r\n min_plant_pixels: int = MIN_PLANT_SIZE,\r\n output_options = [['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'distances'],\r\n \r\n ['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers'],\r\n \r\n ['dirt',\r\n 'ditches',\r\n 'rows',\r\n 'clusters',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers',\r\n 'lines']\r\n ]) -> None:\r\n\r\n # Go to the specified folder\r\n ls = listdir(folder_name)\r\n ls = [join(folder_name, i) for i in ls]\r\n\r\n # Check if the folder exists\r\n if join(folder_name, 'Analysis') in ls:\r\n\r\n # If it does, rename the old folder\r\n new_name = join(folder_name, 'Analysis')\r\n while new_name in ls:\r\n new_name += '_old'\r\n \r\n rename(join(folder_name,'Analysis'), new_name)\r\n\r\n # Create new folders inside the given directory\r\n mkdir(join(folder_name, 'Analysis'))\r\n mkdir(join(folder_name, 'Analysis/Images'))\r\n mkdir(join(folder_name, 'Analysis/Data'))\r\n \r\n # Gather the images to be analysed\r\n co = 
0\r\n pics = [j for j in ls if isfile(j)]\r\n le = len(pics)\r\n\r\n # Analyze each of the pictures\r\n for i in pics:\r\n\r\n # Make the field\r\n field = just_field(i, min_plant_pixels)\r\n\r\n # Measure the field and save results\r\n print('Saving data...\\n')\r\n ruler = Ruler(field)\r\n \r\n ruler.output_distances(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Distances.csv'.format(basename(i).split('.')[0])\r\n ) \r\n )\r\n \r\n ruler.output_row_info(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Rows.csv'.format(basename(i).split('.')[0])\r\n )\r\n )\r\n\r\n # Make and save visuals\r\n print('Saving pictures...\\n')\r\n for k in range(len(output_options)):\r\n output_options[k]\r\n img = field.make_visual(ruler, output_options[k])\r\n img.save(\r\n join(folder_name,\r\n 'Analysis/Images/{}_Visual_{}.png'.format(basename(i).split('.')[0], k + 1)))\r\n\r\n # Increment the progress meter\r\n co += 1\r\n print('Completed {}/{} images\\n\\n'.format(co, le))", "def parse_categories(category, categoryFolder=None):\n\n files = []\n # return File.get_files_by_category(cat)\n if \"image\" in str(category):\n categoryFolder = File.get_folder_by_name(category, parent=categoryFolder)\n for folder in File.get_folders_of_folder_by_keywords(categoryFolder):\n if not folder: continue\n for image in File.get_images_of_folder(folder):\n file = File()\n setattr(file, \"path\", imageget_path())\n setattr(file, \"category\", folder.get_title())\n files.append(file)\n elif \"video\" in str(category):\n categoryFolder = File.get_folder_by_name(category, parent=categoryFolder)\n for folder in File.get_folders_of_folder_by_keywords(categoryFolder):\n if not folder: continue\n videos = File.get_videos_of_folder(folder)\n # if len(videos) > 0:\n # files.append(folder)\n for video in videos:\n file = File()\n setattr(file, \"path\", video.get_path())\n setattr(file, \"category\", folder.get_title())\n files.append(file)\n elif \"performer\" in str(category):\n categoryFolder = File.get_folder_by_name(category, parent=categoryFolder)\n for performer_ in File.get_folders_of_folder_by_keywords(categoryFolder):\n # for performer in File.get_folders_of_folder(folder):\n if not performer_: continue\n p = Folder()\n setattr(p, \"path\", performer_.get_path())\n setattr(p, \"category\", categoryFolder.get_title())\n files.append(p)\n # elif \"galler\" in str(category):\n else:\n categoryFolder = File.get_folder_by_name(category, parent=categoryFolder)\n for folder in File.get_folders_of_folder_by_keywords(categoryFolder):\n if not folder: continue\n galleries = File.get_folders_of_folder(folder)\n if len(galleries) > 0:\n files.append(folder)\n for gallery in galleries:\n file = Folder()\n setattr(file, \"path\", galleryget_path())\n setattr(file, \"category\", folder.get_title())\n files.append(file)\n return files", "def dataset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f).category(filebase(d))", "def encompass(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n for filename in files:\n fullname = os.path.join(root, filename)\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)", "def main():\n\n file_name_base = \"./lab-record/result/fairness/\"\n scenarios = 
['lan', 'wan1', 'wan2']\n scenario = scenarios[2]\n\n algorithms = [\"bbr\", \"scalable\", \"bic\", \"highspeed\", \"htcp\", \"hybla\",\n \"illinois\", \"vegas\", \"yeah\"]\n names = [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\", \"YeAH\"]\n\n test_types = [\"vs_reno\", \"vs_cubic\", \"vs_itself\"]\n\n fsize = 36\n \n index_reno = []\n index_cubic = []\n index_itself = []\n\n data = []\n \n print 'Loadint statistics for ' + file_name_base + '/' + scenario\n\n for algorithm in algorithms:\n for test in test_types:\n path_base = file_name_base + \"/\" + scenario + \"/\" + test + \"/\" + \\\n algorithm + \"/\"\n if test == \"vs_itself\":\n exp_name = names[algorithms.index(algorithm)] + \"_1\"\n con_name = names[algorithms.index(algorithm)] + \"_2\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \"_1.log\"\n con_filename = \"/\" + algorithm + \"_2.log\"\n process(path_base, exp_filename, con_filename, index_itself)\n if test == \"vs_reno\":\n exp_name = names[algorithms.index(algorithm)]\n con_name = \"Reno\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/reno.log\"\n process(path_base, exp_filename, con_filename, index_reno)\n if test == \"vs_cubic\":\n con_name = \"CUBIC\"\n exp_name = names[algorithms.index(algorithm)]\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/cubic.log\"\n process(path_base, exp_filename, con_filename, index_cubic)\n\n size = 9\n x = numpy.arange(size)\n\n total_width, n = 1.2, 2.5\n width = 1.0 / n\n x = x - (total_width - width) / 2\n\n for i in range(0, len(x)):\n x[i] += 0.5 * i\n\n # Exp\n fig = plt.figure()\n\n # Con\n con_reno = plt.bar(x + 0 * width - 1.2,\n index_reno,\n width=width,\n label='Against Reno',\n alpha=0.5,\n color=\"darkorange\")\n\n con_cubic = plt.bar(x + 1 * width - 1.2,\n index_cubic,\n width=width,\n label='Against CUBIC',\n alpha=0.5,\n color=\"lawngreen\")\n\n con_itself = plt.bar(x + 2 * width - 1.2,\n index_itself,\n width=width,\n label='Against Another Same CCA',\n alpha=0.5,\n color=\"dodgerblue\")\n\n # Index\n plt.xticks(x + 1.5 * width - 1.2, [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\",\n \"YeAH\"],\n fontsize=fsize,\n rotation=\"45\")\n plt.ylabel(\"Jain`s Fairness Index\", fontsize=fsize)\n plt.yticks(fontsize=fsize)\n plt.ylim(0.5, 1.1)\n\n ax = plt.subplot(111)\n ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0., fontsize=fsize)\n\n plt.subplots_adjust(left=0.07, right=0.98, top=0.9, bottom=0.2)\n\n plt.show()", "def parse(input_dir, output_dir, n_jobs, rewrite):\n output_dir = os.path.abspath(output_dir)\n\n if not os.path.exists(input_dir):\n raise click.FileError(f'\"{input_dir}\"', hint='does not exist')\n\n if os.path.exists(output_dir):\n if len(os.listdir(output_dir)) > 0 and not rewrite:\n raise click.FileError(\n f'\"{output_dir}\"', hint='folder exists and is not empty.')\n if rewrite:\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n else:\n os.makedirs(output_dir)\n\n click.echo(f'Parsing archives from the folder: {input_dir}')\n parser = DirectoryParser(input_dir)\n result = parser.parse(num_of_workers=n_jobs)\n\n meta_path = join(output_dir, 'levels.csv')\n objects_path = join(output_dir, 'objects.csv')\n\n result.meta.to_csv(meta_path, index=False)\n 
result.objects.to_csv(objects_path, index=False)\n\n click.echo(f'Meta data saved: {meta_path}')\n click.echo(f'Objects data saved: {objects_path}')", "def check(self):\n if not os.path.exists(config.configured_root_path):\n raise c8e('root path of data set not exist' + config.configured_root_path)\n \n for it in config.configured_dirs:\n self.__path[it] = os.path.join(config.configured_root_path, it)\n \n for l1 in os.listdir(config.configured_root_path): #1st level directory: train/valid/test\n l1path = os.path.join(config.configured_root_path, l1)\n if not os.path.isdir(l1path):\n raise c8e('regular file exists in data set' + l1)\n \n if l1 not in config.configured_dirs:\n raise c8e('wrong dir in data set' + l1)\n \n for l2 in os.listdir(l1path): #2nd level directory, red/yellow/green\n l2path = os.path.join(l1path, l2)\n if not os.path.isdir(l2path):\n raise c8e('regular file exists in data set' + l2path)\n \n if l2 not in config.configured_classes:\n raise c8e('wrong dir in data set' + l2path)\n \n for l3 in os.listdir(l2path): #3rd level files \n self._check_image(l2path, l3)\n \n #statistic\n self.__phase_sample_count[l1] += 1\n self.__area_sample_count[l2] += 1\n if l1 == config.configured_train_dir:\n self.__train_sample_count[l2] += 1\n \n #process weight of each class of train directory\n total = np.sum(list(self.__train_sample_count.values())) \n max_samples = np.max(list(self.__train_sample_count.values())) #Max\n mu = 1. / (total / float(max_samples)) #\n keys = self.__train_sample_count.keys()\n for key in keys:\n score = math.log(mu * total / float(self.__train_sample_count[key]))\n self.__class_weight[int(key)] = score if score > 1. else 1.", "def _get_filenames_and_classes(dataset_dir):\n # print 'DATASET DIR:', dataset_dir\n # print 'subdir:', [name for name in os.listdir(dataset_dir)]\n # dataset_main_folder_list = []\n # for name in os.listdir(dataset_dir):\n # \tif os.path.isdir(name):\n # \t\tdataset_main_folder_list.append(name)\n dataset_main_folder_list = [name for name in os.listdir(dataset_dir) if os.path.isdir(os.path.join(dataset_dir,name))]\n dataset_root = os.path.join(dataset_dir, dataset_main_folder_list[0])\n directories = []\n class_names = []\n for filename in os.listdir(dataset_root):\n path = os.path.join(dataset_root, filename)\n if os.path.isdir(path):\n directories.append(path)\n class_names.append(filename)\n \n count = 0\n #print(directories)\n for directory in directories:\n #print(directory)\n #continue\n for filename in os.listdir(directory):\n print(filename)\n path = os.path.join(directory, filename)\n\n im = Image.open(path)\n imResize = im.resize((28,28), Image.ANTIALIAS)\n imResize.save(path, 'bmp')\n print(count)\n count = count + 1\n \n\n\n \n return", "def main(root_dir):\n # load annotations\n print('Loading instances and annotations...')\n captions_file = json.load(open('{}/annotations/captions_train2017.json'.format(root_dir), 'r'))\n categories_file = json.load(open('{}/annotations/instances_train2017.json'.format(root_dir), 'r'))\n print('Done.')\n\n # group categories by image\n image_categories = group_categories(categories_file)\n\n # group captions by image\n image_captions = group_captions(captions_file['annotations'])\n\n # get filename of each image\n image_file = get_filename(captions_file['images'])\n\n # assign each category an id.\n # we are not using the default ids given in the dataset because\n # the id ranges are not continuous.\n category_id, id_category = map_category_id(categories_file['categories'])\n \n # save 
parsed coco dataset\n save_dataset(image_categories, image_captions, image_file, category_id, id_category, root_dir)", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths", "def main():\n arg = parse_args()\n print('Start.')\n arg.folder = Path(arg.folder)\n trees = list(arg.folder.glob('*'))\n trees = [i.absolute() for i in trees]\n info = parse_info(arg)\n types = [arg.folder/i for i in info.keys()]\n types_dict = dict(zip(info.keys(), types))\n for i in types:\n i.mkdir()\n result = divide_trees(trees, info, types)\n result_csv = arg.folder / 'result.csv'\n with open(result_csv, 'w') as out:\n out.write('Tree,Type,Confidence\\n')\n for i in result:\n out.write('{},{},{}\\n'.format(*i))\n Path(types_dict[i[1]]/i[0].name).write_text(i[0].read_text())\n print('Done.')", "def prepare_data(sourcedir):\n # Set up empty lists for storing the data and labels\n data, labels = [], []\n\n # Walk through the source directory\n for (root, subdirs, files) in os.walk(sourcedir):\n # Assign a numerical identifier to each class directory\n for i, class_dir in enumerate(subdirs):\n classes[class_dir] = i\n print(\"[INFO] Found class {}; \"\n \"assigned identifier {}.\".format(class_dir, i))\n\n # Define allowed image extensions\n ext = ['png', 'jpg', 'jpeg']\n\n # Loop over the files in each directory\n for f in files:\n # Check file extension\n if f.split('.')[-1] in ext:\n # Get image path\n path = os.path.join(root, f)\n # Extract class label from path\n label = path.split('/')[-2]\n # Get the corresponding label integer from the classes dict\n numlabel = classes[label]\n # Load image\n image = load_img(path, target_size=target_size)\n # Convert image to numpy array\n features = img_to_array(image)\n\n # Append data and labels to lists\n data.append(features)\n labels.append(numlabel)\n\n # Convert lists to numpy arrays\n data = np.array(data)\n labels = np.array(labels)\n\n # Convert numerical labels into one-hot encoded vectors\n labels = np_utils.to_categorical(labels, len(classes))\n\n # Normalize the RGB values into range 0...1\n data = data.astype('float') / 255.0\n\n # Return data and labels\n return data, labels", "def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing 
`%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)", "def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder and piece\n\t\t\tself.addNode(self.tree,data.path(),data)", "def ProcessDTI(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'dti':\n if self.verbose:\n print 'Processing DTI data in %s' % os.path.basename(entry)\n# dtiname = '%s/s%s_dti' % \\\n# (self.info[entry]['outdir'],self.info[entry]['series'])\n cmd = 'convert_file %s %s %s' % (entry, \\\n self.info[entry]['imgfile'], self.info[entry]['filetype'])\n fname = '%s%s' % \\\n (self.info[entry]['imgfile'], self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])", "def process_datasets(self):\n\n with open(self.mappings, \"r+\") as json_file:\n emsl_to_jgi = json.load(json_file)\n emsl_to_jgi_copy = copy.deepcopy(emsl_to_jgi)\n\n contaminant_file_loc = emsl_to_jgi[\"contaminant_file_loc\"]\n # run for each dataset\n for dataset_id, values in emsl_to_jgi.items():\n if dataset_id not in [\n \"contaminant_file_loc\",\n \"analysis_activity_file_loc\",\n 
\"data_objects_file_loc\",\n \"STUDY\",\n \"tools_used\",\n ]:\n raw_file_loc = values[\"raw_file_loc\"]\n self.dataset_name = values[\"dataset_name\"]\n # dataset search against a fasta file\n for genome_directory, locations in values[\n \"genome_directory\"\n ].items():\n # clear object to prepare next job\n ANALYSIS_JOBS_OBJECT.clear()\n\n # create log_dir\n self.save_job_results = os.path.join(\n self.result_loc, dataset_id, genome_directory\n )\n self.log_collected_at = os.path.join(\n os.path.abspath(self.save_job_results), \"analysis_jobs_logs\"\n )\n if not os.path.exists(self.log_collected_at):\n os.makedirs(self.log_collected_at)\n\n files = [locations[\"faa_file_loc\"], contaminant_file_loc]\n contaminated_faa_file_loc = self.contaminate_fasta(files)\n\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"contaminated_faa_file_loc\",\n contaminated_faa_file_loc,\n emsl_to_jgi_copy,\n )\n # convert .faa to .txt\n faa_txt_file = self.convert_faa2txt(\n dataset_id, contaminated_faa_file_loc\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"txt_faa_file_loc\",\n faa_txt_file,\n emsl_to_jgi_copy,\n )\n\n # log & run job\n self.run_n_log_job(\n dataset_id,\n genome_directory,\n contaminated_faa_file_loc,\n raw_file_loc,\n emsl_to_jgi_copy,\n )\n\n # merge analysis\n resultant_file = self.merge_analysis_jobs(\n dataset_id, genome_directory\n )\n self.register_job_in_emsl_to_jgi(\n dataset_id,\n genome_directory,\n \"resultant_file_loc\",\n resultant_file,\n emsl_to_jgi_copy,\n )\n\n # capture the job metadata object\n logger.info(\"Jobrun\", extra=LOGGED_ANALYSIS_JOB)\n\n # update emsl_to_jgi.json\n json_file.seek(0) # move back to BOF.\n json_file.truncate()\n json_file.write(json.dumps(emsl_to_jgi_copy, default=str, indent=4))\n pass", "def sample_series_dirs():\n tmp_dir = tempfile.mkdtemp()\n # Extract Series\n os.mkdir(os.path.join(tmp_dir, \"series_dir\"))\n series_dir_series = os.path.join(tmp_dir, \"series_dir\")\n series_zip = os.path.join(ASSETS_DIR, 'series_dir_series.zip')\n with zipfile.ZipFile(series_zip, \"r\") as zip_ref:\n zip_ref.extractall(series_dir_series)\n # Extract Animes\n os.mkdir(os.path.join(tmp_dir, \"anime_dir\"))\n series_dir_anime = os.path.join(tmp_dir, \"anime_dir\")\n anime_zip = os.path.join(ASSETS_DIR, 'series_dir_anime.zip')\n with zipfile.ZipFile(anime_zip, \"r\") as zip_ref:\n zip_ref.extractall(series_dir_anime)\n\n yield [series_dir_series, series_dir_anime]\n shutil.rmtree(tmp_dir)", "def get_processed_data(self, group_directory):\n processed_dir = [x for x in group_directory.iterdir()\n if x.is_dir() and 'processed' in x.name][0]\n\n task_dirs = [x for x in processed_dir.iterdir()\n if x.is_dir() and 'task' in x.name]\n\n files = dict()\n for task in task_dirs:\n task_camera_dirs = [x for x in task.iterdir()\n if x.is_dir() and 'pc' in x.name]\n\n task_frame_files = list()\n if task_camera_dirs:\n task_frame_files = dict()\n for camera_dir in task_camera_dirs:\n task_frame_files[camera_dir.name] = [x for x in camera_dir.iterdir()\n if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for camera, frame_files in task_frame_files.items():\n for frame_file in frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n\n if frame not in files[task.name]:\n files[task.name][frame] = dict()\n\n files[task.name][frame][camera] = frame_file\n\n else:\n task_frame_files = [x for x in task.iterdir()\n if not 
x.is_dir()\n and x.suffix in VALID_OUTPUT_FILE_TYPES]\n\n for frame_file in task_frame_files:\n frame = int(re.search(r'(?<=_)(\\d{12})(?=_)',\n frame_file.name).group(0))\n if task.name not in files:\n files[task.name] = dict()\n files[task.name][frame] = frame_file\n\n return files", "def AnnotateDirectory(self, root_path):\n \n annotation_data = []\n \n for dirpath, _, filenames in os.walk(root_path):\n for filename in filenames:\n if not self._DefaultFileNameFilter(filename):\n continue\n \n file_path = os.path.abspath(os.path.join(dirpath, filename))\n logging.info(\"Processing '%s'\" % file_path)\n \n if self.source_filter and not self.source_filter.IsValid(file_path):\n logging.info(\" *SKIPPING*\")\n continue\n \n annotated_file = self.AnnotateSourceFile(file_path)\n annotation_data.append(annotated_file)\n\n return annotation_data", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def get_info(self):\n self.id = basename(self.path)\n\n # Check all files\n for filename in listdir(self.path):\n if isfile(join_paths(self.path, filename)):\n self.files.append(filename)\n\n # Check handlers\n for match in self._file_handlers.keys():\n if match in filename:\n handler = getattr(self, self._file_handlers[match])\n handler(filename)", "def _prepare(self):\n\n # table of containing directories (to add implicit dependencies to image builds)\n directories: dict[PurePosixPath, set[FileValue]] = dict()\n\n # map plan elements to values\n v: dict[Any, ValueBase] = dict()\n # map plan elements to actions\n a: dict[Any, Action] = dict()\n\n # Create FileValues for WorkFiles found in plan\n for workfile in self.plan.files():\n value = FileValue(workfile)\n v[workfile] = value\n directory = workfile.posix_path.parent\n if directory not in directories:\n directories[directory] = set()\n directories[directory].add(value)\n\n for image in self.plan.images():\n if image.pull_from_registry:\n image_value = RegistryImageValue(image)\n pull_image_action = PullImageAction(image)\n pull_image_action.set_output_image(image_value)\n v[image] = image_value\n a[image] = pull_image_action\n else:\n image_value = ImageToBuildValue(image)\n build_image_action = BuildImageAction(image)\n build_image_action.set_output_image(image_value)\n v[image] = image_value\n a[image] = build_image_action\n # if context dir contains any WorkFiles, add corresponding FileValues as dependencies\n for directory in directories.keys():\n if directory.is_relative_to(image_value._plan_element.build_from_context):\n for file_value in directories[directory]:\n logging.info(\"Implied dependency %s->%s\", 
file_value, build_image_action)\n build_image_action.add_input(file_value)\n\n for e in self.plan.execs():\n image_value = v[e.image]\n if not isinstance(image_value, ImageValue):\n raise Exception(\"not an ImageValue %s\" % image_value)\n exec_action = ExecAction(e, image_value)\n a[e] = exec_action\n for inp in e.inputs:\n exec_action.add_input(v[inp.workfile])\n v[inp.workfile].add_consumer(exec_action)\n for output in e.outputs:\n exec_action.add_output(v[output.workfile])\n v[output.workfile].set_producer(exec_action)\n\n self.actions = set(a.values())\n self.values = set(v.values())", "def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc", "def read_classified_data(root_path, to_size = (200,200), transformation = transforms.ToTensor()):\n label_dict = {}\n # for each folder in the dataset\n # get the label\n for i, label in tqdm(enumerate(sorted(os.listdir(root_path))), desc = \"Read in...\", leave = False):\n if len(os.listdir(sub_path)) == 0:\n continue\n sub_path = os.path.join(root_path, label)\n # write the label in the label dict\n label_dict[i] = label\n # find the csv, there should be one and only one csv\n csv_path = glob.glob(os.path.join(sub_path,\"*.csv\"))[0]\n df = pd.read_csv(csv_path)\n # the csv should have a image_name list indicating the 1-1 correspondense\n image_origin = df[\"image_name\"]\n # get the rest and the features\n df.drop(labels = \"image_name\", axis = \"columns\", inplace = True)\n # concate them to our dataset\n if i == 0:\n features = torch.from_numpy(df.to_numpy())\n images = torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin])\n labels = torch.ones(image_origin.shape[0])*label\n else:\n features = torch.cat((features,torch.from_numpy(df.to_numpy())))\n images = torch.cat(images,torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin]))\n labels = torch.cat(labels, torch.ones(image_origin.shape[0])*label)\n # return the dataset with our label_dict\n return TensorDataset(images,features, labels),label_dict", "def organise_trials(self, logdir_content):\n config_summary_fpath = os.path.join(\n self.logdir,\n 'config_summary.json'\n )\n cfg = from_file(config_summary_fpath)\n self.data_is_data = cfg['data_is_data']\n # Get naming scheme\n self.labels = Labels(\n h0_name=cfg['h0_name'],\n h1_name=cfg['h1_name'],\n data_name=cfg['data_name'],\n data_is_data=self.data_is_data,\n fluctuate_data=self.fluctuate_data,\n fluctuate_fid=self.fluctuate_fid\n )\n # Look for the pickle files in the directory to indicate\n # that this data may have already been processed.\n pickle_there = self.check_pickle_files(logdir_content)\n if pickle_there:\n self.load_from_pickle()\n # Else we must extract it\n else:\n if self.data_is_data and self.fluctuate_data:\n raise ValueError('Analysis was performed on data, so '\n '`fluctuate_data` is not supported.')\n # Get starting params\n self.get_starting_params(cfg=cfg)\n # Find all relevant data dirs, and from each extract the\n # fiducial fit(s) information contained\n self.get_data()\n self.pickle_data()", "def scan_directories(data_dir, file_filter):\n\n root = os.walk(data_dir)\n\n print('Scanning for 
files...')\n output = []\n\n for directory in root:\n\n files = directory[2]\n\n # Valid dataset contains video files of both halves and an accompanying label\n if file_filter(files):\n output.append(directory[0])\n\n print('Done')\n\n return output", "def Filter(PATH,Output_folder='.',outcome=0.):\n folders = glob.glob(PATH+'/*')\n folders.sort()\n i=-1\n \n # Create target directories\n if not os.path.exists(Output_folder+'/short'):\n os.makedirs(Output_folder+'/short')\n if not os.path.exists(Output_folder+'/50ohm'):\n os.makedirs(Output_folder+'/50ohm') \n if not os.path.exists(Output_folder+'/antenna'):\n os.makedirs(Output_folder+'/antenna')\n if not os.path.exists(Output_folder+'/Tmeas'):\n os.makedirs(Output_folder+'/Tmeas') \n if not os.path.exists(Output_folder+'/K_jnc'): \n os.makedirs(Output_folder+'/K_jnc')\n \n for subdirs, dirs, files in os.walk(PATH):\n dirs[:] = [d for d in dirs if not d.startswith('.')] # Inore hidden folders (ipynb checkpoints for example)\n dirs.sort()\n files.sort()\n short,antenna,_50ohm,measure,K_jnc = [],[],[],[],[]\n short_date,_50ohm_date,measure_date =[],[],[]\n\n # Walk through directories\n for file in files:\n path = os.path.join(subdirs,file)\n date = file.split(\"_\")[0]\n if os.path.getsize(path)==0: # Filtering empty data\n print 'EMPTY FILE:',path\n continue\n \n data = np.loadtxt(path,unpack=True)\n if data.size == 0:\n print 'NO DATA IN FILE:',path\n continue\n \n elif file.endswith('short.dat'):\n T_short = Res2Temp(data,bwidth)\n short.append(T_short),short_date.append(date)\n elif file.endswith('50ohm.dat'):\n T_50ohm = Res2Temp(data,bwidth)\n _50ohm.append(T_50ohm),_50ohm_date.append(date)\n elif file.endswith('noise.dat'):\n dB_noise = data\n elif file.endswith('antenna.dat'):\n dB_antenna = data\n dB_clean = dB_antenna - dB_noise - outcome\n T_antenna = Radio_source_trans(dB_clean, Freqs, bwidth)\n T_measure = T_antenna/eta_nu - T_short # Uncalibrated measure\n Tamb = round(np.genfromtxt(path,comments='!',skip_header= 18,max_rows=1)[1]+273.15,2)\n Kjnc = Tamb/(T_50ohm-T_short) # Johnson-noise calibration coefficient\n antenna.append(T_antenna),measure.append(T_measure),K_jnc.append(Kjnc)\n measure_date.append(date)\n \n # HDF5 Table Generation \n if i>=0 and i<len(folders) and short and antenna and _50ohm and measure and K_jnc:\n name = os.path.normpath(folders[i])\n name = name.split(\"/\")[1]\n short = np.transpose(short)\n antenna = np.transpose(antenna)\n _50ohm = np.transpose(_50ohm)\n measure = np.transpose(measure)\n K_jnc = np.transpose(K_jnc)\n\n short_table = pd.DataFrame(short[mask], index = Freqs[mask], columns = short_date)\n short_table.to_hdf(Output_folder+'/short/'+name+'.hdf5','df')\n _50ohm_table = pd.DataFrame(_50ohm[mask], index = Freqs[mask], columns = _50ohm_date)\n _50ohm_table.to_hdf(Output_folder+'/50ohm/'+name+'.hdf5','df')\n antenna_table = pd.DataFrame(antenna[mask], index = Freqs[mask], columns = measure_date)\n antenna_table.to_hdf(Output_folder+'/antenna/'+name+'.hdf5','df')\n measure_table = pd.DataFrame(measure[mask], index = Freqs[mask], columns = measure_date)\n measure_table.to_hdf(Output_folder+'/Tmeas/'+name+'.hdf5','df')\n Kjnc_table = pd.DataFrame(K_jnc[mask], index = Freqs[mask], columns = measure_date)\n Kjnc_table.to_hdf(Output_folder+'/K_jnc/'+name+'.hdf5','df')\n i+=1", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n sample_category = 
sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" ]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n process_name = 
sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n 
self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 
'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs", "def feature_dynamic_filesystem(self):\n def flatten_list(structured):\n \"\"\"Flatten nested list.\"\"\"\n flat = []\n for i in structured:\n flat += i\n return flat\n\n # Get file operations and their number\n self.features[\"file_read\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_read\", [])\n self.features[\"files_read\"] = len(self.features[\"file_read\"])\n self.features[\"file_written\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n 
.get(\"file_written\", [])\n self.features[\"files_written\"] = len(self.features[\"file_written\"])\n self.features[\"file_deleted\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_deleted\", [])\n self.features[\"files_deleted\"] = len(self.features[\"file_deleted\"])\n self.features[\"file_copied\"] = flatten_list(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])\n )\n self.features[\"files_copied\"] = len(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])\n )\n self.features[\"file_renamed\"] = flatten_list(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_moved\", [])\n )\n self.features[\"files_renamed\"] = len(self.features[\"file_renamed\"])\n\n # Get other file operations numbers\n self.features[\"files_opened\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_opened\", [])\n )\n self.features[\"files_exists\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_exists\", [])\n )\n self.features[\"files_failed\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_failed\", [])\n )\n\n # Get total number of unique touched files\n file_operations = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_read\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_written\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_deleted\", []) + \\\n flatten_list(self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])) + \\\n flatten_list(self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_moved\", [])) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_recreated\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_opened\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_exists\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_failed\", [])\n # remove duplicates\n self.features[\"files_operations\"] = len(list(set(file_operations)))", "def handle_full_info(files, directories, args):\n result_info = []\n if not files and len(directories) == 1:\n d = list(directories.keys())[0]\n result_info.extend(full_info(directories[d], args, d))\n log.debug(result_info)\n return result_info\n\n if files:\n result_info.extend(full_info(files, args))\n for d in directories:\n result_info.append(f'{d}:')\n result_info.extend(full_info(directories[d], args, d))\n log.debug(result_info)\n return result_info", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n # read the file in, sample-by-sample\n # build the dictionary recursively\n # add rle file also to generated cfg files\n # print integrations per job as well!\n # consider more than 1 file per jobs -- the jobs are splitted by MEM integration anyways\n\n rle_filters = self.get_filter() if self.rle_filter_file else {}\n statistics = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n if not os.path.exists(sample_info['local_paths'][0]['path']):\n logging.warning(\"Skipping sample {sample_name}\".format(sample_name = sample_name))\n continue\n\n 
process_name = sample_info[\"process_name_specific\"]\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_addMEM, process_name))\n is_mc = (sample_info[\"type\"] == \"mc\")\n if self.rle_filter_file:\n assert(process_name in rle_filters)\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n # typically, the analysis ends here and starts looping b/c the smallest unit of work processes\n # at least one file; we need, however, to split the file into event ranges in such a way that\n # each job performs mem_integrations_per_job MEM integrations\n\n # so what we are going to do is to open each set of files in inputFileList, read the variable\n # requestMEM_*l_*tau and try to gather the event ranges such that each event range\n # performs up to mem_integrations_per_job integrations per job\n memEvtRangeDict = self.memJobList(inputFileList, rle_filters[process_name] if self.rle_filter_file else [])\n\n for jobId in memEvtRangeDict.keys():\n\n key_dir = getKey(sample_name)\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = memEvtRangeDict[jobId]['input_fileset']\n\n # there should always be a job\n assert(self.inputFiles[key_file] > 0), \"More than one input file: %s ?? !!\" % \\\n ', '.join(self.inputFiles[key_file])\n\n #assert(len(self.inputFiles[key_file]) == 1), \"There is more than one input file!\"\n self.cfgFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i_cfg.py\" % (self.channel, process_name, jobId)\n )\n self.shFiles_addMEM_modified[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"addMEM_%s_%s_%i.sh\" % (self.channel, process_name, jobId)\n )\n self.outputFiles[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_NTUPLES], \"%s_%i.root\" % (process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"addMEM_%s_%s_%i.log\" % (self.channel, process_name, jobId)\n )\n self.logFiles_addMEM[key_file] = get_log_version((self.logFiles_addMEM[key_file],))[0]\n self.createCfg_addMEM(\n self.inputFiles[key_file],\n memEvtRangeDict[jobId]['event_range'][0],\n memEvtRangeDict[jobId]['event_range'][1],\n self.outputFiles[key_file],\n self.era,\n sample_info[\"sample_category\"],\n is_mc,\n self.cfgFiles_addMEM_modified[key_file],\n memEvtRangeDict[jobId]['whitelist'],\n )\n\n # associate the output file with the fileset_id\n #UDPATE: ONE OUTPUT FILE PER SAMPLE!\n fileset_id = memEvtRangeDict[jobId]['fileset_id']\n hadd_output_dir = os.path.join(\n self.dirs[key_dir][DKEY_FINAL_NTUPLES],\n '%04d' % (fileset_id // 1000)\n )\n if not os.path.exists(hadd_output_dir):\n os.makedirs(hadd_output_dir)\n hadd_output = os.path.join(\n hadd_output_dir, '%s_%i.root' % ('tree', fileset_id) # UDPATE: ADDED\n #hadd_output_dir, \"tree.root\" # UDPATE: REMOVED\n )\n if hadd_output not in self.hadd_records:\n self.hadd_records[hadd_output] = {}\n self.hadd_records[hadd_output]['output_files'] = []\n self.hadd_records[hadd_output]['fileset_id'] = fileset_id\n self.hadd_records[hadd_output]['output_files'].append(self.outputFiles[key_file])\n self.hadd_records[hadd_output]['process_name'] = process_name\n\n # let's sum the number of integration per sample\n nofEntriesMap = {}\n for v in memEvtRangeDict.values():\n if v['fileset_id'] not in nofEntriesMap:\n nofEntriesMap[v['fileset_id']] = {\n 'nof_entries' : v['nof_entries'],\n }\n statistics[process_name] = {\n 'nof_int' : sum([entry['nof_int'] for entry in 
memEvtRangeDict.values()]),\n 'nof_entries' : sum([entry['nof_entries'] for entry in nofEntriesMap.values()]),\n 'nof_events_pass' : sum([entry['nof_events_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_int_pass' : sum([entry['nof_int_pass'] for entry in memEvtRangeDict.values()]),\n 'nof_zero' : sum([entry['nof_zero'] for entry in memEvtRangeDict.values()]),\n 'nof_jobs' : len(memEvtRangeDict),\n }\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_addMEM)\n self.createScript_sbatch()\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_addMEM(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n self.createMakefile(lines_makefile)\n\n ws_len = max([len(kk) + 1 for kk in statistics.keys()])\n total_nof_integrations_sum = sum(x['nof_int'] for x in statistics.values())\n total_nof_entires = sum(x['nof_entries'] for x in statistics.values())\n total_nof_zero_int = sum(x['nof_zero'] for x in statistics.values())\n total_nof_jobs = sum(x['nof_jobs'] for x in statistics.values())\n total_nof_pass = sum(x['nof_events_pass'] for x in statistics.values())\n total_nof_int_pass_avg = float(sum(x['nof_int_pass'] for x in statistics.values())) / total_nof_pass\n total_nof_integrations_avg = float(total_nof_integrations_sum) / total_nof_entires\n total_nof_int_per_job = float(total_nof_integrations_sum) / total_nof_jobs\n for k, v in statistics.iteritems():\n if v['nof_entries'] == 0:\n int_per_event = 0.\n evt_pass = 0.\n else:\n int_per_event = float(v['nof_int']) / v['nof_entries']\n evt_pass = (100 * float(v['nof_events_pass']) / v['nof_entries'])\n if v['nof_events_pass'] == 0:\n nof_int_pass = 0.\n else:\n nof_int_pass = float(v['nof_int_pass']) / v['nof_events_pass']\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d (%.2f%%) evt pass; %.2f int/evt pass; %d evt 0int)' %\n (k,\n ' ' * (ws_len - len(k)),\n v['nof_int'],\n v['nof_entries'],\n v['nof_jobs'],\n int_per_event,\n v['nof_events_pass'],\n evt_pass,\n nof_int_pass,\n v['nof_zero'],\n )\n )\n print('%s%s: %d (%d entries; %d jobs; %.2f int/evt; %d evt pass; %.2f int/evt pass; '\n '%.2f int/job pass; %d evt 0int)' %\n ('total',\n ' ' * (ws_len - len('total')),\n total_nof_integrations_sum,\n total_nof_entires,\n total_nof_jobs,\n total_nof_integrations_avg,\n total_nof_pass,\n total_nof_int_pass_avg,\n total_nof_int_per_job,\n total_nof_zero_int,\n )\n )\n\n if self.max_mem_integrations > 0 and total_nof_integrations_sum > self.max_mem_integrations:\n logging.error(\"Will not start the jobs (max nof integrations exceeded)!\")\n return False\n else:\n logging.info(\"Done\")\n return True", "def main():\n extension_to_category = {}\n # Change to FileToSort directory\n os.chdir(\"FilesToSort\")\n\n # Loop through each file in the (current) directory\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split(\".\")[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into?\".format(extension))\n extension_to_category[extension] = category\n print(extension_to_category.items())\n # In case user put in existing folder\n try:\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # Move files to directories based on categories by renaming\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))", "def set_samples_info():\n white_list_formats = {'png', 
'jpg', 'jpeg', 'bmp'}\n dirs_info = {config.train_dir: 0, config.validation_dir: 0}\n for d in dirs_info:\n iglob_iter = glob.iglob(d + '**/*.*')\n for i in iglob_iter:\n filename, file_extension = os.path.splitext(i)\n if file_extension[1:] in white_list_formats:\n dirs_info[d] += 1\n\n config.nb_train_samples = dirs_info[config.train_dir]\n config.nb_validation_samples = dirs_info[config.validation_dir]", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def CollectDatasets(redirector_str):\n \n \n # uploadDir = 'srv/' for lpcjobqueue shell or TTbarAllHadUproot/ for coffea casa and WinterFell\n \n if 'cmsxrootd' in redirector_str:\n uploadDir = 'srv'\n else:\n uploadDir = 'TTbarAllHadUproot'\n \n uploadDir = ''\n \n filedir = 'nanoAODv9Files/'\n Years = ['UL16', 'UL17', 'UL18']\n VFP = ['preVFP', 'postVFP'] # preVFP unavailable in Winterfell for the moment\n # VFP = ['postVFP'] # Only for simple test in WinterFell\n filesets = {} # To be filled and returned by this function\n \n # ---- Before concatenation with +=, lists should be declard ---- # \n \n for y in Years:\n if '16' in y:\n for v in VFP:\n filesets[y+v+'_QCD'] = []\n filesets[y+v+'_TTbar_700_1000'] = []\n filesets[y+v+'_TTbar_1000_Inf'] = []\n # ---- JetHT and SingleMu ---- #\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'preVFP_JetHT'+l+'_Data'] = []\n filesets[y+'preVFP_SingleMu'+l+'_Data'] = []\n for l in ['', 'F', 'G', 'H']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n \n elif '17' in y:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n else:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'A', 'B', 'C', 'D']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n # ---- Loop through years and VFP status, filling the filesets dictionary with the MC file locations from corresponding txt files ---- #\n \n for y in Years:\n if '16' in y:\n for v in VFP:\n # ---- QCD ---- #\n ulqcdfilename = 
filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n \n # ---- TTbar ---- #\n ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar700to1000filename) as f:\n ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar1000toInffilename) as f:\n ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n if 'pre' in v:\n if 'Run2016' in filename: #preVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016 \n elif 'post' in v:\n if 'Run2016' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' not in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016\n \n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n# # ---- RS KK Gluon ---- #\n# ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n# ulRSGluonfiles=[]\n# l=0\n# for i in range(1000, 5500, 500):\n# with open(ulRSGluonfilename) as f:\n# ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n# filesets[y+v+'_RSGluon'+str(i)] += ulRSGluonfiles[l]\n# l += 1\n \n else: # UL17 and UL18\n v = VFP[1] # No preVFP after 2016 Run vertex problem was fixed\n \n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n\n# # ---- TTbar ---- #\n# ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar700to1000filename) as f:\n# ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar1000toInffilename) as f:\n# ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n# filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist: \n if 'Run2017' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017 = 
[redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2017\n elif 'Run2018' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2018 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2018\n\n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n # ---- RS KK Gluon ---- #\n ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n ulRSGluonfiles=[]\n l=0\n for i in range(1000, 5500, 500):\n with open(ulRSGluonfilename) as f:\n ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_RSGluon'+str(i)] = ulRSGluonfiles[l]\n l += 1\n \n \n # ---- JetHT Eras---- #\n \n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'JetHT/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'JetHT/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_JetHTF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_JetHTF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'JetHT/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'JetHT/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with 
open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'JetHT/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTD_Data'] += jetdatafiles2018d\n \n\n \n # ---- Single Muon ---- #\n datafilelist = os.listdir(filedir + 'SingleMu/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'SingleMu/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'SingleMu/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_SingleMuF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_SingleMuF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in 
filename:\n with open(filedir + 'SingleMu/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'SingleMu/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'SingleMu/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'SingleMu/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuD_Data'] += jetdatafiles2018d\n \n \n # print(filesets['UL16postVFP_JetHT_Data'])\n # print('==========================================================================================================')\n # print(filesets['UL16postVFP_TTbar'])\n \n return filesets", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n 
inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def process_subdirectory(subdir_path, regex_images, output_folder_if_pickle,\n min_n_leftside_metadata=10, only_ionogram_content_extraction_on_leftside_metadata=True, to_pickle=True):\n # Run segment_images on the subdirectory \n df_img,df_loss,df_outlier = segment_images(subdir_path, regex_images)\n\n # Determine ionogram grid 
mappings used to map (x,y) pixel coordinates of ionogram trace to (Hz, km) values\n stack = all_stack(df_img)\n col_peaks,row_peaks,mapping_Hz, mapping_km = get_grid_mappings(stack)\n\n # Translate metadata located on the left\n df_img_left = df_img[df_img['metadata_type']== 'left']\n \n if len(df_img_left.index) > min_n_leftside_metadata:\n # Determine leftside metadata grid (pixel coordinates to number, category mappings)\n df_img_left, df_loss_meta,dict_mapping,dict_hist= get_leftside_metadata(df_img_left,subdir_path)\n df_all_loss = df_loss.append(df_loss_meta)\n else:\n df_all_loss = df_loss\n \n # Extract the coordinates of the ionogram trace (black), Map the (x,y) pixel coordinates to (Hz, km) values and Extract select parameters i.e. fmin\n if only_ionogram_content_extraction_on_leftside_metadata:\n df_processed, df_loss_coord = extract_coord_subdir_and_param(df_img_left,subdir_path,col_peaks,row_peaks,mapping_Hz,mapping_km)\n else:\n df_processed, df_loss_coord = extract_coord_subdir_and_param(df_img,subdir_path,col_peaks,row_peaks,mapping_Hz,mapping_km)\n\n df_processed['mapping_Hz'] = [mapping_Hz] * len(df_processed.index)\n df_processed['mapping_km'] = [mapping_km] * len(df_processed.index)\n\n if to_pickle:\n start,subdir_name = ntpath.split(subdir_path[:-1])\n start,dir_name = ntpath.split(start)\n df_processed.to_pickle(os.pardir + '/pickle/' + str(dir_name)+'_'+str(subdir_name)+'.pkl')\n \n df_all_loss = df_all_loss.append(df_loss_coord)\n return df_processed, df_all_loss,df_outlier", "def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)", "def process( self ):\n\t\t\n\t\tprint( self._query[\"header\"], file = self._file )\n\t\tself._file.flush()\n\n\t\tfor root, dirs, files in os.walk(self._directory):\n\t\t\tpath = root.split(os.sep)\n\n\t\t\tif( root.endswith(\"logFiles\") and ( root.find(\"template\") == -1 ) ):\n\t\t\t\tLogProcessor._process_dir(root, self._file_list, self._columns, self._file, self._meta)", "def file_pairing(self, include=None, exclude=None):\n\n # List the file names for both the images and the catalogs\n if isinstance(self._irac_image_dir, list):\n image_files = list(chain.from_iterable(glob.glob(f'{img_dir}/*.fits') for img_dir in self._irac_image_dir))\n else:\n image_files = glob.glob(f'{self._irac_image_dir}/*.fits')\n if isinstance(self._sextractor_cat_dir, list):\n cat_files = list(\n chain.from_iterable(glob.glob(f'{cat_dir}/*.cat') for cat_dir in self._sextractor_cat_dir))\n else:\n cat_files = glob.glob(f'{self._sextractor_cat_dir}/*.cat')\n\n # Combine and sort both file lists\n cat_image_files = sorted(cat_files + image_files, key=self._keyfunct)\n\n # Group the file names together\n self._catalog_dictionary = {cluster_id: list(files)\n for cluster_id, files in groupby(cat_image_files, key=self._keyfunct)}\n\n # If we want to only run on a set of clusters we can filter for them now\n if include is not None:\n self._catalog_dictionary = {cluster_id: files for cluster_id, files in self._catalog_dictionary.items()\n if cluster_id in include}\n\n # If we want to exclude some clusters manually we can remove them now\n if exclude is not None:\n for cluster_id in exclude:\n 
self._catalog_dictionary.pop(cluster_id, None)\n\n # Sort the files into a dictionary according to the type of file\n for cluster_id, files in self._catalog_dictionary.items():\n self._catalog_dictionary[cluster_id] = {}\n for f in files:\n if f.endswith('.cat'):\n self._catalog_dictionary[cluster_id]['se_cat_path'] = f\n elif 'I1' in f and '_cov' not in f:\n self._catalog_dictionary[cluster_id]['ch1_sci_path'] = f\n elif 'I1' in f and '_cov' in f:\n self._catalog_dictionary[cluster_id]['ch1_cov_path'] = f\n elif 'I2' in f and '_cov' not in f:\n self._catalog_dictionary[cluster_id]['ch2_sci_path'] = f\n elif 'I2' in f and '_cov' in f:\n self._catalog_dictionary[cluster_id]['ch2_cov_path'] = f\n\n # Verify that all the clusters in our sample have all the necessary files\n problem_clusters = []\n for cluster_id, cluster_files in self._catalog_dictionary.items():\n file_keys = {'ch1_sci_path', 'ch1_cov_path', 'ch2_sci_path', 'ch2_cov_path', 'se_cat_path'}\n try:\n assert file_keys == cluster_files.keys()\n except AssertionError:\n message = f'Cluster {cluster_id} is missing files {file_keys - cluster_files.keys()}'\n warnings.warn(message)\n problem_clusters.append(cluster_id)\n\n # For now, remove the clusters missing files\n for cluster_id in problem_clusters:\n self._catalog_dictionary.pop(cluster_id, None)", "def analyze_dir(self, dirname):\n if self.exceeded_max():\n return\n\n for (dirpath, dirnames, filenames) in os.walk(dir_name):\n for filename in filenames:\n self.analyze_file(dirname + \"/\" + filename)", "def main():\n global MASK\n start_time = time()\n parser = initArgparse()\n args = parser.parse_args()\n dirtree = args.directorytree\n filetree = args.filetree\n meta = args.metadata\n newmeta = args.newmetadata\n sfv = args.sfv\n yes = args.yes\n MASK = args.exclude\n\n for i in args.DIRECTORY:\n if Path(i).exists() is True:\n basepath = Path(i)\n else:\n raise NotADirectoryError(f\"{i} does not exist\")\n default = False\n if dirtree == sfv == filetree == meta == newmeta is False:\n default = True\n if dirtree is True or default is True:\n dirtree_file = f\"{basepath.name}_directory_tree.txt\"\n checkFileExists(basepath, dirtree_file, yes)\n createDirectoryTree(basepath, dirtree_file)\n if sfv is True or default is True:\n sfv_file = f\"{basepath.name}.sfv\"\n checkFileExists(basepath, sfv_file, yes)\n createSfv(basepath, sfv_file)\n if filetree is True or default is True:\n csvtree_file = f\"{basepath.name}_file_tree.csv\"\n jsontree_file = f\"{basepath.name}_file_tree.json\"\n checkFileExists(basepath, jsontree_file, yes)\n checkFileExists(basepath, csvtree_file, yes)\n createFileTree(basepath, jsontree_file, csvtree_file)\n if meta is True or default is True:\n metadata_file = f\"{basepath.name}_metadata.json\"\n checkFileExists(basepath, metadata_file, yes)\n createMetadata(basepath, metadata_file)\n if newmeta is True:\n createNewMetadata(basepath)\n filesCache.cache_clear()\n getFileInfo.cache_clear()\n killTika()\n\n stop_time = time()\n print(f\"Finished in {round(stop_time-start_time, 2)} seconds\")", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = 
self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def create_station_dics(data_directories):\n \n files_all = {} \n for k,v in data_directories.items() :\n files = os.listdir(v)\n \n for f in files:\n station = f.split('_')[0] \n if station not in files_all.keys():\n files_all[station] = {}\n \n if k == 'ncar': # separating ncar temperature and wind files \n if 'trhc' in f:\n k = 'ncar_t'\n elif 'windc' in f:\n k = 'ncar_w'\n files_all[station][k] = ''\n files_all[station][k] = v + '/' + f # compelte path to the netCDF file \n\n #print('check') \n \n \n return files_all", "def process(self):\n level = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n for (n, input_file) in enumerate(self.input_files):\n self.logger.info(\"INPUT FILE %i / %s\", n, input_file.pageId or input_file.ID)\n file_id = make_file_id(input_file, self.output_file_grp)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n self.add_metadata(pcgts)\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n page_image, page_xywh, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n self.logger.info('Page \"%s\" uses %f DPI', page_id, 
dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n \n if level == 'page':\n self.process_page(page, page_image, page_xywh, zoom,\n input_file.pageId, file_id)\n else:\n if level == 'table':\n regions = page.get_TableRegion()\n else: # region\n regions = page.get_AllRegions(classes=['Text'], order='reading-order')\n if not regions:\n self.logger.warning('Page \"%s\" contains no text regions', page_id)\n for region in regions:\n region_image, region_xywh = self.workspace.image_from_segment(\n region, page_image, page_xywh, feature_filter='binarized')\n if level == 'region':\n self.process_region(region, region_image, region_xywh, zoom,\n input_file.pageId, file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n self.logger.warning('Page \"%s\" region \"%s\" contains no text lines',\n page_id, region.id)\n for line in lines:\n line_image, line_xywh = self.workspace.image_from_segment(\n line, region_image, region_xywh, feature_filter='binarized')\n self.process_line(line, line_image, line_xywh, zoom,\n input_file.pageId, region.id,\n file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_path = os.path.join(self.output_file_grp, file_id + '.xml')\n pcgts.set_pcGtsId(file_id)\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n self.logger.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.output_file_grp, out.local_filename)", "def catalog_files(directory):\n catalog = {}\n for dirpath, filename, files in os.walk(directory):\n catalog[dirpath] = files\n for dirpath, files in catalog.items():\n matched_files = recognize_files(files)\n catalog[dirpath] = matched_files\n return catalog", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def _gather_path(self, comp, path, function_map):\n print(f'\"Analyzing {comp} at {path}')\n if not os.path.exists(path):\n print('No files in {path}')\n return\n\n for root, _dirs, files in os.walk(path):\n if self.excluded(root):\n continue\n if not self.included(root, self.dir_inclusions):\n continue\n for fname in files:\n if not self.included(fname, self.file_inclusions):\n continue\n if fname.endswith(\".su\"):\n with open(os.path.join(root, fname), \"r\") as frame:\n for line in frame.readlines():\n split = line.split()\n if len(split) < 3:\n continue\n func = f\"{comp}:{split[0]}\"\n usage = int(split[-2])\n if usage < self.cutoff:\n continue\n if func not in function_map:\n function_map[func] = usage\n elif usage > function_map[func]:\n function_map[func] = usage", "def 
handle_short_info(files, directories, args):\n result_info = []\n # Define the width of columns\n max_length = 0\n if directories:\n max_length = max(max(len(item) for item in directories[d])\n for d in directories)\n if files:\n max_length = max(max_length, max(len(item) for item in files))\n col_width = max_length + 1\n terminal_width = shutil.get_terminal_size().columns\n if args.format == 'single-column' or args.one:\n columns = 1\n else:\n columns = terminal_width // (max_length + 1) or 1\n\n if not files and len(directories) == 1:\n d = list(directories.keys())[0]\n result_info.extend(handle_files_group(directories[d],\n columns, col_width, args))\n log.debug(result_info)\n return result_info\n\n if files:\n result_info.extend(handle_files_group(files, columns, col_width, args))\n for d in directories:\n result_info.append(f'{d}:')\n result_info.extend(handle_files_group(directories[d],\n columns, col_width, args))\n log.debug(result_info)\n return result_info", "def main():\n # The following dictionary will allow us to map extensions to the destination folder names\n extension_to_category = {}\n os.chdir(\"FilesToSort\")\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n extension = filename.split('.')[-1]\n if extension not in extension_to_category:\n category = input(\"What category would you like to sort {} files into? \".format(extension))\n # Now we can map this new extension to a folder name\n extension_to_category[extension] = category\n try:\n # We don't expect to get an exception due to the if statement\n # But we'll play it safe anyway in case the user chooses an existing folder\n os.mkdir(category)\n except FileExistsError:\n pass\n\n # We don't need a separate loop for this next step\n # We're already in a loop per file and we now know where to put it\n os.rename(filename, \"{}/{}\".format(extension_to_category[extension], filename))", "def make_all_charts(data, dir_path, filename, num_categories, colorby, args,\r\n color_data, prefs, background_color, label_color,\r\n chart_type, generate_image_type, plot_width, plot_height,\r\n bar_width, dpi, resize_nth_label, label_type,\r\n include_html_legend, include_html_counts):\r\n\r\n # iterate over the preferences and assign colors according to taxonomy\r\n img_data = []\r\n for label, f_name in data:\r\n raw_fpath = os.path.join(\r\n dir_path,\r\n 'raw_data',\r\n os.path.split(f_name)[-1])\r\n # move raw file to output directory\r\n shutil.copyfile(f_name, raw_fpath)\r\n\r\n f = color_data['counts'][f_name]\r\n level = max([len(t.split(';')) - 1 for t in f[1]])\r\n\r\n for key in prefs.keys():\r\n if prefs[key]['column'] != str(level):\r\n continue\r\n col_name = 'Taxon'\r\n mapping = [['Taxon']]\r\n mapping.extend([[m] for m in f[1]])\r\n if 'colors' in prefs[key]:\r\n if isinstance(prefs[key]['colors'], dict):\r\n pref_colors = prefs[key]['colors'].copy()\r\n # copy so we can mutate\r\n else:\r\n pref_colors = prefs[key]['colors'][:]\r\n else:\r\n pref_colors = {}\r\n labelname = prefs[key]['column']\r\n\r\n # Define groups and associate appropriate colors to each group\r\n groups = group_by_field(mapping, col_name)\r\n pref_colors, data_colors, data_color_order = \\\r\n get_group_colors(groups, pref_colors)\r\n\r\n updated_pref_colors = {}\r\n\r\n if chart_type == 'area' and len(f[0]) == 1:\r\n raise ValueError(\r\n 'When generating area charts, the number of samples (or category values) must be greater than 1. 
However, you can still produce a pie chart or bar chart with only 1 sample (or category value), but you must remove the area chart value from the input arguments.')\r\n\r\n for key in pref_colors:\r\n updated_pref_colors[key.replace('\"', '')] = pref_colors[key]\r\n\r\n for i, val in enumerate(f[1]):\r\n f[1][i] = val.replace('\"', '')\r\n\r\n # parse the counts and continue processing\r\n img_data.extend(get_counts(label.strip(), colorby, num_categories,\r\n dir_path, level, f, prefs, updated_pref_colors,\r\n background_color,\r\n label_color, chart_type, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, raw_fpath,\r\n resize_nth_label, label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n # generate html filepath\r\n outpath = os.path.join(dir_path, '%s_charts.html' % chart_type)\r\n out_table = ''.join(img_data)\r\n # write out html file\r\n write_html_file(out_table, outpath)", "def make_output_names(self):\n yaml_names = []\n fits_names = []\n\n if self.use_nonstsci_names:\n for i in range(len(self.info['Module'])):\n act = str(self.info['act_id'][i]).zfill(2)\n if self.info['Instrument'][i].lower() == 'niriss':\n det = 'NIS'\n elif self.info['Instrument'][i].lower() == 'fgs':\n det = 'FGS'\n else:\n det = self.info['detector'][i]\n mode = self.info['Mode'][i]\n dither = str(self.info['dither'][i]).zfill(2)\n\n yaml_names.append(os.path.abspath(os.path.join(self.output_dir, 'Act{}_{}_{}_Dither{}.yaml'\n .format(act, det, mode, dither))))\n fits_names.append('Act{}_{}_{}_Dither{}_uncal.fits'.format(act, det, mode, dither))\n\n else:\n for i in range(len(self.info['Module'])):\n if self.info['Instrument'][i].upper() == 'NIRCAM':\n fulldetector = 'nrc{}'.format(self.info['detector'][i].lower())\n else:\n fulldetector = self.info['detector'][i].lower()\n outfilebase = self.create_output_name(self.info, index=i)\n outfile = \"{}{}{}\".format(outfilebase, fulldetector, '_uncal.fits')\n yamlout = \"{}{}{}\".format(outfilebase, fulldetector, '.yaml')\n\n yaml_names.append(yamlout)\n fits_names.append(outfile)\n\n self.info['yamlfile'] = yaml_names\n self.info['outputfits'] = fits_names\n # Table([self.info['yamlfile']]).pprint()", "def bulk_train(self):\n logger.info(\"collecting subfolders - relations\")\n relations = self.collect_subfolders(self.input_dir)\n logger.info(\"relations - {}\".format(relations))\n\n execution_times = []\n\n for rel, rel_path in tqdm(relations.items(), desc=\"relations\"):\n logger.info(\"collecting training files from {}\".format(rel_path))\n tr_files = self.collect_files(rel_path, self.regexp_train)\n hyper_params = self.get_hyperparams()\n hyper_params['graph'] = tr_files\n\n output_folder = os.path.join(self.output_dir, rel)\n if not os.path.exists(output_folder):\n logger.info(\"creating {} (did not exist)\".format(output_folder))\n os.makedirs(output_folder)\n\n for params in tqdm(ParameterGrid(hyper_params), desc=\"training embedding\"):\n logger.info(\"hyperparams: {}\".format(params))\n train_file = params['graph']\n model_name = self.compute_model_name(params, output_folder)\n logger.info('training starspace model \"{}\" from file \"{}\"'.format(\n model_name, train_file))\n external_output, delta = self.call_starspace(params, train_file, model_name)\n logger.info(\"executed in {:0.2f}s\".format(delta))\n\n logger.info(\"external command output logged in {}\".format(self.external_log))\n if not os.path.exists(self.output_dir):\n logger.info(\"creating {} (did not exist)\".format(self.output_dir))\n 
os.makedirs(self.output_dir)\n\n with open(self.external_log, 'a') as f:\n f.write(external_output)\n\n execution_times.append(dict({ 'time': delta }, **params))\n \n return execution_times", "def main():\n glob_pattern = \"{root}/{child}/*.xml\".format(root=MANCHESTER_ROOT, child=TARGET_CHILD)\n corpus_files = glob(glob_pattern)\n for filename in corpus_files:\n print(filename)\n to_csv(filtered_parent_freq_count([filename], 2))", "def find_benchmark_directories(self):\n for (benchmark, producer), result in self.results.items():\n pattern = benchmark + '_' + producer + '*'\n files = find_directory(pattern, self.root_folder)\n if files:\n # add just the latest one\n sorted_files = sorted(files)\n result.directory = sorted_files[-1]\n else:\n print('No benchmark directories found in ' + self.root_folder)", "def parse_json_data(settings, dataset):\n for directory in dataset: # for directory in list of directories\n directory[\"data\"] = []\n for record in directory[\"rawdata\"]: # each record is the raw JSON data of a file in a directory\n jsonrootpath = get_json_root_path(record)\n globaloptions = get_json_global_options(record)\n #for item in record[\"client_stats\"]:\n # if \"job options\" in item.keys():\n # print(item[\"job options\"][\"iodepth\"])\n process_json_record(settings, directory, record, jsonrootpath, globaloptions)\n #print(\"================================\")\n #print(directory[\"data\"])\n #for directory in dataset:\n # for item in directory[\"data\"]:\n # print(item[\"iodepth\"])\n directory[\"data\"] = sort_list_of_dictionaries(directory[\"data\"])\n return dataset", "def _collect(self, text_directory) -> Iterator[Any]:\n return dataset_path_iterator(text_directory, self.configs.file_ext)", "def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()", "def filter(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n self._printer('Searching ' + directory)\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n if self.filters.validate(root):\n # Check that non-empty folders flag is on and we're at the max directory level\n if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:\n # Check that the path is not an empty folder\n if os.path.isdir(directory + os.sep + root):\n # Get paths in folder without walking directory\n paths = os.listdir(directory + os.sep + root)\n\n # Check that any of the paths are files and not just directories\n if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):\n self.add_path(directory, root)\n\n else:\n for filename in files:\n fullname = os.path.join(root, filename)\n if self.filters.validate(fullname):\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)" ]
[ "0.589833", "0.58914953", "0.58123064", "0.58081996", "0.57288224", "0.56694525", "0.56223905", "0.56043434", "0.5600391", "0.55908597", "0.5513597", "0.5498663", "0.54749286", "0.5472291", "0.54717815", "0.54624385", "0.54597956", "0.5453633", "0.544727", "0.54446405", "0.54430956", "0.5416227", "0.53885376", "0.53797907", "0.5363911", "0.5362585", "0.5357643", "0.53376615", "0.53341985", "0.5318752", "0.5305905", "0.5302934", "0.52954334", "0.5285074", "0.5281908", "0.52778393", "0.5274811", "0.5269997", "0.52628785", "0.5229251", "0.5228976", "0.52223444", "0.5218709", "0.52147245", "0.52052206", "0.52049494", "0.52044535", "0.52026576", "0.52025276", "0.5196418", "0.51956147", "0.5191612", "0.5182797", "0.5179788", "0.51737785", "0.51696837", "0.51560795", "0.51478153", "0.5146849", "0.5144811", "0.51419973", "0.51396346", "0.5138662", "0.513727", "0.5136943", "0.51327676", "0.5130285", "0.5124531", "0.5120861", "0.51198775", "0.5112335", "0.5109667", "0.51091063", "0.5108473", "0.51080304", "0.51026356", "0.508449", "0.50818545", "0.50772786", "0.5071765", "0.5066838", "0.50544846", "0.50541264", "0.5053897", "0.50526154", "0.5051489", "0.50307006", "0.502703", "0.5023308", "0.50148124", "0.50138766", "0.5013425", "0.501036", "0.5009272", "0.50076073", "0.50037867", "0.50028926", "0.50001585", "0.49967062", "0.49963436", "0.49955162" ]
0.0
-1
Create directory or exit on error.
def MakeDir(self, dirname): if os.path.exists(dirname): return try: os.umask(UMASK_DIR) os.makedirs(dirname) except OSError: self.errors = True errstr = '\nCould not create directory: %s ... ' % dirname self.LogErrors(errstr) raise OSError(errstr) os.umask(UMASK_FILE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n exit(\"\\nOSError: You can not use that directory!\\n\")", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def create_dir(dir_):\n try:\n os.makedirs(dir_)\n logger.debug(\"Creating directory %s\", dir_)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise", "def create_dir(dir):\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n except OSError:\n print('Error: Cannot create directory named \\\"' + dir + '\\\"')", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def new_dir(the_dir):\n try:\n os.makedirs(the_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass #not a problem if file exists", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def create_directory(dirpath: str, dryrun: bool):\n if not dryrun:\n try:\n os.mkdir(dirpath)\n except FileExistsError as error:\n raise error\n else:\n print(f\"Creating new directory: {dirpath}\")\n return(dirpath)", "def _create_directory(logger=None, directory=None):\n try:\n # For Python 3, we can add \"exist_ok=True\".\n os.makedirs(directory)\n\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(directory):\n pass\n else:\n if logger:\n logger.error(\"Failed to create servers directory {0}: {1}\".format(directory, exc))\n else:\n print(\"Failed to create servers directory {0}: {1}\".format(directory, exc))\n raise RuntimeError()", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def mkdirp(d):\r\n try:\r\n os.makedirs(d)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise", "def create_dir(newdir):\n try:\n os.makedirs(newdir)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(newdir):\n pass\n else:\n raise", "def _ensure_dir(directory):\r\n try:\r\n os.makedirs(directory)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise", "def make_directory(name: str):\n try:\n os.mkdir(name)\n except:\n pass", "def mkdir(path):\n try: \n os.mkdir(path)\n except OSError:\n if not os.path.isdir(path):\n raise", "def create_dir(dir_path):\n\n if not path.exists(dir_path):\n log('Creating directory: {0}'.format(dir_path))\n run(sh.mkdir, dir_path, p=True)", "def CreateDirectory(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, 0777)", "def _makeDir(self):\n try:\n os.mkdir(self.dir)\n # log('created directory: %s\\n' % self.dir)\n except OSError, err:\n if err.errno != errno.EEXIST:\n raise", "def createDir(directory):\n if not os.path.exists(directory):\n statusCreation = os.makedirs(directory)\n else:\n statusCreation = 2\n return statusCreation", "def make_dir(directory):\n if not os.path.exists(directory):\n # noinspection PyBroadException\n try:\n os.mkdir(directory)\n except OSError as ex:\n logger.warning(\"Failed to create directory %s: %s\", directory, ex)\n sys.exit()", "def mkdir_p(directory):\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != 
errno.EEXIST:\n raise", "def mkdir(dirname):\n try:\n os.mkdir(dirname)\n except Exception:\n pass", "def create_directory(path):\n try:\n os.makedirs(path) # pylint: disable=no-member\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_dir(_dir):\n if not os.path.exists(_dir):\n os.makedirs(_dir)", "def _create_dir_if_not_exists(dir: str):\n\n if os.path.exists(dir) and not os.path.isdir(dir):\n raise ValueError(f'Provided path {dir} was not a directory')\n\n if not os.path.exists(dir):\n _log.info(f'Creating directory {dir}')\n os.mkdir(dir)", "def make_dir(directory):\n try:\n os.makedirs(directory)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def mkdir(directory):\n\n if os.path.exists(directory):\n if os.path.isfile(directory):\n message = \"Unable to created directory '%s': A file with that name already exists\"\n raise PyBuilderException(message, directory)\n return\n os.makedirs(directory)", "def create_or_clean_directory(dir):\n\tif not os.path.exists(dir):\n\t\tprint(\"The path \\\"\" + dir + \"\\\" does not exist\")\n\t\tprint(\"creating directory \\\"\" + dir + \"\\\"\")\n\t\tos.makedirs(dir)\n\telse: #Directory exists, but we want to clean it before use\n\t\tprint(dir + \" already exists. Cleaning before use...\")\n\t\tshutil.rmtree(dir)\n\t\tos.makedirs(dir)", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def create_directory(directory):\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n return", "def mkdir(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def create_dir(dir, v=1):\n if not os.path.exists(dir):\n os.makedirs(dir)\n if v:\n print(\"Created Directory : \", dir)\n return 1\n else:\n if v:\n print(\"Directory already existed : \", dir)\n return 0", "def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkDir(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n # In a race between two threads, this thread may have lost,\n # in which case the directory will now exist. 
Otherwise this\n # is a real exception.\n if not os.path.exists(path):\n raise", "def ensure_dir(d):\n\n if not os.path.exists(d):\n os.makedirs(d, exist_ok=True)\n\n return", "def _mkdir(path):\n if not os.path.isdir(path):\n try:\n os.mkdir(path)\n except OSError as exc:\n logger.error(\"Error creating directory '%s': %s\", path, exc)\n raise RuntimeError()\n else:\n logger.info(\"Created directory '%s'\", path)", "def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass", "def make_dir(directory):\n try:\n os.makedirs(directory)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise", "def create_directory(dir_path):\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path, exist_ok=True)", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def create_dir(newdir):\n if not os.path.isdir(newdir):\n try:\n os.makedirs(newdir)\n print(newdir)\n except IOError:\n print(\"cannot create %s directoy\" % newdir)\n return 0", "def mkdir_p(path):\n try:\n os.makedirs(path) # , exist_ok=True\n except OSError:\n pass", "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return", "def makeDir(dirName):\n try:\n os.makedirs(dirName)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(dirName):\n ## The path already exists so we can safely ignore this exception\n pass\n else:\n ## If it failed for some other reason, we want to see what the\n ## error is still\n raise", "def create_directory(dirname):\n\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def create_directory(path, name):\n new_path = os.path.join(path, name)\n if not os.path.isdir(new_path):\n subprocess.run(['mkdir', new_path])", "def mkdir(path):", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def _ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)", "def create_dir(directory):\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # Backup directory already exists, no problem for this script,\n # just ignore the exception and carry on.\n pass\n else:\n # All errors other than an already existing backup directory\n # are not handled, so the exception is re-raised and the\n # script will crash here.\n raise", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def create_directory(dir:str):\n # Create directory if doesn't already exist\n # Path(dir).mkdir(parents=True, exist_ok=True)\n try:\n os.makedirs(dir)\n print(\"Created directory\",dir)\n except OSError as e:\n print(\"Directory exists\",dir)", "def ensureDirectory(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n print '>>> made directory \"%s\"'%(dirname)\n if not os.path.exists(dirname):\n print '>>> failed to make directory 
\"%s\"'%(dirname)\n return dirname", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST:\n pass\n else:\n raise", "def create_dir(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir(dir):\n\tif not os.path.exists(dir):\n\t\tos.makedirs(dir)", "def mkdir(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise", "def mkdir(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST:\n pass\n else: raise", "def create_dir(newdir):\n if not os.path.isdir(newdir):\n try:\n os.mkdir(newdir)\n print(newdir)\n except IOError:\n print(\"cannot create %s directoy\" % newdir)", "def createDirectory(directory=DIRECTORY):\n if not os.path.exists(directory):\n os.mkdir(directory)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True", "def createDir(dirPath):\n try:\n os.makedirs(dirPath, exist_ok=True) # Python 3.2+\n except TypeError:\n try: # Python 3.2-\n os.makedirs(dirPath)\n except OSError as exception:\n if exception.errno != 17:\n raise", "def createDir(self, dirName):\n\n if not os.path.exists(dirName):\n self.createDir(os.path.dirname(dirName))\n try:\n os.mkdir(dirName)\n except:\n print 'Current directory =', os.getcwd()\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError, e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def make_directory(self):\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory)", "def _createDirectory(self, directoryName):\n import os\n\n if directoryName != '.':\n if len(directoryName) == 0:\n raise ValueError(\n \"Specified directory is not valid. Set to '.' 
for current directory.\")\n # Try to create directory\n if not os.path.exists(directoryName):\n os.makedirs(directoryName)\n # Check write permission\n if not os.access(directoryName, os.W_OK):\n raise ValueError(\"Write permission to '\" + directoryName + \"' denied.\")", "def create_directory(directory=DIRECTORY):\n if not os.path.exists(directory):\n os.mkdir(directory)", "def safeMkDir(pth):\n try: os.mkdir(pth)\n except OSError: print 'directory %s already exists ?!'%pth", "def mkdir_p(path):\n\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise", "def check_create_dir(directory):\n if not os.path.isdir(directory):\n if os.path.isfile(directory):\n raise RuntimeError(\"Cannot create directory %s, already \"\n \"exists as a file object\" % directory)\n os.makedirs(directory)\n log.debug(\"Making dir %s\" % directory)", "def mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def create_new_dir(path):\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called from save_single_file_locally', extra=d)\n\n if not os.path.exists(path):\n logger.debug('Calling Function: % s',\n 'create_new_dir: create_new_dir calling makedirs', extra=d)\n os.makedirs(path)\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called makedirs', extra=d)", "def create_output_dir(output_dir, dir_name):\n try:\n os.mkdir(os.path.join(output_dir, dir_name))\n except OSError:\n print(os.path.join(output_dir, dir_name) + \" exits... :(\")", "def command_mkdir(args):\n errors = 0\n for directory in args.dirs:\n if os.path.exists(directory):\n if not os.path.isdir(directory):\n # -- SANITY CHECK: directory exists, but as file...\n sys.stdout.write(\"mkdir: %s\\n\" % directory)\n sys.stdout.write(\"ERROR: Exists already, but as file...\\n\")\n errors += 1\n else:\n # -- NORMAL CASE: Directory does not exits yet.\n assert not os.path.isdir(directory)\n sys.stdout.write(\"mkdir: %s\\n\" % directory)\n os.makedirs(directory)\n return errors", "def ensure_dir(path):\n\n \n try:\n os.makedirs(path)\n except (EnvironmentError) as e:\n if not(e.errno == errno.EEXIST and \n e.filename == path):\n raise\n return", "def mkdir(path):\n if os.path.exists(path):\n print(\"{} already exists.\".format(path))\n else:\n try:\n os.makedirs(path, exist_ok=True)\n except OSError:\n print(\"Uh oh - something went awry!\")\n else:\n print(\"Successfully created {}\".format(path))", "def create_dir(directory):\n if not os.path.isdir(directory):\n os.makedirs(directory)", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def _mkdir(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != 17:\n raise", "def mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise" ]
[ "0.7861626", "0.77017003", "0.7609223", "0.7593223", "0.75566745", "0.75102687", "0.75102687", "0.7497127", "0.7463986", "0.74487466", "0.74413145", "0.74352056", "0.74352056", "0.74352056", "0.74352056", "0.742612", "0.74215883", "0.7420352", "0.74127334", "0.74027", "0.7396389", "0.7389916", "0.7389909", "0.73880494", "0.7387301", "0.738384", "0.7379575", "0.7365713", "0.73655957", "0.73652655", "0.73630005", "0.7355784", "0.73487264", "0.73471195", "0.7342225", "0.7342225", "0.7342225", "0.7342225", "0.7342225", "0.733046", "0.7321593", "0.73179525", "0.73146594", "0.7306889", "0.7295622", "0.72862047", "0.72836715", "0.7277326", "0.7276767", "0.7272641", "0.7266371", "0.72639257", "0.7251777", "0.7251777", "0.72371954", "0.7231255", "0.72293466", "0.7226684", "0.7223212", "0.7223212", "0.7218242", "0.720389", "0.7197897", "0.7194281", "0.7192869", "0.7184562", "0.7184153", "0.71833414", "0.7182804", "0.7173767", "0.7161817", "0.7161817", "0.71595865", "0.71595407", "0.71595407", "0.715603", "0.71501553", "0.7144575", "0.71370703", "0.7133607", "0.7132317", "0.7125948", "0.712487", "0.7124236", "0.7122799", "0.71193516", "0.7119239", "0.71179736", "0.7117513", "0.7114101", "0.7109122", "0.70989394", "0.70943755", "0.70894945", "0.70883054", "0.7086478", "0.70786524", "0.70775074", "0.70757866", "0.70722115", "0.7065143" ]
0.0
-1
Fill in a hierarchy of template files. The default template file is loaded first. Then entries are overwritten by entries in the study-level template (in the directory containing the subject-level directories). Finally, entries in the subject-level template are loaded.
def _FindTemplateFile(self, topdir):
    if topdir.endswith('..'):
        topdir = '/'.join(topdir.split('/')[:-2])
    fnames = os.listdir(topdir)
    for fname in fnames:
        filename = '%s/%s' % (topdir, fname)
        if filename.endswith('.yaml') and not os.path.isdir(filename) and \
           os.path.exists(filename):
            f = open(filename, 'r')
            magic_code = f.read(22)
            f.close()
            if '#!fmri_file_template' in magic_code:
                return filename
    return None
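The retrieved document only shows the helper that locates a template file by its magic header; the layered merge described in the query (default, then study-level, then subject-level, with later entries overwriting earlier ones) happens in the caller. Below is a minimal standalone Python sketch of that pattern. The function names, the yaml.safe_load calls, and the assumption that the study-level template sits one directory above the subject directory are illustrative guesses, not part of the retrieved code.

import os
import yaml  # assumed dependency: templates are read as YAML mappings

def find_template_file(topdir):
    # Return the first .yaml file in topdir whose leading bytes carry the template magic code.
    for fname in os.listdir(topdir):
        filename = os.path.join(topdir, fname)
        if filename.endswith('.yaml') and os.path.isfile(filename):
            with open(filename, 'r') as f:
                if '#!fmri_file_template' in f.read(22):
                    return filename
    return None

def load_template_hierarchy(default_file, subject_dir):
    # Merge default -> study-level -> subject-level; later layers take precedence.
    with open(default_file) as f:
        template = yaml.safe_load(f) or {}
    study_file = find_template_file(os.path.join(subject_dir, '..'))  # directory containing the subject dirs
    if study_file is not None:
        with open(study_file) as f:
            template.update(yaml.safe_load(f) or {})
    subject_file = find_template_file(subject_dir)  # subject-level entries win last
    if subject_file is not None:
        with open(subject_file) as f:
            template.update(yaml.safe_load(f) or {})
    return template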
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def _GetTemplate(self):\n# First read default template.\n tmplt = self._LoadTemplate(c.preproc_template_default)\n tmplt['proc'] = self.topdir\n self.template_type = 'default'\n\n self.templates = []\n if self.template_file is not None:\n tmplt.update(self._LoadTemplate(self.template_file))\n self.template_type = 'command-line'\n self.templates.append(os.path.abspath(self.template_file))\n found_template = True\n else:\n# Find a study specific template file.\n study_template_file = self._FindTemplateFile('%s/..' % self.topdir)\n if study_template_file is not None:\n# Merge study template into default, study template has precedence.\n if self.verbose:\n print \"Using study template at \" + study_template_file\n tmplt.update(self._LoadTemplate(study_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(study_template_file))\n found_template = True\n else:\n found_template = False\n# Now look for a subject-specific template file.\n subject_template_file = self._FindTemplateFile('%s' % self.topdir)\n if subject_template_file is not None:\n# Merge subject template, subject template has precedence.\n if self.verbose:\n print \"Using subject-specific template at %s\" % \\\n subject_template_file\n tmplt.update(self._LoadTemplate(subject_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(subject_template_file))\n found_template = True\n\n if not found_template:\n raise RuntimeError('Could not find template file.')\n\n if tmplt.get('subject','same') == 'same':\n# Default subdirectory is same as data directory.\n tmplt['subject'] = self.topdir.split('/')[-1]\n else:\n if not isinstance(tmplt['subject'],str):\n errstr = 'preprocess: Invalid subject number. 
Be sure to ' + \\\n 'enclose the subject number item with double quotes.'\n raise RuntimeError(errstr)\n\n# Keys that apply to all EPIs.\n self.fsl_flip = tmplt.get('fsl_flip', False)\n if self.fsl_flip:\n self.flip_opts = '-LT'\n else:\n self.flip_opts = ''\n\n# Replace strings with python types.\n for key in tmplt.keys():\n if tmplt[key] == 'None':\n tmplt[key] = None\n elif key == 'True':\n tmplt[key] = True\n elif key == 'False':\n tmplt[key] = False\n return tmplt", "def _set_templates(spm_dir=SPM_DIR):\n global EPI_TEMPLATE, T1_TEMPLATE, GM_TEMPLATE, WM_TEMPLATE, CSF_TEMPLATE\n\n spm_version = _get_version_spm(SPM_DIR)\n\n # Set the tpm and template paths according to SPM version\n if spm_version == 'spm12':\n template_path = 'toolbox/OldNorm'\n tpm_path = 'toolbox/OldSeg'\n else:\n template_path = 'templates'\n tpm_path = 'tpm'\n\n # configure template images\n EPI_TEMPLATE = os.path.join(SPM_DIR, template_path, 'EPI.nii')\n SPM_T1_TEMPLATE = os.path.join(SPM_DIR, template_path, 'T1.nii')\n T1_TEMPLATE = \"/usr/share/data/fsl-mni152-templates/avg152T1.nii\"\n if not os.path.isfile(T1_TEMPLATE):\n T1_TEMPLATE += '.gz'\n if not os.path.exists(T1_TEMPLATE):\n T1_TEMPLATE = SPM_T1_TEMPLATE\n GM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'grey.nii')\n WM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'white.nii')\n CSF_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'csf.nii')", "def load_templates(self):\n\n self.templates = []\n\n if os.path.exists(\"question_templates.txt\"):\n for line in open(\"question_templates.txt\", \"r\"):\n self.templates.append(line.replace(\"\\n\", \"\"))", "def load_templates(self):\n TemplateHandler.templates = []\n for template in os.listdir(TemplateHandler.templates_path):\n template_config = self.load_template_conf(template)\n if template_config is None:\n continue\n TemplateHandler.templates.append(template_config)", "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def run():\r\n template_locations = settings.MAKO_TEMPLATES\r\n for namespace, directories in template_locations.items():\r\n clear_lookups(namespace)\r\n for directory in directories:\r\n add_lookup(namespace, directory)", "def read_in_templates(path, email_object=None):\n import os\n templates = {}\n\n for fle in os.listdir(path):\n with open(os.path.join(path, fle)) as _f:\n raw = \"\\n\".join(_f.readlines())\n templates[fle] = raw\n\n if email_object:\n email_object.use_templates(templates)\n else:\n return templates", "def init_templates( path=\"boilerplate\" ):\n global template_env\n template_loader = jinja2.FileSystemLoader(searchpath=\"boilerplate\" )\n template_env = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True\n )", "def load_all_templates(dataset, template_dir: str) -> Dict[str, NexusTemplate]:\n template_set = {\n template_name\n for template_name in os.listdir(template_dir)\n if not template_name.endswith(\".json\")\n }\n template_set.add(\"linear\")\n\n template_ord = []\n for template_name in TEMPLATE_PREFERRED_ORDER:\n try:\n template_set.remove(template_name)\n except KeyError:\n pass\n else:\n template_ord.append(template_name)\n template_ord.extend(sorted(template_set))\n\n return {\n template_name: load_template(dataset, template_dir, template_name)\n for template_name in template_ord\n }", "def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")", "def load_template_files(self):\n templates = 
dict()\n template_path = settings.CUSTOM_VERTO_TEMPLATES\n templates.update(self.read_template_files(template_path))\n if hasattr(self, \"extra_converter_templates_directory\"):\n directory = self.extra_converter_templates_directory\n template_path = os.path.join(template_path, directory)\n templates.update(self.read_template_files(template_path))\n return templates", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' % tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def update():\n if Project.use_templates:\n defaults = _project_defaults()\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)", "def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n 
with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)", "def load_template(\n dataset: DatasetManager, template_dir: str, template_name: str\n) -> NexusTemplate:\n if template_name == \"linear\":\n return LinearNexusTemplate()\n\n fullpath = os.path.join(template_dir, template_name)\n with open(fullpath + \".json\", \"r\") as fdata:\n data = json.load(fdata)\n\n level_doors = []\n other_doors = []\n for eid, door_data in data[\"doors\"].items():\n if door_data[\"level\"] in dataset.levels:\n level_doors.append(eid)\n else:\n other_doors.append(eid)\n\n return NexusTemplate(fullpath, template_name, data, level_doors, other_doors)", "def load_templates(fwhm=400, line_complexes=True, stars=False,\n full_line_list=None, continuum_list=None,\n fsps_templates=False, alf_template=False):\n \n if stars:\n # templates = glob.glob('%s/templates/Pickles_stars/ext/*dat' %(os.getenv('GRIZLI')))\n # templates = []\n # for t in 'obafgkmrw':\n # templates.extend( glob.glob('%s/templates/Pickles_stars/ext/uk%s*dat' %(os.getenv('THREEDHST'), t)))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-M*txt' %(os.getenv('THREEDHST'))))\n # templates.extend(glob.glob('%s/templates/SPEX/spex-prism-[LT]*txt' %(os.getenv('THREEDHST'))))\n # \n # #templates = glob.glob('/Users/brammer/Downloads/templates/spex*txt')\n # templates = glob.glob('bpgs/*ascii')\n # info = catIO.Table('bpgs/bpgs.info')\n # type = np.array([t[:2] for t in info['type']])\n # templates = []\n # for t in 'OBAFGKM':\n # test = type == '-%s' %(t)\n # so = np.argsort(info['type'][test])\n # templates.extend(info['file'][test][so])\n # \n # temp_list = OrderedDict()\n # for temp in templates:\n # #data = np.loadtxt('bpgs/'+temp, unpack=True)\n # data = np.loadtxt(temp, unpack=True)\n # #data[0] *= 1.e4 # spex\n # scl = np.interp(5500., data[0], data[1])\n # name = os.path.basename(temp)\n # #ix = info['file'] == temp\n # #name='%5s %s' %(info['type'][ix][0][1:], temp.split('.as')[0])\n # print(name)\n # temp_list[name] = utils.SpectrumTemplate(wave=data[0],\n # flux=data[1]/scl)\n \n # np.save('stars_bpgs.npy', [temp_list])\n \n \n # tall = np.load(os.path.join(os.getenv('GRIZLI'), \n # 'templates/stars.npy'))[0]\n # \n # return tall\n # \n # temp_list = OrderedDict()\n # for k in tall:\n # if k.startswith('uk'):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n # \n # for t in 'MLT':\n # for k in tall:\n # if k.startswith('spex-prism-'+t):\n # temp_list[k] = tall[k]\n # \n # return temp_list\n \n #return temp_list\n templates = ['M6.5.txt', 'M8.0.txt', 'L1.0.txt', 'L3.5.txt', 'L6.0.txt', 'T2.0.txt', 'T6.0.txt', 'T7.5.txt']\n templates = ['stars/'+t for t in templates]\n else:\n ## Intermediate and very old\n # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat', \n # 'templates/cvd12_t11_solar_Chabrier.extend.skip10.dat'] \n templates = ['eazy_intermediate.dat', \n 'cvd12_t11_solar_Chabrier.dat']\n \n ## Post starburst\n #templates.append('templates/UltraVISTA/eazy_v1.1_sed9.dat')\n templates.append('post_starburst.dat')\n \n ## Very blue continuum\n #templates.append('templates/YoungSB/erb2010_continuum.dat')\n templates.append('erb2010_continuum.dat')\n \n ### Test new templates\n # templates = ['templates/erb2010_continuum.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_006.dat',\n # 'templates/fsps/tweak_fsps_temp_kc13_12_008.dat']\n \n if fsps_templates:\n #templates = ['templates/fsps/tweak_fsps_temp_kc13_12_0{0:02d}.dat'.format(i+1) for i in range(12)]\n 
templates = ['fsps/fsps_QSF_12_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(12)]\n #templates = ['fsps/fsps_QSF_7_v3_nolines_0{0:02d}.dat'.format(i+1) for i in range(7)]\n \n \n if alf_template:\n templates.append('alf_SSP.dat')\n \n if continuum_list is not None:\n templates = continuum_list\n \n temp_list = OrderedDict()\n for temp in templates:\n data = np.loadtxt(os.path.join(os.getenv('GRIZLI'), 'templates', temp), unpack=True)\n #scl = np.interp(5500., data[0], data[1])\n scl = 1.\n name = temp #os.path.basename(temp)\n temp_list[name] = SpectrumTemplate(wave=data[0], flux=data[1]/scl,\n name=name)\n \n temp_list[name].name = name\n \n if stars:\n return temp_list\n \n ### Emission lines:\n line_wavelengths, line_ratios = get_line_wavelengths()\n \n if line_complexes:\n #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']\n #line_list = ['Ha+SII', 'OIII+Hb', 'OII']\n line_list = ['Ha+NII+SII+SIII+He', 'OIII+Hb', 'OII+Ne', 'Lya+CIV']\n else:\n if full_line_list is None:\n line_list = DEFAULT_LINE_LIST\n else:\n line_list = full_line_list\n \n #line_list = ['Ha', 'SII']\n \n # Use FSPS grid for lines\n wave_grid = None\n # if fsps_templates:\n # wave_grid = data[0]\n # else:\n # wave_grid = None \n \n for li in line_list:\n scl = line_ratios[li]/np.sum(line_ratios[li])\n for i in range(len(scl)):\n line_i = SpectrumTemplate(wave=wave_grid, \n central_wave=line_wavelengths[li][i], \n flux=None, fwhm=fwhm, velocity=True)\n \n if i == 0:\n line_temp = line_i*scl[i]\n else:\n line_temp = line_temp + line_i*scl[i]\n \n name = 'line {0}'.format(li)\n line_temp.name = name\n temp_list[name] = line_temp\n \n return temp_list", "def read_templates(folder):\n output = []\n for path, subdirs, files in os.walk(folder):\n for name in files:\n if name.endswith('.yml'):\n tpl = yaml.load(open(os.path.join(path, name)).read())\n tpl['template_name'] = name\n\n # Test if all required fields are in template:\n assert 'keywords' in tpl.keys(), 'Missing keywords field.'\n required_fields = ['date', 'amount', 'invoice_number']\n assert len(set(required_fields).intersection(tpl['fields'].keys())) == len(required_fields), \\\n 'Missing required key in template {} {}. Found {}'.format(name, path, tpl['fields'].keys())\n \n # Keywords as list, if only one.\n if type(tpl['keywords']) is not list:\n tpl['keywords'] = [tpl['keywords']]\n\n output.append(InvoiceTemplate(tpl))\n return output", "def loadTemplate(self):\n\n\t\t# Change directory to template folder\n\n\t\toriginal_path = os.getcwd()\n\t\tos.chdir(self.template_path)\n\n\t\th.load_file(\"stdrun.hoc\")\n\t\tif self.verbose: print('- Loading constants')\t\t\n\t\th.load_file('import3d.hoc')\n\n\t\tconstants_loaded = h.load_file('constants.hoc')\n\t\tmorphology_loaded = h.load_file('morphology_%s.hoc'%self.template_name)\n\n\t\tbiophysics_loaded = h.load_file('biophysics_%s.hoc'%self.template_name)\n\n\t\terror = 'Can\\'t find hoc file! Did you create it and call it by the correct name?'\n\t\tif not constants_loaded:\n\t\t\tprint('WARNING: {} hoc file not loaded! Did you create it and call it by the correct name?'.format(constants))\n\t\tif not morphology_loaded:\n\t\t\tprint('WARNING: {} hoc file not loaded! Did you create it and call it by the correct name?'.format(morphology))\n\t\tif not biophysics_loaded:\n\t\t\t# pdb.set_trace()\n\t\t\tprint('WARNING: {} hoc file not loaded! 
Did you create it and call it by the correct name?'.format(biophysics))\n\n\n\t\tif self.verbose:\n\t\t\tprint('\\n- Making %s template from .hoc file'%self.template_name)\n\n\t\t# h.load_file('%s.hoc'%self.template_name)\n\t\th.load_file('template_%s.hoc'%self.template_name)\n\n\t\t# Return to original dir\n\t\tos.chdir(original_path)", "def add_user_templates(self):\n\n # get all the user's templates\n user_templates = self.find_user_templates()\n\n # loop through the templates\n for template in user_templates:\n # create a template object and add it to the database\n local_template = PhishingTemplate(template)\n self._templates[template] = local_template", "def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)", "def update_templates():\n logging.info(\"Copying english po files to %s\" % POT_PATH)\n\n # post them to exposed URL\n ensure_dir(POT_PATH)\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"django.po\"), os.path.join(POT_PATH, \"kalite.pot\"))\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"djangojs.po\"), os.path.join(POT_PATH, \"kalitejs.pot\"))", "def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass", "def _prepare_simulation_subfolder(self, directory_strains):\n\t\tif not os.path.exists(directory_strains):\n\t\t\tos.mkdir(directory_strains)\n\t\tfor filename in self._directory_template_filenames:\n\t\t\tsrc = os.path.join(self._directory_template, filename)\n\t\t\tdst = os.path.join(directory_strains, filename)\n\t\t\tshutil.copy(src, dst)", "def mseed_2_templates(wav_dirs, cat, outdir, length, prepick,\n highcut=None, lowcut=None, f_order=None,\n samp_rate=None, min_snr=2.,\n start=None, end=None, miniseed=True,\n asdf_file=False, debug=1):\n\n # Establish date range for template creation\n cat.events.sort(key=lambda x: x.origins[-1].time)\n if start:\n cat_start = datetime.datetime.strptime(start, '%d/%m/%Y')\n cat_end = datetime.datetime.strptime(end, '%d/%m/%Y')\n else:\n cat_start = cat[0].origins[-1].time.date\n cat_end = cat[-1].origins[-1].time.date\n for date in date_generator(cat_start, cat_end):\n dto = UTCDateTime(date)\n print('Processing templates for: %s' % str(dto))\n q_start = dto - 10\n q_end = dto + 86410\n # Establish which events are in this day\n sch_str_start = 'time >= %s' % 
str(dto)\n sch_str_end = 'time <= %s' % str(dto + 86400)\n tmp_cat = cat.filter(sch_str_start, sch_str_end)\n if len(tmp_cat) == 0:\n print('No events on: %s' % str(dto))\n continue\n # Which stachans we got?\n stachans = {pk.waveform_id.station_code: [] for ev in tmp_cat\n for pk in ev.picks}\n for ev in tmp_cat:\n for pk in ev.picks:\n chan_code = pk.waveform_id.channel_code\n if chan_code not in stachans[pk.waveform_id.station_code]:\n stachans[pk.waveform_id.station_code].append(chan_code)\n wav_read_start = timer()\n # Be sure to go +/- 10 sec to account for GeoNet shit timing\n if asdf_file:\n with pyasdf.ASDFDataSet(asdf_file) as ds:\n st = Stream()\n for sta, chans in iter(stachans.items()):\n for station in ds.ifilter(ds.q.station == sta,\n ds.q.channel == chans,\n ds.q.starttime >= q_start,\n ds.q.endtime <= q_end):\n st += station.raw_recording\n elif miniseed:\n wav_ds = ['%s%d' % (d, dto.year) for d in wav_dirs]\n st = grab_day_wavs(wav_ds, dto, stachans)\n wav_read_stop = timer()\n print('Reading waveforms took %.3f seconds' % (wav_read_stop\n - wav_read_start))\n print('Looping through stachans to merge/resamp')\n stachans = [(tr.stats.station, tr.stats.channel) for tr in st]\n for stachan in list(set(stachans)):\n tmp_st = st.select(station=stachan[0], channel=stachan[1])\n if len(tmp_st) > 1 and len(set([tr.stats.sampling_rate for tr in tmp_st])) > 1:\n print('Traces from %s.%s have differing samp rates' % (stachan[0], stachan[1]))\n for tr in tmp_st:\n st.remove(tr)\n tmp_st.resample(sampling_rate=samp_rate)\n st += tmp_st\n st.merge(fill_value='interpolate')\n resamp_stop = timer()\n print('Resample/merge took %s secs' % str(resamp_stop - wav_read_stop))\n print('Preprocessing...')\n # Process the stream\n try:\n st1 = pre_processing.dayproc(st, lowcut=lowcut, highcut=highcut,\n filt_order=f_order, samp_rate=samp_rate,\n starttime=dto, debug=debug, ignore_length=True,\n num_cores=4)\n except NotImplementedError or Exception as e:\n print('Found error in dayproc, noting date and continuing')\n print(e)\n with open('%s/dayproc_errors.txt' % outdir, mode='a') as fo:\n fo.write('%s\\n%s\\n' % (str(date), e))\n continue\n print('Feeding stream to template_gen...')\n for event in tmp_cat:\n print('Copying stream to keep away from the trim...')\n trim_st = copy.deepcopy(st1)\n ev_name = str(event.resource_id).split('/')[-1]\n pk_stachans = ['%s.%s' % (pk.waveform_id.station_code,\n pk.waveform_id.channel_code)\n for pk in event.picks]\n # Run check to ensure that there is only one pick for each channel\n dups = [pk for pk, count\n in collections.Counter(pk_stachans).items() if count > 1]\n if len(dups) > 0:\n print('Event %s still has dup picks. Skipping' % ev_name)\n continue\n template = template_gen(event.picks, trim_st, length=length,\n prepick=prepick, min_snr=min_snr)\n if len([tr for tr in template\n if tr.stats.channel[-1] == 'Z']) < 6:\n print('Skipping template with fewer than 6 Z-comp traces')\n continue\n # temp_list.append(template)\n print('Writing event %s to file...' 
% ev_name)\n template.write('%s/%s.mseed' % (outdir, ev_name),\n format=\"MSEED\")\n del trim_st\n del tmp_cat, st1, st", "def get_template_files(fs, template_type):\n # no template fitting for null runs\n if fs[\"null_run\"]:\n template_type = None\n\n if \"template_type\" in fs:\n if template_type == fs[\"template_type\"]:\n return\n\n fs[\"template_type\"] = template_type\n\n # find all corresponding foreground templates\n if template_type is None:\n fs[\"template_root\"] = None\n fs[\"template_root2\"] = None\n fs[\"template_files\"] = None\n fs[\"template_files2\"] = None\n fs[\"template_noise_root\"] = None\n fs[\"template_noise_root2\"] = None\n fs[\"template_noise_files\"] = None\n fs[\"template_noise_files2\"] = None\n fs[\"num_template\"] = 0\n fs[\"num_template_noise\"] = 0\n else:\n num_template_noise = None\n for hm in [\"1\", \"2\"]:\n suff = \"\" if hm == \"1\" else \"2\"\n troot = os.path.join(\n fs[\"data_root\"],\n \"templates_{}\".format(template_type),\n \"halfmission-{}\".format(hm),\n )\n ### this block is so sims with template type like\n # 353_100_gauss_003 can use ensemble in 353_100_gauss\n tp = template_type.split(\"_\")\n ttype = template_type\n if tp[-1].isdigit():\n if ttype[-7:] not in [\"353_100\", \"217_100\"]:\n ttype = \"_\".join(tp[:-1])\n\n tnroot = os.path.join(\n fs[\"data_root\"],\n \"templates_noise_{}\".format(ttype),\n \"halfmission-{}\".format(hm),\n )\n\n tfiles = []\n tnfiles = []\n for f in fs[\"map_files\"]:\n nfile = f.replace(fs[\"map_root\"], troot)\n if not os.path.exists(nfile):\n raise OSError(\"Missing hm-{} template for {}\".format(hm, f))\n tfiles.append(nfile)\n nfiles = sorted(\n glob.glob(\n f.replace(fs[\"map_root\"], tnroot).replace(\n \".fits\", \"_*.fits\"\n )\n )\n )\n if not len(nfiles):\n raise OSError(\n \"Missing hm-{} template noise for {}\".format(hm, f)\n )\n tnfiles.append(nfiles)\n if num_template_noise is not None:\n if len(nfiles) != num_template_noise:\n raise OSError(\n \"Wrong number of template noise sims. 
\"\n \"Found {} files, expected {}.\".format(\n len(nfiles), num_template_noise\n )\n )\n\n num_template_noise = len(nfiles)\n\n tfiles = np.asarray(tfiles)\n tnfiles = np.asarray(tnfiles)\n fs[\"template_root{}\".format(suff)] = troot\n fs[\"template_files{}\".format(suff)] = tfiles\n fs[\"template_noise_root{}\".format(suff)] = tnroot\n fs[\"template_noise_files{}\".format(suff)] = tnfiles\n\n fs[\"num_template\"] = len(fs[\"template_files\"])\n fs[\"num_template_noise\"] = num_template_noise\n self.log(\n \"Found {} templates in {}\".format(\n fs[\"num_template\"], fs[\"template_root\"]\n ),\n \"info\",\n )\n self.log(\n \"Found {} template noise files in {}\".format(\n fs[\"num_template_noise\"], fs[\"template_noise_root\"]\n ),\n \"info\",\n )\n self.log(\"Template files: {}\".format(fs[\"template_files\"]), \"debug\")\n\n fields = [\n \"template_type\",\n \"template_root\",\n \"template_root2\",\n \"template_files\",\n \"template_files2\",\n \"template_noise_root\",\n \"template_noise_root2\",\n \"template_noise_files\",\n \"template_noise_files2\",\n \"num_template\",\n \"num_template_noise\",\n ]\n for k in fields:\n setattr(self, k, fs[k])", "def fetch_sample_templates():\n source_folder = Path(root, 'templates', 'sample_setup_files')\n Path('sample_templates').mkdir(parents=True, exist_ok=True)\n target_folder = Path().resolve()\n target_folder = Path(target_folder, 'sample_templates')\n\n copytree(source_folder, target_folder, dirs_exist_ok=True)\n logger.info(f'Sample templates can be found in directory {target_folder}')", "def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n self.create_file_from_template(file_path, template_path, model_attributes)", "def generate_summary(self, dir: Path) -> None:\n for t in JINJA_TEMPLATES:\n generate_file_based_on_template(dir, t, self.template_arguments)", "def load_generic_trfiles_fi(stimuli, subject, root=\"data/trfiles\"):\n trdict = dict()\n\n for stimulus in stimuli:\n try:\n fname = \"{0}_{1}.report\".format(stimulus, subject)\n trf = TRFile(os.path.join(root, fname))\n trdict[stimulus] = [trf]\n except Exception, e:\n print e\n\n return trdict", "def load():\n root = Path(__file__).parent\n for path in root.iterdir():\n if path.is_dir() and not path.name.startswith(\"_\"):\n subject = (path / \"subject.txt\").read_text()\n txt = (path / \"body.txt\").read_text()\n html = path / \"body.html\"\n if html.exists():\n html = html.read_text()\n else:\n html = None\n attachment = None\n pymodule = path / \"__init__.py\"\n if pymodule.exists():\n pymodule = importlib.import_module(f\"egapro.emails.{path.name}\")\n attachment = pymodule.attachment\n globals()[path.name] = Email(subject, txt, html, attachment)", "def __init__(self, template_path, groups):\n self._groups = groups\n self._template_path = template_path", "def test_main():\n for template in templates:\n main([\"-g\", template])\n\n # One at a time\n for xyz_file in 
example_xyz_files:\n main([template, xyz_file])\n\n # All at once\n main([template] + list(example_xyz_files))\n\n # Allow use of template in the parent directory\n with cd(\"data\"):\n main([\"../pnictogen/repo/ADF.in\", \"water-dimer.xyz\"])", "def read_template_files(self, template_path):\n templates = dict()\n for file in listdir(template_path):\n template_file = re.search(r\"(.*?).html$\", file)\n if template_file:\n template_name = template_file.groups()[0]\n templates[template_name] = open(os.path.join(template_path, file)).read()\n return templates", "def _load_template(self):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, '__init__.ini')\n cf = ApplicationConf.get_instance()\n with comp_open(filename, mode='r') as fp:\n content = fp.read()\n content = content.format(**cf)\n conf = CompConfigParser(allow_no_value=True)\n conf.read_string(content, '__init__.ini')\n ini = {'dirs': [], 'files': [], 'binaries': []}\n if conf.has_section('dirs'):\n for key in conf.options('dirs'):\n ini['dirs'].append(key)\n if conf.has_section('files'):\n for key in conf.options('files'):\n ini['files'].append(self.__remap(key))\n if conf.has_section('binaries'):\n for key in conf.options('binaries'):\n ini['binaries'].append(self.__remap(key))\n if isinstance(self._ini, dict):\n self._ini.update(ini)\n else:\n self._ini = ini", "def replace_template_files(root_directory, variables=None, template_files=None, subdirs=None):\n variables = variables or {\n 'branch' : retrieve_current_branch(repository_directory=root_directory, fix_environment=True),\n }\n \n templates = template_files or [\"requirements.txt\", \"setup.py\", \"pavement.py\"]\n \n for template in templates:\n fp = os.path.join(root_directory, template)\n _replace_template(fp, variables)\n \n if subdirs is None:\n subdirs = ['debian']\n\n if subdirs:\n for subdir in subdirs:\n dp = os.path.join(*list(chain([root_directory], subdir.split('/'))))\n if os.path.exists(dp):\n for file in os.listdir(dp):\n fp = os.path.join(root_directory, subdir, file)\n if os.path.isfile(fp):\n _replace_template(fp, variables)", "def processTemplates(self, tk, templateFile = '', id = '', shotNum = '', inprogressBar = ''):\r\n ## Now fetch all the template paths from shotgun\r\n getTemplatePaths = tk.paths_from_template(templateFile, {'Step' : 'Light', 'id' : id, 'Shot' : shotNum})\r\n debug(app = self, method = 'processTemplates', message = 'getTemplatePaths: %s' % getTemplatePaths, verbose = False)\r\n \r\n ## Now look for each assets template path: \r\n xmlFile = max(getTemplatePaths) \r\n debug(app = self, method = 'processTemplates', message = 'Max Version xmlFile.... 
%s' % xmlFile, verbose = False)\r\n \r\n ## Now if versions has stuff in it..\r\n if not xmlFile:\r\n debug(app = self, method = 'processTemplates', message = 'Can not find any xml files for %s' % shotNum, verbose = False)\r\n pass\r\n else:\r\n \r\n debug(app = self, method = 'processTemplates', message = 'PathTo: %s' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n if os.path.isfile(xmlFile.replace(os.path.sep, \"/\")):## is this a valid xml file!?\r\n inprogressBar.updateProgress(percent = 10, doingWhat = 'createAll shaders...')\r\n self._createAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n \r\n inprogressBar.updateProgress(percent = 30, doingWhat = 'connectAll shaders...')\r\n self._connectAllShaders(XMLPath = xmlFile.replace(os.path.sep, \"/\"), Namespace = '', Root = 'MaterialNodes')\r\n else:\r\n debug(app = self, method = 'processTemplates', message = 'FAILED Can not find a valid published xml file for %s ...' % os.path.isfile(xmlFile.replace(os.path.sep, \"/\")), verbose = False)\r\n pass", "def setUp(self):\r\n capa_path = capa.__path__[0]\r\n self.template_path = os.path.join(capa_path,\r\n 'templates',\r\n self.TEMPLATE_NAME)\r\n with open(self.template_path) as f:\r\n self.template = MakoTemplate(f.read())", "def copy_templates(root_directory, dist_directory, sdk_directory,\n cpus, families, boards):\n\n def _process(when, contexts):\n for context in contexts:\n for template in configuration.TEMPLATES:\n if template[\"when\"] == when:\n context.update({\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n })\n\n source = templates.from_string(template[\"source\"], context)\n target = templates.from_string(template[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Processing '%s'\\n\" % source)\n\n if template[\"type\"] == \"file\":\n templates.from_file(source, target, context)\n elif template[\"type\"] == \"glob\":\n for source_file in glob.glob(source):\n if os.path.isfile(source_file):\n target_file = os.path.join(\n target, os.path.basename(source_file))\n\n templates.from_file(\n source_file, target_file, context)\n else:\n raise Exception(\"Not supported\")\n\n _process(\"per_family\", families)\n _process(\"per_cpu\", cpus)\n _process(\"per_board\", boards)\n _process(\"per_once\", [{\n \"families\": [family[\"family\"] for family in families],\n \"cpus\": [cpu[\"cpu\"] for cpu in cpus],\n \"boards\": [board[\"board\"] for board in boards]\n }])", "def __init__(self, ctx, verbose=0) -> None:\n\n self.__ctx__ = ctx\n TemplateHandler.templates_path = ctx.obj['TEMPLATES_FOLDER']\n self.verbose = verbose\n\n self.validate_templates_path()\n if len(TemplateHandler.templates) == 0:\n self.load_templates()", "def _setup(self):\n self._raw_top_dir = os.path.join(self._snippets_dir,\"raw\",\"dynamic\")\n if not os.path.exists(self._raw_top_dir):\n os.mkdir(self._raw_top_dir)\n\n self._trec_top_dir = os.path.join(self._snippets_dir,\"trec\",\"dynamic\")\n if not os.path.exists(self._trec_top_dir):\n os.mkdir(self._trec_top_dir)\n\n self._temp_top_dir = os.path.join(self._snippets_dir,\"temp\",\"dynamic\")\n if not os.path.exists(self._temp_top_dir):\n os.mkdir(self._temp_top_dir)\n\n self._snippet_result_top_dir = os.path.join(self._snippets_dir,\"result\",\"dynamic\")\n if not os.path.exists(self._snippet_result_top_dir):\n os.mkdir(self._snippet_result_top_dir)\n\n self._snippet_index_top_dir = 
os.path.join(self._snippets_dir,\"index\",\"dynamic\")\n if not os.path.exists(self._snippet_index_top_dir):\n os.mkdir(self._snippet_index_top_dir)\n\n self._para_top_dir = os.path.join(self._snippets_dir,\"para\",\"dynamic\")\n if not os.path.exists(self._para_top_dir):\n os.mkdir(self._para_top_dir)", "def get_templates(self):\n\n data = self.request_from_server('templates')\n self.templates = data", "def initial_processing(subject_dir):\n # get subject name\n subject_name = subject_dir.parts[-1]\n\n # create ${subject_dir}/ASL and ${subject_dir}/T1w/Results/ASL \n # directories\n asl_dir = subject_dir / 'ASL'\n tis_dir = asl_dir / 'TIs'\n calib_dir = asl_dir / 'Calib'\n calib0_dir = calib_dir / 'Calib0'\n calib1_dir = calib_dir / 'Calib1'\n strucasl_dir = subject_dir / 'T1w/ASL'\n create_dirs([asl_dir, tis_dir, calib0_dir, calib1_dir, strucasl_dir])\n\n # find sub-directories\n # structural\n t1_dir = subject_dir / 'T1w'\n t1_name = t1_dir / 'T1w_acpc_dc_restore.nii.gz'\n t1_brain_name = t1_dir / 'T1w_acpc_dc_restore_brain.nii.gz'\n\n # asl\n b_dir = subject_dir / f'{subject_name}_V1_B'\n try:\n mbpcasl_dir = list(b_dir.glob('**/scans/*mbPCASLhr'))[0]\n # if no files match this format, it throws an IndexError\n except IndexError as e:\n print(e)\n mbpcasl = mbpcasl_dir / 'resources/NIFTI/files' / f'{subject_name}_V1_B_mbPCASLhr_PA.nii.gz'\n \n # output names\n tis_name = tis_dir / 'tis.nii.gz'\n calib0_name = calib0_dir / 'calib0.nii.gz'\n calib1_name = calib1_dir / 'calib1.nii.gz'\n # get tis\n fslroi(str(mbpcasl), tis_name, 0, 86)\n # get calibration images\n fslroi(str(mbpcasl), calib0_name, 88, 1)\n fslroi(str(mbpcasl), calib1_name, 89, 1)\n\n # get surface names\n surfaces_dir = t1_dir / 'fsaverage_LR32k'\n L_mid = surfaces_dir / f'{subject_name}_V1_MR.L.midthickness.32k_fs_LR.surf.gii'\n R_mid = surfaces_dir / f'{subject_name}_V1_MR.R.midthickness.32k_fs_LR.surf.gii'\n L_pial = surfaces_dir / f'{subject_name}_V1_MR.L.pial.32k_fs_LR.surf.gii'\n R_pial = surfaces_dir / f'{subject_name}_V1_MR.R.pial.32k_fs_LR.surf.gii'\n L_white = surfaces_dir / f'{subject_name}_V1_MR.L.white.32k_fs_LR.surf.gii'\n R_white = surfaces_dir / f'{subject_name}_V1_MR.R.white.32k_fs_LR.surf.gii'\n\n # add filenames to a dictionary to be saved to a json\n json_name = asl_dir / 'ASL.json'\n fields = [\n \"T1w_dir\",\n \"T1w_acpc\",\n \"T1w_acpc_brain\",\n \"ASL_seq\",\n \"ASL_dir\",\n \"TIs_dir\",\n \"structasl\",\n \"calib_dir\",\n \"calib0_dir\",\n \"calib1_dir\",\n \"calib0_img\",\n \"calib1_img\",\n \"L_mid\",\n \"R_mid\",\n \"L_pial\",\n \"R_pial\",\n \"L_white\",\n \"R_white\",\n \"json_name\"\n ]\n field_values = [\n t1_dir,\n t1_name,\n t1_brain_name,\n tis_name,\n asl_dir,\n tis_dir,\n strucasl_dir,\n calib_dir,\n calib0_dir,\n calib1_dir,\n calib0_name,\n calib1_name,\n L_mid,\n R_mid,\n L_pial,\n R_pial,\n L_white,\n R_white,\n json_name\n ]\n names_dict = {}\n for key, value in zip(fields, field_values):\n names_dict[key] = str(value)\n with open(json_name, 'w') as fp:\n json.dump(names_dict, fp, sort_keys=True, indent=4)", "def prepare_templates(params, outfile, redo=False):\n if os.path.exists(outfile) and not redo:\n return\n emiles = EMILES()\n wmin = params[\"wmin\"] * u.micrometer\n wmax = params[\"wmax\"] * u.micrometer\n # Modify wmin to compensate for the recession velocity of the system\n zmax = (params[\"vsyst\"] + 3000) / const.c.to(\"km/s\").value\n wrest = wmin / (1 + zmax)\n grid = np.array(np.meshgrid(params[\"ages\"], params[\"metals\"],\n params[\"bis\"])).T.reshape(-1, 3)\n 
ssppars = Table(grid, names=[\"T\", \"Z\", \"imf\"])\n filenames = []\n for args in grid:\n filenames.append(os.path.join(emiles.data_dir,\n emiles.filename(*args)))\n wave, spec = misc.read_spec(filenames[0])\n wave = wave * u.angstrom\n idx = np.where((wave > wrest) & (wave <= wmax))\n wave = wave[idx]\n spec = spec[idx]\n wrange = [wave[0].to(\"angstrom\").value, wave[-1].to(\"angstrom\").value]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps = np.zeros((len(filenames), len(newflux)))\n print(\"Processing SSP files\")\n for i, fname in tqdm(enumerate(filenames)):\n spec = fits.getdata(fname)[idx]\n newflux, logLam, velscale = util.log_rebin(wrange, spec,\n velscale=params[\"velscale\"])\n ssps[i] = newflux\n norm = np.median(ssps)\n ssps /= norm\n hdu1 = fits.PrimaryHDU(ssps)\n hdu1.header[\"EXTNAME\"] = \"SSPS\"\n hdu1.header[\"BSCALE\"] = (norm, \"Scale to convert from ADU to flux.\")\n hdu2 = fits.BinTableHDU(ssppars)\n hdu2.header[\"EXTNAME\"] = \"PARAMS\"\n hdu1.header[\"CRVAL1\"] = logLam[0]\n hdu1.header[\"CD1_1\"] = logLam[1] - logLam[0]\n hdu1.header[\"CRPIX1\"] = 1.\n # Making wavelength array\n hdu3 = fits.BinTableHDU(Table([logLam], names=[\"loglam\"]))\n hdu3.header[\"EXTNAME\"] = \"LOGLAM\"\n hdulist = fits.HDUList([hdu1, hdu2, hdu3])\n hdulist.writeto(outfile, overwrite=True)\n return", "def setup_analysis_skeleton(project_dir,sub_stem):\n \n # Move from project_dir to ToProcess\n TP_dir = os.path.join(project_dir,'ToProcess')\n move_files(project_dir, TP_dir, '')\n \n all_files = glob.glob(os.path.join(TP_dir,'*%s*' % (sub_stem)))\n all_files = [os.path.basename(f) for f in all_files]\n \n prefixes = [pref[:len(sub_stem)+4] for pref in all_files]\n subjects = list(set(prefixes))\n subjectDir = []\n\n for idx, subs in enumerate(subjects):\n \n sub_dir = os.path.join(TP_dir, subs[0:(len(sub_stem) + 4)])\n subjectDir.append(sub_dir)\n \n if os.path.exists(subjectDir[idx]) == 0:\n os.mkdir(os.path.join(TP_dir,subjectDir[idx]))\n \n toMove = [os.path.abspath(f) for f in glob.glob(subjectDir[idx] + '*')]\n \n # Move all files to subject-specific dirs\n for mFile in toMove:\n if os.path.isfile(mFile):\n shutil.move(mFile, subjectDir[idx])\n \n for entry in glob.glob(subjectDir[idx] + '*'):\n if os.path.isfile(entry):\n print \"Not allocated: \" + entry\n \n return subjectDir", "def generateISTemplates(course, wordlist, charLevels, translateDict, similarCharsList, supportsUppercase):\n\n levelsDict= {}\n stageByNumber = 1 # represents key of wordsByLevelDict to get wordlist value\n wordsByLengthDict = {}\n wordsByLevelDict = {}\n swappedTranslateDict = dict((v,k) for k,v in translateDict.iteritems()) # Keys are latin, values non-latin\n\n for word in wordlist:\n wordsByLengthDict = wordsByLength(word, wordsByLengthDict)\n wordsByLevelDict = wordsByLevel(word, charLevels, wordsByLevelDict)\n\n for stage in charLevels:\n\n if not wordsByLevelDict.has_key(stageByNumber): # Skip the uppercase stages in charLevels\n stageByNumber += 1\n continue\n\n charLevelsWordList = wordsByLevelDict[stageByNumber] # contains list of words suitable for current stage\n selectedOptions = makeWordOptions(charLevelsWordList, similarCharsList) # List of 3 sublists containing options or None\n\n if selectedOptions is not None:\n\n for i in range(len(selectedOptions)):\n generateLevel(random.choice(selectedOptions[i]), translateDict, selectedOptions[i], name+str(stageByNumber)+\".qdef\")\n generateLevelAlternative(selectedOptions[i], translateDict, 
similarCharsList, name+str(stageByNumber)+\".qdef\")\n\n if supportsUppercase:\n generateUppercaseLevel(random.choice(selectedOptions[i]), translateDict, selectedOptions[i], name+str(stageByNumber)+\".qdef\")\n\n generateEndOfQuiz(name+str(stageByNumber)+\".qdef\")\n stageByNumber += 1\n\n # generate practice levels\n for i in range(30):\n selectedWords = wordsByLengthDict[random.choice(wordsByLengthDict.keys())]\n if len(selectedWords) >= 3:\n quizRandomSelection = random.sample(selectedWords,3) # take random 4 items from list\n\n for word in quizRandomSelection:\n trickWord = findAndReplaceSimilarChar(word, quizRandomSelection, similarCharsList)\n if trickWord:\n quizRandomSelection.append(trickWord)\n break\n\n generateLevel(quizRandomSelection[0], translateDict, quizRandomSelection, name+\"Practice.qdef\")\n generateLevelAlternative(quizRandomSelection, translateDict, similarCharsList, name+\"Practice.qdef\")\n\n if supportsUppercase:\n generateUppercaseLevel(quizRandomSelection[0], translateDict, quizRandomSelection, name+\"Practice.qdef\")\n\n generateEndOfQuiz(name+\"Practice.qdef\")\n\n print \"New MUNI ROPOT templates has been created in: \" + target", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template", "def preprocess_template(template_file: str) -> None:\n LOGGER.info(\"Processing template %s\", template_file)\n\n with DFReader(open(template_file, \"rb\")) as reader:\n level = reader.read_level()\n\n doors = {}\n keys_needed: Dict[int, int] = collections.Counter()\n for eid, (_, _, entity) in level.entities.items():\n if not isinstance(entity, LevelDoor):\n continue\n\n doors[eid] = {\n \"level\": entity.file_name.decode(),\n \"door\": entity.door_set,\n }\n keys_needed[DOOR_INFO[entity.door_set][1]] += 1\n\n for door_data in doors.values():\n key_type = DOOR_INFO[door_data[\"door\"]][1]\n while key_type < 3 and keys_needed[key_type + 1] == 0:\n key_type += 1\n door_data[\"key_get\"] = key_type\n\n with open(template_file + \".json\", \"w\") as fout:\n json.dump(\n {\"doors\": doors},\n fout,\n )", "def __fill_qsub_file__(self,configs):\n template_file= os.path.join(configs['system'].get('Common_directories','template'),configs['pipeline'].get('Template_files','flowcell_report'))\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n dictionary.update({k:str(v)})\n dictionary.update({'post_pipeline':configs['pipeline'].get('Db_reports','post_pipeline')})\n dictionary.update({'concord_script':configs['pipeline'].get('Flowcell_reports','concord_script')})\n dictionary.update({'dbsnp_script':configs['pipeline'].get('Flowcell_reports','dbsnp_script')})\n dictionary.update({'tenx_script':configs['pipeline'].get('Flowcell_reports','tenx_script')})\n dictionary.update({'zero_script':configs['pipeline'].get('Flowcell_reports','zero_script')})\n dictionary.update({'hethom_script':configs['pipeline'].get('Flowcell_reports','hethom_script')})\n dictionary.update({'reads_script':configs['pipeline'].get('Flowcell_reports','reads_script')})\n with open(self.qsub_file,'w') as f:\n f.write(fill_template(template_file,dictionary))", "def 
_generate_and_load_initial_batch(self, working_directory: Path):\n\n template_dir = Path(working_directory) / \"template_1\"\n template_dir.mkdir()\n # changes here should often be reflected in\n # data_generator_opts and data_loader_opts\n\n channel_decl = self.channel_configs[0]\n\n plugin_options = {\n \"pid\": \"0\",\n \"big_ids\": \"True\",\n }\n # if it's efficient to do the whole load in one go, let's just do that.\n if self.run_until.gap < MIN_PORTION_SIZE:\n num_records = self.run_until.gap\n else:\n num_records = 1 # smallest possible batch to get to parallelizing fast\n results = self._generate_and_load_batch(\n template_dir,\n channel_decl.org_config,\n {\n \"generator_yaml\": self.options.get(\"recipe\"),\n \"num_records\": num_records,\n \"num_records_tablename\": self.run_until.sobject_name or COUNT_REPS,\n \"loading_rules\": self.loading_rules,\n \"vars\": channel_decl.merge_recipe_options(self.recipe_options),\n \"plugin_options\": plugin_options,\n \"bulk_mode\": self.bulk_mode,\n },\n )\n self.update_running_totals_from_load_step_results(results)\n\n # rename directory to reflect real number of sets created.\n wd = SnowfakeryWorkingDirectory(template_dir)\n if self.run_until.sobject_name:\n self.sets_finished_while_generating_template = wd.get_record_counts()[\n self.run_until.sobject_name\n ]\n else:\n self.sets_finished_while_generating_template = num_records\n\n new_template_dir = data_loader_new_directory_name(template_dir, self.run_until)\n shutil.move(template_dir, new_template_dir)\n template_dir = new_template_dir\n\n # don't send data tables to child processes. All they\n # care about are ID->OID mappings\n wd = SnowfakeryWorkingDirectory(template_dir)\n self._cleanup_object_tables(*wd.setup_engine())\n\n return template_dir, wd.relevant_sobjects()", "def __init__(\n self,\n pubchem_templates_path: str = \"\",\n general_templates_path: str = config.general_templates,\n ) -> None:\n self.coordgen_params = rdCoordGen.CoordGenParams()\n self.coordgen_params.coordgenScaling = 50 / 1.5\n self.coordgen_params.templateFileDir = config.coordgen_templates\n\n self.pubchem_templates = (\n pubchem_templates_path if os.path.isdir(pubchem_templates_path) else \"\"\n )\n self.templates: Dict[str, rdkit.Chem.rdchem.Mol] = OrderedDict()\n\n if os.path.isdir(general_templates_path):\n for k in sorted(os.listdir(general_templates_path)):\n template = self._load_template(os.path.join(general_templates_path, k))\n template_name = k.split(\".\")[0]\n self.templates[template_name] = template", "def create_templates_database(dataset_path_list, db_file_path):\n paths_list = dataset_path_list\n\n templates = dict()\n for file in paths_list:\n\n image = face_recognition.load_image_file(file)\n tmp = face_recognition.face_encodings(image)\n if tmp:\n template = face_recognition.face_encodings(image)[0]\n if template.size != 0:\n templates[file] = template\n\n dump_dict_to_db(templates, db_file_path)", "def test_nested_template_source_generation(self):\n sources = [source for source in self.loader.get_template_sources('component.child.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/child/child.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/child/child.html')\n\n sources = [source for source in self.loader.get_template_sources('deeply.nested.component.and.child.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/deeply/nested/component/and/child/child.html')\n 
self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/deeply/nested/component/and/child/child.html')\n\n sources = [source for source in self.loader.get_template_sources('component.child/another.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/child/another.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/child/another.html')", "def __init__(self, website):\n self.website = website\n if website.paths.__ is None:\n self.defaults = {}\n self.master = None\n else:\n path = os.path.join(website.paths.__, 'etc', 'simplate.html')\n self.path = path\n if os.path.isfile(path):\n msg = message_from_file(open(self.path))\n body = msg.get_payload().decode(self.charset)\n self.defaults = dict()\n for key, val in msg.items():\n key = key.decode(self.charset)\n val = val.decode(self.charset)\n if not is_valid_identifier(key):\n raise BadKey(key, path)\n self.defaults[key] = val\n self.master = Template(body)\n else:\n self.defaults = {}\n self.master = None", "def __init__(self, template_path, out_dir_path):\n assert os.path.exists(template_path), \"Expected the path %s to exist\" % template_path\n assert isinstance(out_dir_path, str), \"Expected `out_dir_path` to be a string\"\n\n if not os.path.exists(out_dir_path):\n mkpath(out_dir_path)\n\n with open(template_path, mode=\"r\") as f:\n variables_and_template = yaml.load_all(f, Loader=yaml.RoundTripLoader)\n\n # Convert generator to list\n variables_and_template = [x for x in variables_and_template]\n assert len(variables_and_template) == 2, \"Expected the template file to contain two YAML documents\"\n\n variables, template = variables_and_template\n\n assert isinstance(variables, dict), \"Expected variables to be a dictionary\"\n assert isinstance(template, dict), \"Expected template to be a dictionary\"\n\n self._trial_generator = TrialGenerator(self.variables_to_intervals(variables))\n self._template = template\n self._out_dir_path = out_dir_path\n self._template_path = template_path", "def copy_templates(self, in_dir):\n self.copy_contents(in_dir, subdir=\"templates\")", "def main(args):\r\n logger.info(\"Starting site generation...\")\r\n logger.info(args)\r\n logger.info(\"Opening syllabus.yml...\")\r\n\r\n## PUT THIS BACK LATER!!!!!####\r\n\r\n with open(\"/home/tofts/git/upskilling/data/syllabus.yml\", \"r\") as syllabus_file:\r\n syllabus = yaml.safe_load(syllabus_file)\r\n\r\n logger.info(\"Found the following modules:\")\r\n for module in syllabus['modules']:\r\n logger.info(f\"...{syllabus['modules'][module]['name']}\")\r\n\r\n logger.info(\"Opening squad.yml...\")\r\n with open(\"/home/tofts/git/upskilling/data/squad.yml\", \"r\") as squad_file:\r\n squad = yaml.safe_load(squad_file)\r\n\r\n for member in squad['members']:\r\n logger.info(f\"Found member {member.upper()}\")\r\n logger.info(f\"...{squad['members'][member]}\")\r\n\r\n logger.info(\"Processing index.html template...\")\r\n with open(\"/home/tofts/git/upskilling/templates/index.html\") as index_template_file:\r\n index_template = index_template_file.read()\r\n\r\n index_template = module_list(index_template, syllabus, squad)\r\n index_template = task_list(index_template, syllabus, squad)\r\n index_template = member_progress_list(index_template, syllabus, squad)\r\n\r\n logger.info(\"Writing completed template out...\")\r\n with open(\"/home/tofts/git/upskilling/docs/index.html\", \"w\") as index_output_file:\r\n index_output_file.write(index_template)\r\n\r\n logger.info(\"Generating member pages...\")\r\n with 
open(\"/home/tofts/git/upskilling/templates/member-page.html\", \"r\") as member_template_file:\r\n member_page_template = member_template_file.read()\r\n generate_member_pages(member_page_template, syllabus, squad)\r\n\r\n logger.info(\"Generating task detail pages...\")\r\n with open(\"/home/tofts/git/upskilling/templates/task-details.html\", \"r\") as task_details_file:\r\n task_details_template = task_details_file.read()\r\n generate_task_details_pages(task_details_template, syllabus, squad)\r\n\r\n logger.info(\"Generating downloads...\")\r\n generate_downloads(syllabus, squad)", "def layout (self, lydef):\n\n # Categorize files\n fout = self.categorize()\n\n ly = defaultdict(list)\n\n # For each template path, attempt to map all files in that category\n # and add any files that renders completely to the layout.\n for tmplsrc, category in lydef.items():\n tmpl = Template(tmplsrc)\n for a, f in fout[category]:\n # print('%s: Try %s matched to %s in %s' % (category, tmplsrc, f, a))\n try:\n path = os.path.join(tmpl.substitute(a.info),\n os.path.basename(f))\n ly[path].append((a, f))\n except KeyError as e:\n print(' -- %s info key %s not found' % (a, e))\n pass\n\n # Sort providing sources for each path.\n # E.g., prefer .redist. before .symbols., etc.\n for path in ly:\n ly[path].sort(reverse=True)\n\n return ly", "def _load_templates(self, tasks, skip_tasks=()):\n name2kwargs = {}\n dir_ = Path('data/prompts')\n paths = (dir_/t for t in tasks) if tasks else dir_.iterdir()\n if skip_tasks: paths = (p for p in paths if p.stem not in skip_tasks)\n for path in paths:\n if not path.is_dir():\n if tasks: warnings.warn(f'{path} is not a directory.')\n continue\n name2kwargs[path.stem] = load_prompt(path.stem,\n verbose=self.verbose)\n return name2kwargs", "def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()", "def readTemplates():\n\n # Compile HTML templates.\n templates = {}\n for tt in [ 'image', 'dirindex', 'allindex', 'trackindex', 'sortindex' ]:\n fn = 'template-%s' % tt + opts.htmlext\n ttext = readTemplate(fn)\n templates[ tt ] = compileTemplate(ttext, fn)\n\n fn = 'template-css.css'\n ttext = readTemplate(fn)\n templates[ 'css' ] = compileTemplate(ttext, fn)\n\n # Compile user-specified rc file.\n rcsfx = 'rc'\n templates[ rcsfx ] = []\n if opts.rc:\n try:\n tfile = open(opts.rc, \"r\")\n orc = tfile.read()\n tfile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't open user rc file:\", opts.rc\n sys.exit(1)\n\n o = compileCode('', orc, opts.rc)\n templates[ rcsfx ] += [ o ]\n\n # Compile user-specified code.\n if opts.rccode:\n o = compileCode('', opts.rccode, \"rccode option\")\n templates[ rcsfx ] += [ o ]\n\n # Compile global rc file without HTML tags, just python code.\n code = readTemplate('template-%s' % rcsfx + '.py')\n o = compileCode('', code, tt)\n templates[ rcsfx ] += [ o ]\n\n return templates", "def _init_test_project_dir(self, project_dir):\n templates = glob.glob(f'{project_dir}/*.yml.template')\n for template_path 
in templates:\n # Replace env vars in template\n with open(template_path, 'r', encoding='utf-8') as f_template:\n yaml = f_template.read()\n\n # Detect if every env var configured for the template\n template = os.path.basename(template_path)\n yaml_path = template_path.replace('.template', '')\n env_connectors = self._find_env_conn_by_template_name(template)\n is_configured = self._is_env_connector_configured(env_connectors)\n\n # \"Render\" the template and save to file if env vars configured\n if is_configured:\n template_vars = set(re.findall(r'\\$\\{(.+?)\\}', yaml))\n for var in template_vars:\n yaml = yaml.replace(\n f'${{{var}}}', self._all_env_vars_to_dict().get(var)\n )\n\n # Write the template replaced YAML file\n with open(yaml_path, 'w+', encoding='utf-8') as f_render:\n f_render.write(yaml)\n\n # Delete if exists but not configured\n else:\n try:\n os.remove(yaml_path)\n except OSError:\n pass", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def create_templates(self):\n for name, params in list_registered_templates():\n if self['templates'].filter(theme=self, name=name).count() == 0:\n self['templates'].create(theme=self, name=name)", "def _prepare_assets(self, page_instructions, assets=None):\n assert type(assets) == tuple or type(assets) == list\n\n for yaml in page_instructions.yaml:\n # yaml = app/page/page.yaml\n template, origin = loader.find_template(yaml)\n filepath = template.origin.name\n\n # /Users/me/Development/app/templates/app/page/page.yaml\n yaml_basedir = os.path.dirname(yaml)\n # app/page\n template_basedir = filepath[:filepath.find(yaml)]\n # /Users/me/Development/app/templates\n\n for asset in assets:\n # directory = /media/js/templates\n if not yaml_basedir in asset:\n # The user might be specifying the directory relative to\n # the yaml file itself, so we'll add it for them if they\n # gave us something like 'media/js/templates'\n directory = os.path.join(yaml_basedir, asset)\n else:\n directory = asset\n\n sourcedirectory = os.path.join(template_basedir, directory)\n\n if not os.path.isdir(sourcedirectory):\n # We're going to try and find it somewhere else, it may not\n # be relative to the YAML file\n #\n # This is quite possible if the yaml file is processing a\n # \"chirp:\" attribute.\n try:\n sourcedirectory = find_directory_from_loader(\n page_instructions, asset)\n # We need to reset this, it has the yaml_basedir on it\n # at this point\n directory = asset\n except TemplateDoesNotExist:\n continue\n\n if not os.path.isdir(sourcedirectory):\n continue\n\n cachedirectory = os.path.join(self.cache_root, directory)\n\n if os.path.isdir(cachedirectory):\n if self._assets_are_stale(sourcedirectory, cachedirectory):\n shutil.rmtree(cachedirectory)\n else:\n continue\n\n shutil.copytree(sourcedirectory, cachedirectory)\n\n if settings.FILE_UPLOAD_PERMISSIONS is not None:\n os.chmod(cachedirectory, 02750)\n\n for root, dirs, files in os.walk(cachedirectory):\n for momo in files:\n os.chmod(os.path.join(root, momo),\n settings.FILE_UPLOAD_PERMISSIONS)\n for momo in dirs:\n os.chmod(os.path.join(root, momo), 02750)", "def bootstrap(self):\n\n\t\t#---paths.yaml specifies directories which might be absent so make them\n\t\tif not os.path.isdir(self.postdir): 
os.mkdir(self.postdir)\n\t\tif not os.path.isdir(self.plotdir): os.mkdir(self.plotdir)\n\t\t#---parse the simulations found in each \"spot\"\n\t\tfor spot in self.spots: self.treeparser(spot)\n\t\t#---if there is a part named edr then we use it to get simulation times\n\t\t#---! edr files are required to infer times for slicing however we might also use xtc or trr later\n\t\tassert 'edr' in zip(*self.spots.keys())[1]\n\t\tself.treeparser_edr()\n\t\t#---data are stored in dictionaries by spot name\n\t\tall_top_keys = [i for j in [k.keys() for k in self.toc.values()] for i in j]\n\n\t\t#---! under development\n\t\tfor key in ['post','groups','slices']:\n\t\t\tif key not in self.members_with_specific_parts:\n\t\t\t\tself.__dict__[key] = {i:{} for i in all_top_keys}\n\t\t\telse: self.__dict__[key] = {(spot,i):{} \n\t\t\t\tfor spot in self.toc for i in self.toc[spot]}\n\t\tself.save()", "def __init__(self, template_name):\n # self.env = Environment(loader=PackageLoader(\n # package, path))\n # self.template = self.env.get_template(template_name)\n with open(template_name, 'r', encoding='UTF-8') as f:\n self.template = Template(f.read())", "def initialise_templates(self, tel_type):\n for t in tel_type:\n if tel_type[t] in self.prediction.keys():\n continue\n\n self.prediction[tel_type[t]] = \\\n TableInterpolator(self.root_dir + \"/\" +\n self.file_names[tel_type[t]])\n\n return True", "def setUp(self):\n # Generates directory names\n self.tempdir = tempfile.mkdtemp()\n self.subdir = os.path.join(self.tempdir, \"dir\")\n self.emptydir = os.path.join(self.tempdir, \"empty\")\n # Populates directories\n os.makedirs(self.subdir)\n os.makedirs(self.emptydir)\n # Populates files\n self.root_fcount = 3\n self.nest_fcount = 5\n for i in range(0, self.root_fcount):\n with open(os.path.join(self.tempdir, \"%i.txt\" % i), \"w+\") as f:\n f.write(\"Test.\")\n for i in range(0, self.nest_fcount):\n with open(os.path.join(self.subdir, \"%i.txt\" % i), \"w+\") as f:\n f.write(\"Test.\")\n self.filename = os.path.join(self.subdir, \"nontxt.mp3\")\n with open(self.filename, \"w+\") as f:\n f.write(\"Test.\")", "def update_templates(self):\n\n params = self.chose_param_value(\"--temp\")\n self._check_path_availability([\"get_template_dir\", \"get_template_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_template_dir(),\n self.analizer.get_template_dir_to(),\n params\n )\n return self.write_debug_message(\"Temp files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about template files\")", "def fillBackgroundTemplates(opt):\n\n totalBkg={}\n templates=[]\n\n #import signal events\n data=ROOT.TChain('data')\n for f in [os.path.join(opt.input,x) for x in os.listdir(opt.input) if 'Data13TeV' in x]:\n if 'MuonEG' in f : continue\n data.AddFile(f)\n\n #define final preselection cuts\n cuts='xangle==%d'%opt.xangle\n if len(opt.presel) : cuts += ' && ' + opt.presel \n if opt.csiacc:\n csiCuts ='csi1>%f && csi1<%f && '%opt.csiacc[opt.xangle][0]\n csiCuts+='csi2>%f && csi2<%f'%opt.csiacc[opt.xangle][1]\n cuts=csiCuts if len(cuts)==0 else '{0} && {1}'.format(cuts,csiCuts)\n\n #loop over categories build templates\n for icat in range(len(opt.categs)):\n\n #apply category cuts\n categCut=opt.categs[icat]\n categCut=cuts if len(categCut)==0 else '%s && %s'%(categCut,cuts)\n catName='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n\n print '\\t',catName,categCut\n\n #background modelling histos\n histos=[]\n data_obs=None\n for name,pfix 
in [('bkg_'+catName,'mix'),('bkg_%s_bkgShape'%catName,'mixem')]:\n\n templCuts=categCut.replace('csi1',pfix+'csi1')\n templCuts=templCuts.replace('csi2',pfix+'csi2')\n data.Draw('{0}mmiss >> h({1},{2},{3})'.format(pfix,opt.nbins,opt.mMin,opt.mMax),templCuts,'goff')\n h=data.GetHistogram()\n histos.append(h.Clone(name))\n histos[-1].SetDirectory(0)\n\n if len(histos)==1:\n totalBkg[icat]=h.Integral()\n if not opt.unblind :\n data_obs=h.Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n h.Reset('ICE')\n templates += defineProcessTemplates(histos)\n\n #observed data in this category if unblinding\n if opt.unblind:\n data.Draw('mmiss >> h({1},{2},{3})'.format(opt.nbins,opt.mMin,opt.mMax),categCut,'goff')\n data_obs=data.GetHistogram().Clone('data_obs_'+catName)\n data_obs.SetDirectory(0)\n\n templates.append(data_obs)\n\n print '\\t total background:',totalBkg\n return totalBkg,templates", "def test_create_template_for_all_namespaces(self):\n pass", "def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)", "def load_bodies(directory):\n\n files = glob.glob(os.path.join(directory, \"*.json\"))\n bodies = {}\n for file in files:\n print(\"Loading body \" + file)\n with open(file) as data_file:\n internal_name = splitext(basename(file))[0]\n bodies[internal_name] = load_body(json.load(data_file))\n for key in bodies:\n body = bodies[key]\n if body.parent_internal_name is not None:\n body.parent = bodies[body.parent_internal_name]\n del body.parent_internal_name\n\n for body in bodies.values():\n print(\"Executing post_init for \" + body.name)\n body.post_init()\n\n return bodies.values()", "def first_level(TR, contrast_list, subject_list, \n experiment_dir, output_dir, subjectinfo_func, working_dir='workingdir'):\n # SpecifyModel - Generates SPM-specific Model\n modelspec = Node(SpecifySPMModel(concatenate_runs=False,\n input_units='secs',\n output_units='secs',\n time_repetition=TR,\n high_pass_filter_cutoff=128),\n name=\"modelspec\")\n\n # Level1Design - Generates an SPM design matrix\n level1design = Node(Level1Design(bases={'hrf': {'derivs': [0, 0]}},\n timing_units='secs',\n interscan_interval=TR,\n model_serial_correlations='FAST'),\n name=\"level1design\")\n\n # EstimateModel - estimate the parameters of the model\n level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n name=\"level1estimate\")\n\n # EstimateContrast - estimates contrasts\n level1conest = Node(EstimateContrast(), name=\"level1conest\")\n\n # Get Subject Info - get subject specific condition information\n getsubjectinfo = Node(Function(input_names=['subject_id'],\n output_names=['subject_info'],\n function=subjectinfo_func),\n name='getsubjectinfo')\n\n # Infosource - a function free node to iterate over the list of subject names\n infosource = Node(IdentityInterface(fields=['subject_id',\n 'contrasts'],\n contrasts=contrast_list),\n name=\"infosource\")\n infosource.iterables = [('subject_id', subject_list)]\n\n # SelectFiles - to grab the data (alternativ to DataGrabber)\n smooth_dir = opj(experiment_dir, 'smooth_nomask', 'preproc')\n templates = {'func': opj(smooth_dir, 'sub-{subject_id}',\n '*run-*_fwhm-8_bold.nii')}\n\n selectfiles = Node(SelectFiles(templates,\n base_directory=experiment_dir,\n sort_filelist=True),\n name=\"selectfiles\")\n\n # Datasink - creates output folder for important outputs\n datasink = Node(DataSink(base_directory=experiment_dir,\n container=output_dir),\n name=\"datasink\")\n\n # Use the following 
DataSink output substitutions\n substitutions = [('_subject_id_', 'sub-')]\n datasink.inputs.substitutions = substitutions\n\n # Initiation of the 1st-level analysis workflow\n l1analysis = Workflow(name='l1analysis')\n l1analysis.base_dir = opj(experiment_dir, working_dir)\n\n # Connect up the 1st-level analysis components\n l1analysis.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),\n (infosource, getsubjectinfo, [('subject_id',\n 'subject_id')]),\n (getsubjectinfo, modelspec, [('subject_info',\n 'subject_info')]),\n (infosource, level1conest, [('contrasts', 'contrasts')]),\n (selectfiles, modelspec, [('func', 'functional_runs')]),\n (modelspec, level1design, [('session_info',\n 'session_info')]),\n (level1design, level1estimate, [('spm_mat_file',\n 'spm_mat_file')]),\n (level1estimate, level1conest, [('spm_mat_file',\n 'spm_mat_file'),\n ('beta_images',\n 'beta_images'),\n ('residual_image',\n 'residual_image')]),\n (level1conest, datasink, [('spm_mat_file', '1stLevel.@spm_mat'),\n ('spmT_images', '1stLevel.@T'),\n ('con_images', '1stLevel.@con'),\n ('spmF_images', '1stLevel.@F'),\n ('ess_images', '1stLevel.@ess'),\n ]),\n ])\n return l1analysis", "def main(temp_dir, extensions, template):\n env = load_env(template_dir=temp_dir)\n if not template:\n # Get all the templates and return a dict with enumerated \n # templates names\n ext = extensions if extensions else []\n template_dict = get_templates(env, extensions=ext)\n # Echo the content of the template directory by enumerating \n # the templates and a simple list join\n temp_list = list()\n for x in template_dict.items():\n num = str(x[0])\n # Remove whitespace, underscores and capitalize words\n temp_name = x[1].strip().replace(\"_\", \" \").title()\n temp_string = \"{}. {}\".format(num, temp_name)\n temp_list.append(temp_string)\n click.echo(\"\\n\".join(temp_list))\n # Prompt the user to give the number of the template\n temp_num = click.prompt(\n \"Choose a templeta by entering the number of the template.\",\n type=int\n )\n # Get the template from the template dictionary\n template = template_dict.get(temp_num)\n # Get the variables\n temp_vars = get_vars(template, env)\n # Crate a dict with variables and let the user input the variables\n vars_to_render = dict()\n for var in temp_vars:\n user_var = click.prompt(\"{}?\".format(var.capitalize()))\n vars_to_render[var] = user_var\n # Get the template\n temp = env.get_template(template)\n # Render the template\n click.echo(temp.render(vars_to_render))", "def make_template(filenames):\n result = {}\n for fn in filenames:\n with open(fn) as f:\n conf = yaml.load(f)\n expand_horizons(result, conf)\n return result", "def main():\n # %%\n CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True)\n fpaths = list( _Config.raw_profiles_path.glob('*.html') )\n print( f'{len(fpaths)} htmls found' )\n # %%\n fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html'\n # %%\n fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html'\n # %%\n fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ]\n # %%\n fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')]\n # %%\n dics = {}\n # %%\n\n for i, fpath in enumerate(fpaths):\n if fpath in dics:\n continue\n\n with fpath.open('rt') as f_in:\n html = f_in.read()\n\n print( f'\\n***{i+1}/{len(fpaths)} {fpath.name}:')\n dic = extract_one( html, fpath )\n dic['linkedin_url'] = f\"https://www.linkedin.com/in/{fpath.name.split('.')[0]}\"\n dic['scraped_at'] = 
dt.datetime.fromtimestamp( fpath.stat().st_ctime )\n # pprint(dic['work_stats'])\n dics[fpath] = dic\n\n dics_arr = list(dics.values())\n # %%\n del dics\n # %%\n\n with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out:\n json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 )\n # %%\n with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out:\n yaml.safe_dump( dics_arr, f_out )\n # %%\n df = produce_summary_table( dics_arr )\n df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx',\n index=False)\n # %%", "def create_template_loader(self, template_path):\n raise NotImplementedError()", "def _create_files_from_template(\r\n self,\r\n *,\r\n data_dir: str,\r\n is_first_submission: Optional[bool] = None,\r\n ):\r\n\r\n for template_type in TemplateType:\r\n\r\n # Do not re-create the submission_metadata file if it already\r\n # exists for other submission(s) for this benchmark\r\n if (\r\n template_type == TemplateType.METADATA\r\n and is_first_submission is not None\r\n and is_first_submission == False\r\n ):\r\n continue\r\n\r\n template_module = importlib.import_module(\r\n f\"mcs_benchmark_data.cli.template_contexts.{template_type.value}_template\"\r\n )\r\n TemplateDataclass = getattr(\r\n template_module, f\"{template_type.value.capitalize()}Template\"\r\n )\r\n\r\n template_metadata = TemplateDataclass(\r\n benchmark_name=self.benchmark_name, submission_name=self.submission_name\r\n )\r\n\r\n # Update the template for the benchmark/submission provided\r\n template_metadata.execute(root_path=self.root_path, data_dir=data_dir)\r\n\r\n self._logger.info(\r\n \"A %s file has been created at %s\",\r\n template_metadata.__class__.__name__,\r\n self.root_path / template_metadata.dest_file_path_from_root,\r\n )\r\n\r\n if template_type == TemplateType.METADATA:\r\n\r\n self._logger.info(\r\n \"A %s file has been created at %s\",\r\n template_type.value,\r\n self.root_path\r\n / f\"test_{str(template_metadata.dest_file_path_from_root)}\",\r\n )", "def find_user_templates(self):\n\n # a list to store file names in\n local_templates = []\n\n # loop through the directory content\n for name in os.listdir(self._template_directory):\n # check to see if it is a directory and not in the database\n if (os.path.isdir(os.path.join(self._template_directory, name)) and\n name not in self._templates):\n # add it to the list\n local_templates.append(name)\n\n return local_templates", "def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack00.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(self.boxsize)+\",\"+str(self.boxsize)\n\t\t\t\t+\" spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, 
showcmd=False)\n\n\t\treturn templatestack", "def setUp(self):\n print \"Setting Up: %s\" % self.id()\n # render the template\n g.render_template(self.template_file,\n self.template_vars,\n self.output_file,\n self.search_path)\n\n # read the resulting config file built from template\n self.output_config = g.load_config(self.output_file)\n g.show_config(self.output_config)", "def all_templates():\r\n # TODO use memcache to memoize w/ expiration\r\n templates = defaultdict(list)\r\n for category, descriptor in XBlock.load_classes():\r\n if not hasattr(descriptor, 'templates'):\r\n continue\r\n templates[category] = descriptor.templates()\r\n\r\n return templates", "def _fill_template(self, name, superclass):\n template = Template(self.template)\n template = template.substitute(namespace=self.module.namespace,\n module_name=self.module.name,\n name=name,\n superclass=superclass)\n return template", "def _main(args):\n if args.files:\n _update_files()\n\n if args.templates:\n _update_template(args.template_definition)", "def map_template(template: dict, input_: dict) -> None:\n for k, v in template.items():\n config_val = input_.get(k)\n\n if isinstance(v, dict) and k != 'NullHandler':\n map_template(v, input_)\n\n if config_val:\n template[k] = config_val.upper() if k == 'level' else config_val", "def write_template_body1(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n template_file = open(template_filename, 'a')\n template_file.write('<body>\\n') \n template_file.write('<div id=\"pageTitle\">\\n')\n template_file.write('<?php echo $stat_title; ?>\\n') \n template_file.write('</div>\\n')\n template_file.write('<div class=\"page-menu\"><div class=\"table\">\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Basin:</span>\\n')\n template_file.write(\n ' <select id=\"maptype\" '\n +'onchange=\"changeMaptype(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Name:</span>\\n')\n template_file.write(\n ' <select id=\"domain\" '\n +'onchange=\"changeDomain(this.value);\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(\n ' <span class=\"bold\">Forecast Lead:</span>\\n'\n )\n template_file.write(\n ' <select id=\"variable\" '\n +'onchange=\"changeVariable(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div></div>\\n')\n template_file.write('\\n')\n template_file.write('<!-- Middle menu -->\\n')\n template_file.write('<div class=\"page-middle\" id=\"page-middle\">\\n')\n template_file.write(\n 'Left/Right arrow keys = Change forecast lead | Up/Down arrow keys '\n +'= Change Storm\\n'\n )\n template_file.write(\n '<br>For information on tropical cyclone verification, '\n +'<button class=\"infobutton\" id=\"myBtn\">click here</button>\\n'\n )\n template_file.write('<div id=\"myModal\" class=\"modal\">\\n')\n template_file.write(' <div class=\"modal-content\">\\n')\n template_file.write(' <span class=\"close\">&times;</span>\\n')\n template_file.write(' Tropical Cyclone Verification Information\\n')\n template_file.write(\n ' <embed width=100% height=100% src=\"../main.php\">\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div>\\n')\n template_file.write('<!-- /Middle menu -->\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n 
template_file.write(\n '<div id=\"loading\"><img style=\"width:100%\" '\n +'src=\"../../images/loading.png\"></div>\\n'\n )\n template_file.write('\\n')\n template_file.write('<!-- Image -->\\n')\n template_file.write('<div id=\"page-map\">\\n')\n template_file.write(' <image name=\"map\" style=\"width:100%\">\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write('<script type=\"text/javascript\">\\n')\n template_file.write('// Get the modal\\n')\n template_file.write('var modal = document.getElementById(\"myModal\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the button that opens the modal\\n')\n template_file.write('var btn = document.getElementById(\"myBtn\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the <span> element that closes the modal\\n')\n template_file.write(\n 'var span = document.getElementsByClassName(\"close\")[0];\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks the button, open the modal\\n'\n )\n template_file.write('btn.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"block\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks on <span> (x), close the modal\\n'\n )\n template_file.write('span.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks anywhere outside of the modal, close it\\n'\n )\n template_file.write('window.onclick = function(event) {\\n')\n template_file.write(' if (event.target == modal) {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//User-defined variables\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('//Global variables\\n')\n template_file.write(\n 'var minFrame = 0; //Minimum frame for every variable\\n'\n )\n template_file.write(\n 'var maxFrame = 26; //Maximum frame for every variable\\n'\n )\n template_file.write(\n 'var incrementFrame = 1; //Increment for every frame\\n'\n )\n template_file.write('\\n')\n template_file.write('var startFrame = 0; //Starting frame\\n')\n template_file.write('\\n')\n template_file.write('var cycle = 2018100600\\n')\n template_file.write('\\n')\n template_file.write('/*\\n')\n template_file.write(\n 'When constructing the URL below, DDD = domain, VVV = variable, '\n +'LLL = level, SSS = season, Y = frame number.\\n'\n )\n template_file.write(\n 'For X and Y, labeling one X or Y represents an integer '\n +'(e.g. 0, 10, 20). Multiple of these represent a string\\n'\n )\n template_file.write(\n 'format (e.g. 
XX = 00, 06, 12 --- XXX = 000, 006, 012).\\n'\n )\n template_file.write('*/\\n')\n template_file.write(\n 'var url = \"<?php echo $'+template_type+'_url; ?>\";\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Add variables & domains\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('var variables = [];\\n')\n template_file.write('var domains = [];\\n')\n template_file.write('var levels = [];\\n')\n template_file.write('var seasons = [];\\n')\n template_file.write('var maptypes = [];\\n')\n template_file.write('var validtimes = [];\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.close()", "def __loadFromFile(self):\n fh = open(self.__fileName)\n for line in fh:\n if line.strip() == \" \":\n continue # we have an empty line, just skip\n sub = self.__createSubjectFromLine(line)\n # invoke the store method from the base class\n SubjectsRepo.store_subject(self, sub)\n fh.close()", "def _buildjinja2_templates(self):\n templates = self.embryo.templates\n\n # if templates is a module extract its public string attributes\n # into the templates dict expected below.\n if isinstance(templates, ModuleType):\n tmp_templates = {}\n for k in dir(templates):\n v = getattr(templates, k)\n if (not k.startswith('_')) and isinstance(v, (str, Template)):\n tmp_templates[k] = v\n templates = tmp_templates\n\n # load the jinja2 templates contained in the module, either in the form\n # of Template objects or strings.\n loaded_templates = {}\n jinja_env = build_env()\n\n if templates:\n for k, v in templates.items():\n say('loading template: {}'.format(k))\n if isinstance(v, Template):\n loaded_templates[k] = v\n elif isinstance(v, str):\n try:\n loaded_templates[k] = jinja_env.from_string(v)\n except Exception as exc:\n source = exc.source.split('\\n')[exc.lineno - 1]\n shout(f'error \"{exc.message}\", line {exc.lineno} {source}')\n\n self.jinja2_templates = loaded_templates", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def get_templates(instrument=''):\n import os, json\n template_path = os.path.dirname(__file__)\n template_names = [fn\n for fn in os.listdir(template_path)\n if fn.endswith(\".json\") and fn.startswith(instrument)]\n templates = dict([(tn[len(instrument)+1:-5],\n json.loads(open(os.path.join(template_path, tn), 'r').read()))\n for tn in template_names])\n return templates", "def load_template(cls, template_name):\n\n template_path = path.join(dirs.user_data_dir, 'template', '%s.yaml' % template_name)\n\n if not path.isfile(template_path):\n return {}\n\n with open(template_path, 'r') as gf:\n return yaml.safe_load(gf)", "def test_populate_single_template_from_master(populated_template, datamap):\n data = parse(populated_template, datamap)\n assert 
data[0]['gmpp_key'] == 'Project/Programme Name'\n assert data[0]['gmpp_key_value'] == 'PROJECT/PROGRAMME NAME 9'", "def startup(self, title=None, author=None, uid=None):\n title, author, uid = self._collect_metadata(title, author, uid)\n self._generate_skeleton(title, author, uid, from_dirpath=TEMPLATE_DIRPATH)", "def setUp(self):\n BuilderTestsMixin.setUp(self)\n self.builder = DocBuilder()\n self.howtoDir = FilePath(self.mktemp())\n self.howtoDir.createDirectory()\n self.templateFile = self.howtoDir.child(\"template.tpl\")\n self.templateFile.setContent(self.template)", "def use_templates(self, templates):\n self.htmls = templates", "def create_template(api_url, project_id, username, token, update_flag,\n validation_messages, json_files, content_files, scope, csv_flag, input_list):\n try:\n # template loader log folder exists check\n log_path = '/opt/core/cache/tmp/templateloader_logs/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n timestamp = datetime.datetime.fromtimestamp(\n time.time()).strftime('%Y%m%d%H%M%S')\n log_filename = 'templateloader_' + timestamp\n my_file = open(log_path + log_filename, \"a\")\n\n # Print and write the log messages\n for message in validation_messages:\n my_file.write(\"%s\\n\" % message)\n\n success_templates = 0\n\n for metadata, content in zip(json_files, content_files):\n # Metadata Read\n json_file = open(metadata, 'r')\n file_name = list(metadata.split(\"/\"))\n file_name = file_name[-1]\n req_body = json.dumps(json_file.read()).encode('utf-8')\n req_body = json.loads(req_body)\n json_file.close()\n\n req_body = json.loads(req_body)\n\n if csv_flag:\n if input_list and req_body.get(\"name\") not in input_list:\n continue\n # Content Read\n if os.path.isfile(content):\n content_datafile = open(content, 'r')\n content_value = json.dumps(content_datafile.read()).encode('utf-8')\n content_value = json.loads(content_value)\n content_datafile.close()\n req_body[\"content_files\"] = dict(content=dict(content=content_value, name=content.split('/')[-1]))\n else:\n req_body[\"content_files\"] = get_content_files(content)\n # Checks for files\n files_directory = os.path.abspath(\n os.path.join(content, os.pardir)) + \"/files\"\n if os.path.exists(files_directory):\n dependencies = list()\n for script_file_path in find_files(files_directory, '*'):\n script_file_name = os.path.basename(script_file_path)\n script_file_obj = open(script_file_path, 'r')\n script_file_value = script_file_obj.read()\n script_file_obj.close()\n dependencies.append({\"content\": script_file_value, \"name\": script_file_name})\n req_body[\"content_files\"][\"files\"] = dependencies\n\n dependencies_directory = os.path.abspath(os.path.join(content, 'modules'))\n if os.path.exists(dependencies_directory):\n dependencies = list()\n for elem in os.listdir(dependencies_directory):\n module_path = os.path.join(dependencies_directory, elem)\n if not os.path.isdir(module_path):\n continue\n dependencies.append({\"type\": \"module\", \"name\": elem,\n \"content_files\": get_content_files(module_path)})\n if dependencies:\n req_body['dependencies'] = dependencies\n if scope != 'default':\n req_body['scope'] = scope\n\n req_body = json.dumps(req_body).encode('utf-8')\n\n url = \"%s%s/%s\" % (api_url, project_id, 'templates')\n http_client = httplib2.Http()\n headers = {\"X-Auth-User\": username, \"X-Auth-Token\": token}\n\n # call the Create Template API\n resp, content = http_client.request(\n url, method=\"POST\", body=req_body, headers=headers)\n content = json.loads(content)\n\n 
if resp[\"status\"] == \"200\":\n success_templates += 1\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\",\n content[\"status\"],\n content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n elif resp[\"status\"] == \"400\" and update_flag:\n template_id = None\n url = \"%s%s/%s\" % (api_url, project_id, 'templates')\n list_resp, list_content = http_client.request(\n url, method=\"GET\", headers=headers)\n list_content = json.loads(list_content)\n if list_resp[\"status\"] == \"200\":\n template_list = list_content['data']['templates']\n for template in template_list:\n if template['name'] == json.loads(req_body)['name']:\n # call the Update Template API\n template_id = template[\"id\"]\n url = \"%s%s/%s/%s\" % (api_url, project_id,\n 'templates', template_id)\n update_resp, update_content = \\\n http_client.request(url, method=\"PUT\",\n body=req_body,\n headers=headers)\n update_content = json.loads(update_content)\n log_msg = \"%s%s%s - %s\" % (\n file_name[:-5], \" ==> status:\",\n update_content[\"status\"],\n update_content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n if update_resp[\"status\"] == \"200\":\n success_templates += 1\n break\n if not template_id:\n temp_url = \"%s%s/%s?is_temp=true\" % (api_url, project_id, 'templates')\n list_temp_resp, list_temp_content = http_client.request(\n temp_url, method=\"GET\", headers=headers)\n list_temp_content = json.loads(list_temp_content)\n if list_temp_resp[\"status\"] == \"200\":\n temp_template_list = list_temp_content['data']['templates']\n for template in temp_template_list:\n if template['name'] == json.loads(req_body)['name']:\n # call the Update Template API\n template_id = template[\"id\"]\n url = \"%s%s/%s/%s\" % (api_url, project_id,\n 'templates', template_id)\n update_resp, update_content = \\\n http_client.request(url, method=\"PUT\",\n body=req_body,\n headers=headers)\n update_content = json.loads(update_content)\n log_msg = \"%s%s%s - %s\" % (\n file_name[:-5], \" ==> status:\",\n update_content[\"status\"],\n update_content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n if update_resp[\"status\"] == \"200\":\n success_templates += 1\n break\n if not template_id:\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\",\n content[\"status\"],\n content[\"message\"])\n sys.stderr.write(\"%s\\n\" % log_msg)\n my_file.write(\"%s\\n\" % log_msg)\n else:\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\",\n content[\"status\"],\n content[\"message\"])\n sys.stderr.write(\"%s\\n\" % log_msg)\n my_file.write(\"%s\\n\" % log_msg)\n if not csv_flag:\n total_templates = len(json_files)\n failed_templates = total_templates - success_templates\n else:\n total_templates = len(input_list)\n failed_templates = total_templates - success_templates\n sys.stdout.write('Total templates: ' + str(total_templates) + \"\\n\")\n sys.stdout.write(\"Success Templates: \" + str(success_templates) + \"\\n\")\n sys.stderr.write(\"Failed Templates: \" + str(failed_templates) + \"\\n\")\n\n my_file.write('Total templates: ' + str(total_templates) + \"\\n\")\n my_file.write(\"Failed Templates: \" + str(failed_templates) + \"\\n\")\n my_file.close()\n\n except Exception as e:\n sys.stdout.write(e.message)\n exit(1)", "def load_test_subjects_names(self):\n files = os.listdir(os.path.join(self.db_path, self.test_batch))\n for f in files:\n if f.startswith('test-volume'):\n s_name = str.split(str.split(f, '.')[0], '-')[-1]\n self.testing_subjects.append(s_name)\n self.n_test = 
len(self.testing_subjects)", "def __init__(self, path_template, untrimmed_path, qualities, file_opener):\n super().__init__()\n assert '{name}' in path_template\n self.template = path_template\n self.untrimmed_path = untrimmed_path\n self.untrimmed_writer = None\n self.writers = dict()\n self.qualities = qualities\n self.file_opener = file_opener", "def generate_template():\n date = input(\"What day is the template for? dd/mm/yy: \")\n day = input(\"What day of the week is this?: \")\n if \"/\" not in date:\n date_components = [date[0:2], date[2:4], date[4:]]\n else:\n date_components = date.split(\"/\")\n file_name = \"{}{}{}.table\".format(date_components[0], date_components[1], date_components[2])\n try:\n template_file = open(\"templates/{}_template.table\".format(day), \"r\")\n except IOError:\n print(\"source template not found\")\n else:\n source_lines = []\n output_file = open(file_name, \"w\")\n for line in template_file:\n source_lines.append(line)\n output_file.write(line)\n print(\"template generated successfully for: {} on date: {}\".format(day, date))" ]
[ "0.68409294", "0.644731", "0.6447111", "0.6338145", "0.62998015", "0.6060812", "0.5989269", "0.58991736", "0.5831137", "0.5817373", "0.57764006", "0.57094055", "0.56525546", "0.5633865", "0.56115997", "0.5558664", "0.5556968", "0.5553622", "0.5548341", "0.55309093", "0.55063766", "0.54962105", "0.54890543", "0.5476232", "0.5410289", "0.54051226", "0.54048455", "0.53966486", "0.53867567", "0.5378757", "0.537122", "0.5343997", "0.53437066", "0.5313351", "0.53061765", "0.5295205", "0.5294356", "0.5282731", "0.5282331", "0.5263208", "0.525255", "0.5233792", "0.5231249", "0.5210978", "0.5205843", "0.52019536", "0.5200031", "0.5197621", "0.51459926", "0.5110066", "0.5092271", "0.50870746", "0.5078975", "0.5074102", "0.5073286", "0.50664234", "0.50552565", "0.50534517", "0.5045291", "0.5040949", "0.5028978", "0.5028777", "0.5025671", "0.5003208", "0.49978966", "0.49904042", "0.4973075", "0.49683666", "0.49486437", "0.49477503", "0.4944941", "0.493763", "0.49304485", "0.49267906", "0.49180222", "0.49159688", "0.4908462", "0.48929104", "0.48871833", "0.48830202", "0.48811087", "0.48801905", "0.48718658", "0.48675779", "0.48669168", "0.48634014", "0.4860944", "0.48558706", "0.48482263", "0.48430738", "0.4831272", "0.48300633", "0.48201245", "0.48135042", "0.48131418", "0.48124814", "0.48119202", "0.48072696", "0.480326", "0.4798732", "0.47977564" ]
0.0
-1
Read a single template file and return the resulting dict object.
def _LoadTemplate(self,fname):
    f = open(fname, 'r')
    lines = f.readlines()
    data = ''
    for line in lines:
        if not line.startswith('---'):
            data += line
    data = data.replace('\t',' ')
    if '\t' in data:
        errstr = \
            'Illegal tabs encountered in template file. Use spaces instead.'
        raise ScannerError(errstr)
        proc.LogErrors(errstr)
    tmplt = yaml.load(data)
    f.close()
    return tmplt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_template_file(self):\n try:\n return json.loads(open(self.TEMPLATE_FILE,'r').read())\n except FileNotFoundError:\n sys.stdout.write(\"Template-file does not exist.\\n\")\n except OSError as e:\n sys.stdout.write(\n \"Error: \\'{}\\' occured while reading the template file. It could be possibly insufficient access.\\n\".\n format(e))", "def read_template(file_name):\n infile = open(file_name, 'r')\n return infile.read()", "def read_template(filename):\n \n with open(filename, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n return Template(template_file_content)", "def read_template(filename):\n\n with open(filename, encoding=\"utf-8\") as template_file:\n template_file_content = template_file.read()\n return Template(template_file_content)", "def load(template):\n with open(template) as f:\n return f.read()", "def read_template(self):\n template_file = open(self._template)\n template = template_file.readlines()\n template_file.close()\n return template", "def load_template(cls, template_name):\n\n template_path = path.join(dirs.user_data_dir, 'template', '%s.yaml' % template_name)\n\n if not path.isfile(template_path):\n return {}\n\n with open(template_path, 'r') as gf:\n return yaml.safe_load(gf)", "def get_template_data(template_file):\n\n if not pathlib.Path(template_file).exists():\n raise ValueError(\"Template file not found at {}\".format(template_file))\n\n with open(template_file, 'r') as fp:\n try:\n return yaml_parse(fp.read())\n except (ValueError, yaml.YAMLError) as ex:\n raise ValueError(\"Failed to parse template: {}\".format(str(ex)))", "def readTemplate(self, file):\n templates = {}\n doc = ElementTree.parse(file)\n entries = doc.findall(\"entry\")\n for entry in entries:\n templates[entry.find(\"trigger\").text] = [entry.find(\"description\").text,\n entry.find(\"template\").text]\n self._convertTabs(templates)\n return templates", "def read_template(template_name: str) -> str:\n with open(os.path.join(DHPARSER_DIR, 'templates', template_name), 'r', encoding='utf-8') as f:\n return f.read()", "def read_config(template_path: str) -> dict:\n # JSON is technically yaml but not the other way around.\n # yaml.safe_load can actually read in json files.\n with open(template_path, \"r\") as template_f:\n config = yaml.safe_load(template_f)\n return config", "def parse(file_path: Path) -> tuple[dict[str, Any], list[tuple[int, str]]] | tuple[None, None]:\n\n template = None\n template_lines = None\n try:\n template, template_lines = loads(file_path=file_path)\n except IOError as e:\n if e.errno == 2:\n logger.error(f\"Template file not found: {file_path}\")\n elif e.errno == 21:\n logger.error(f\"Template references a directory, not a file: {file_path}\")\n elif e.errno == 13:\n logger.error(f\"Permission denied when accessing template file: {file_path}\")\n except UnicodeDecodeError:\n logger.error(f\"Cannot read file contents: {file_path}\")\n except ScannerError as err:\n if err.problem in (\"found character '\\\\t' that cannot start any token\", \"found unknown escape character\"):\n try:\n result = json_parse(file_path, allow_nulls=False)\n if result:\n template, template_lines = result # type:ignore[assignment] # this is handled by the next line\n if isinstance(template, list):\n # should not happen and is more relevant for type safety\n template = template[0]\n except Exception:\n logger.error(f\"Template {file_path} is malformed: {err.problem}\")\n logger.error(f\"Tried to parse {file_path} as JSON\", exc_info=True)\n 
except YAMLError:\n pass\n\n if template is None or template_lines is None:\n return None, None\n\n return template, template_lines", "def read_in_templates(path, email_object=None):\n import os\n templates = {}\n\n for fle in os.listdir(path):\n with open(os.path.join(path, fle)) as _f:\n raw = \"\\n\".join(_f.readlines())\n templates[fle] = raw\n\n if email_object:\n email_object.use_templates(templates)\n else:\n return templates", "def _load_template(template_file: str = None, module_name: str = None, stack_name: str = None) -> str:\n if template_file:\n # read the template file\n with open(template_file, 'r') as fh:\n template_body = fh.read()\n else:\n # Import the troposphere module\n stack = _import_tropo_module(stack_name, module_name)\n # Get the yaml template file\n template_body = stack.get_template().to_json()\n return template_body", "def getTemplate():\n\n with open('/home/sevudan/Scripts/projects/topogen/template.cfg', 'r') as file:\n data = file.read()\n file.close()\n return Template(data)", "def get_template(self, name):\n with open(name, 'r+') as open_f:\n template_content = open_f.read()\n return template_content", "def read_template_files(self, template_path):\n templates = dict()\n for file in listdir(template_path):\n template_file = re.search(r\"(.*?).html$\", file)\n if template_file:\n template_name = template_file.groups()[0]\n templates[template_name] = open(os.path.join(template_path, file)).read()\n return templates", "def loads(file_path: Path) -> tuple[dict[str, Any], list[tuple[int, str]]]:\n\n content = read_file_with_any_encoding(file_path=file_path)\n\n if not all(key in content for key in (\"resource\", \"provider\")):\n return {}, []\n\n file_lines = [(idx + 1, line) for idx, line in enumerate(content.splitlines(keepends=True))]\n\n template: \"dict[str, Any] | list[dict[str, Any]]\" = loader.loads(content=content)\n if not template:\n template = {}\n if isinstance(template, list):\n template = template[0]\n\n if template:\n template = prepare_definition(template)\n\n return template, file_lines", "def _get_template(specified_template, default_template):\n template_file_path = specified_template\n if template_file_path:\n if not (os.path.exists(template_file_path) and os.path.isfile(template_file_path)):\n LOG.error(u\"Template file: %s doesn't exist, using default template\",\n template_file_path)\n template_file_path = None\n\n if not template_file_path:\n # using default template\n template_file_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n default_template\n )\n\n LOG.debug(u\"template file used: %s\", template_file_path)\n with open(template_file_path, \"r\") as definition:\n return definition.read()", "def _load_template(self):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, '__init__.ini')\n cf = ApplicationConf.get_instance()\n with comp_open(filename, mode='r') as fp:\n content = fp.read()\n content = content.format(**cf)\n conf = CompConfigParser(allow_no_value=True)\n conf.read_string(content, '__init__.ini')\n ini = {'dirs': [], 'files': [], 'binaries': []}\n if conf.has_section('dirs'):\n for key in conf.options('dirs'):\n ini['dirs'].append(key)\n if conf.has_section('files'):\n for key in conf.options('files'):\n ini['files'].append(self.__remap(key))\n if conf.has_section('binaries'):\n for key in conf.options('binaries'):\n ini['binaries'].append(self.__remap(key))\n if isinstance(self._ini, dict):\n self._ini.update(ini)\n else:\n self._ini = ini", "def 
_load_template(file_name):\n\n filepath = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n '../../fixtures/autoscaling_templates', file_name)\n with open(filepath) as f:\n return f.read()", "def load_template(mapping_location: str, vendorname: str) -> Dict:\n\n location = mapping_location\n\n #check if template mapping file exists\n # can be checked at the source if Invoice Parser used with GUI\n \n try:\n os.path.exists(location)\n except Exception as e:\n print(\"{0}. File not found\".format(e))\n else:\n with open(location) as t:\n mapping = json.load(t)\n\n #checking if mapping has vendorname\n try:\n mapping[vendorname]\n except KeyError as e:\n print(\"KeyError {0}. Vendor does not have a template\".format(e))\n else:\n\n template_file_location = mapping[vendorname]\n\n #checking if template file exists\n try:\n os.path.exists(template_file_location)\n except Exception as e:\n print(\"{0}. File not found\".format(e))\n else:\n with open(template_file_location) as templ:\n data = json.load(templ)\n \n return data", "def file_read(filename):\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n\n d = dict(eval(d_str))\n return d", "def readTemplate(tfn):\n\n if opts.verbose: print \"fetching template\", tfn\n\n found = 0\n foundInRoot = 0\n\n # check in user-specified template root.\n if opts.templates:\n fn = join(opts.templates, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n found = 1\n\n # check in hierarchy root\n if not found:\n fn = join(opts.root, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n foundInRoot = 1\n found = 1\n\n # look for it in the environment var path\n if not found:\n try:\n curatorPath = os.environ[ 'CURATOR_TEMPLATE' ]\n pathlist = string.split(curatorPath, os.pathsep)\n for p in pathlist:\n fn = join(p, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n found = 1\n break\n except KeyError:\n pass\n\n if found == 1:\n # read the file\n try:\n tfile = open(fn, \"r\")\n t = tfile.read()\n tfile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't open image template file:\", fn\n sys.exit(1)\n if opts.verbose: print \" succesfully loaded template\", tfn\n\n else:\n # bah... 
can't load it, use fallback templates\n if opts.verbose:\n print \" falling back on simplistic default templates.\"\n global fallbackTemplates\n try:\n t = fallbackTemplates[ splitext(tfn)[0] ]\n except KeyError:\n t = ''\n\n # Save templates in root, if it was requested.\n if opts.save_templates and foundInRoot == 0:\n rootfn = join(opts.root, tfn)\n if opts.verbose: print \" saving template in %s\" % rootfn\n\n # saving the file template\n if exists(rootfn):\n bakfn = join(opts.root, tfn + '.bak')\n if opts.verbose: print \" making backup in %s\" % bakfn\n import shutil\n try:\n shutil.copy(rootfn, bakfn)\n except:\n print >> sys.stderr, \\\n \"Error: can't copy backup template %s\", bakfn\n\n try:\n ofile = open(rootfn, \"w\")\n ofile.write(t)\n ofile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't save template file to\", rootfn\n\n return t", "def get_message(filename):\n\n with open(filename, 'r', encoding='utf-8') as template_file:\n template_file_contents = template_file.read()\n return Template(template_file_contents)", "def _parse_template(self):\n with open(\"./common/sagemaker_rl/orchestrator/cloudformation.yaml\") as template_fileobj:\n template_data = template_fileobj.read()\n self.cf_client.validate_template(TemplateBody=template_data)\n return template_data", "def read(file_path: str) -> dict:\n\n if not os.path.isfile(file_path):\n raise FileNotFoundError(\"The file `%s` must exist and be a BLM file\" % file_path)\n\n file_contents = open(file_path, 'r').read()\n headers = parse_headers(file_contents)\n definitions = parse_definitions(headers, file_contents)\n data = parse_data(headers, definitions, file_contents)\n\n return {'headers': headers, 'definitions': definitions, 'data': data}", "def load(replay_dir, template_name):\n if not isinstance(template_name, str):\n raise TypeError('Template name is required to be of type str')\n\n replay_file = get_file_name(replay_dir, template_name)\n\n with open(replay_file) as infile:\n context = json.load(infile)\n\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n\n return context", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def main_function(template_file):\n\n content = load(template_file)\n assert content, \"Couldn't load template\"\n\n template = Template(content)\n\n return template.render(context(content))", "def load_template(format_: str) -> Template:\n template_path = Path(TEMPLATES_PATH).joinpath(f'{format_}{TEMPLATE_SUFFIX}')\n template = Template(template_path.read_text())\n return template", "def load_template(name, stdout = sys.stdout, stderr = sys.stderr):\n path = \"weblab\" + os.sep + \"admin\" + os.sep + \"config_templates\" + os.sep + name\n try:\n f = file(path, \"r\")\n template = f.read()\n f.close()\n except:\n print(\"Error: Could not load template file %s. 
Probably couldn't be found.\" % path, file=stderr)\n return template", "def load_dev_templates(settings, project_name):\n #Load json file\n base_path = settings['path.templates']\n template_path = os.path.join(base_path,\n \"{0}.json\".format(project_name)).replace(\"\\\\\", \"/\")\n file = open(template_path).read()\n template = json.loads(file)\n\n return template", "def load(self, spec):\n if spec.template is not None:\n return self.loader.unicode(spec.template, spec.template_encoding)\n\n path = self._find(spec)\n\n return self.loader.read(path, spec.template_encoding)", "async def load(self, file: IO) -> dict:", "def _read_translation_file(language_code: str, filename: str):\n twlight_home = settings.TWLIGHT_HOME\n filepath = \"{twlight_home}/locale/{language_code}/{filename}.json\".format(\n twlight_home=twlight_home, language_code=language_code, filename=filename\n )\n if os.path.isfile(filepath):\n with open(filepath, \"r\") as translation_file:\n translation_dict = json.load(translation_file)\n\n # Remove the \"@metadata\" key from the dictionary\n if \"@metadata\" in translation_dict:\n translation_dict.pop(\"@metadata\")\n return translation_dict\n else:\n return {}", "def read_template(site_name, doc_name):\n siteid = _get_site_id(site_name)\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = 'select text from {} where site_id = %s and name = %s;'\n result = execute_query(querystring.format(TABLES[5]), (siteid, doc_name))\n row = cur.fetchone()\n result = row['text']\n return result", "def body_template(template_path):\n try:\n template_text = Path(template_path).read_text()\n except FileNotFoundError:\n raise FileNotFoundError('File path not found: {}'\n .format(template_path))\n return template_text", "def _GetTemplate(self):\n# First read default template.\n tmplt = self._LoadTemplate(c.preproc_template_default)\n tmplt['proc'] = self.topdir\n self.template_type = 'default'\n\n self.templates = []\n if self.template_file is not None:\n tmplt.update(self._LoadTemplate(self.template_file))\n self.template_type = 'command-line'\n self.templates.append(os.path.abspath(self.template_file))\n found_template = True\n else:\n# Find a study specific template file.\n study_template_file = self._FindTemplateFile('%s/..' % self.topdir)\n if study_template_file is not None:\n# Merge study template into default, study template has precedence.\n if self.verbose:\n print \"Using study template at \" + study_template_file\n tmplt.update(self._LoadTemplate(study_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(study_template_file))\n found_template = True\n else:\n found_template = False\n# Now look for a subject-specific template file.\n subject_template_file = self._FindTemplateFile('%s' % self.topdir)\n if subject_template_file is not None:\n# Merge subject template, subject template has precedence.\n if self.verbose:\n print \"Using subject-specific template at %s\" % \\\n subject_template_file\n tmplt.update(self._LoadTemplate(subject_template_file))\n self.template_type = 'study-specific'\n self.templates.append(os.path.abspath(subject_template_file))\n found_template = True\n\n if not found_template:\n raise RuntimeError('Could not find template file.')\n\n if tmplt.get('subject','same') == 'same':\n# Default subdirectory is same as data directory.\n tmplt['subject'] = self.topdir.split('/')[-1]\n else:\n if not isinstance(tmplt['subject'],str):\n errstr = 'preprocess: Invalid subject number. 
Be sure to ' + \\\n 'enclose the subject number item with double quotes.'\n raise RuntimeError(errstr)\n\n# Keys that apply to all EPIs.\n self.fsl_flip = tmplt.get('fsl_flip', False)\n if self.fsl_flip:\n self.flip_opts = '-LT'\n else:\n self.flip_opts = ''\n\n# Replace strings with python types.\n for key in tmplt.keys():\n if tmplt[key] == 'None':\n tmplt[key] = None\n elif key == 'True':\n tmplt[key] = True\n elif key == 'False':\n tmplt[key] = False\n return tmplt", "def _get_template(self, tgt):\n with open(tgt, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n self.template = Template(template_file_content)\n return", "def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)", "def _get_template(self, template_name, template_file):\n template = os.path.join(self.location, 'templates',\n template_name, template_file)\n return jinja2.Template(open(template).read())", "def load_template(self, template_path):\n res = self.load_template_with_location(template_path)\n content, template_type, path = res\n\n # only return content for backward compatibility\n return content", "def gen_from_template(template_file: str, data):\n tmpl = ezt.Template(template_file)\n fp = ezt.StringIO()\n tmpl.generate(fp, data)\n return fp.getvalue()", "def readTemplates():\n\n # Compile HTML templates.\n templates = {}\n for tt in [ 'image', 'dirindex', 'allindex', 'trackindex', 'sortindex' ]:\n fn = 'template-%s' % tt + opts.htmlext\n ttext = readTemplate(fn)\n templates[ tt ] = compileTemplate(ttext, fn)\n\n fn = 'template-css.css'\n ttext = readTemplate(fn)\n templates[ 'css' ] = compileTemplate(ttext, fn)\n\n # Compile user-specified rc file.\n rcsfx = 'rc'\n templates[ rcsfx ] = []\n if opts.rc:\n try:\n tfile = open(opts.rc, \"r\")\n orc = tfile.read()\n tfile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't open user rc file:\", opts.rc\n sys.exit(1)\n\n o = compileCode('', orc, opts.rc)\n templates[ rcsfx ] += [ o ]\n\n # Compile user-specified code.\n if opts.rccode:\n o = compileCode('', opts.rccode, \"rccode option\")\n templates[ rcsfx ] += [ o ]\n\n # Compile global rc file without HTML tags, just python code.\n code = readTemplate('template-%s' % rcsfx + '.py')\n o = compileCode('', code, tt)\n templates[ rcsfx ] += [ o ]\n\n return templates", "def load_json(file_name_template, record_id):\n with open(file_name_template % (record_id)) as f:\n json_data = json.load(f)\n return json_data", "def read_template(pool, sim_tag, source_id, variable_id, fgt, output_file_path):\n\n connection = pool.connection()\n try:\n\n with connection.cursor() as cursor:\n sql_statement = \"SELECT `template` FROM `run_info` WHERE `sim_tag`=%s and `source`=%s and \" \\\n \"`variable`=%s and `fgt`=%s\"\n row_count = cursor.execute(sql_statement, (sim_tag, source_id, variable_id, fgt))\n if row_count > 0:\n template_data = cursor.fetchone()['template']\n write_file(data=template_data, filename=output_file_path)\n else:\n return None\n\n return True\n except Exception as exception:\n error_message = \"Retrieving template failed for run info entry with source={}, variable={}, sim_tag={}, fgt={}\" \\\n .format(source_id, variable_id, sim_tag, fgt)\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def get_template(template_file=None, default_template=None):\n if template_file is None:\n template_file = 
os.path.join(os.path.dirname(__file__), default_template)\n with open(template_file) as f0:\n job_template = Template(f0.read())\n return job_template", "def read_file(filename: str) -> Dict:\n if not Path(filename).exists():\n raise Exception(f\"file {filename} does not exists.\")\n else:\n with open(filename, \"r\") as f:\n return json.load(f)", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def parse(self, dict, header=TRUE):\n if type(dict) != types.DictType:\n raise TypeError, \"Second argument must be a dictionary\"\n if not self.template:\n raise OpagMissingPrecondition, \"template path is not set\"\n # Open the file if its not already open. If it is, seek to the\n # beginning of the file.\n if not self.template_file:\n self.template_file = open(self.template, \"r\")\n else:\n self.template_file.seek(0)\n # Instantiate a new bound method to do the replacement.\n replacer = Replacer(dict).replace\n # Read in the entire template into memory. I guess we'd better keep\n # the templates a reasonable size if we're going to keep doing this.\n buffer = self.template_file.read()\n replaced = \"\"\n if header:\n replaced = \"Content-Type: text/html\\n\\n\"\n replaced = replaced + re.sub(\"%%(\\w+)%%\", replacer, buffer)\n return replaced", "def read_template(self, name, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/{mount_point}/template/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.get(\n url=api_path,\n )", "def read(self, filepath, dirpath=None):\n try:\n #filepath = os.path.normpath(filepath)\n with open(filepath) as f_p:\n try:\n self.json_dict = json.load(f_p)\n self.filepath = filepath\n return self.json_dict\n except ValueError as err:\n print('JSON content error in \"%s\"' % filepath)\n print(err)\n except (IOError, FileNotFoundError):\n print(\n 'Failed to open JSON file \"%s\" \"%s\"' %\n (os.path.abspath(''), filepath))\n raise NoSuchFileError(filepath)\n raise JsonContentError", "def read_yaml_file(filepath: str) -> Dict:\n return yaml.safe_load(read_file(filepath))", "def _load_template(name: str) -> str:\n html_tpl = _read_text(name + '.html')\n import re\n\n # line breaks are not needed\n html_tpl = html_tpl.replace('\\n', '')\n # remove comments\n html_tpl = re.sub(r'<!--(.|\\s|\\n)*?-->', '', html_tpl)\n # remove space around special characters\n html_tpl = re.sub(r'\\s*([><])\\s*', r'\\1', html_tpl)\n return html_tpl", "def get_yaml_data(path):\n yaml_path = \"%s%s.yml\" % (CONTENT_FILE_DIR, path[:-5])\n if os.path.isfile(yaml_path):\n f = open(yaml_path, 'r')\n template_data = yaml.load(f)\n return template_data\n else:\n return False", "def read_template(self, template_file_name=\"\", debug=False):\n\t\tif (debug):\n\t\t\tprint \"Debug enabled\"\n\t\ttry: \n\t\t\tif (debug):\n\t\t\t\tprint \"attempting local\"\n\t\t\tfilein = open(template_file_name, \"r\")\n\t\t\tjson_string = filein.read()\n\t\t\tself.template_tags = json.loads(json_string)\n\t\t\tfilein.close()\n\t\t\treturn True\n\t\texcept IOError as err:\n\t\t\tfilein = 
None\n\n\t\t#if the project doesn't have a .json file association\n\t\tif (not template_file_name.endswith(\".json\")):\n\t\t\ttemplate_file_name = template_file_name + \".json\"\n\t\t\n\t\t\ttry:\n\t\t\t\tif (debug):\n\t\t\t\t\tprint \"attempting local + .json\"\n\t\t\t\tfilein = open(template_file_name, \"r\")\n\t\t\t\tjson_string = filein.read()\n\t\t\t\tself.template_tags = json.loads(json_string)\n\t\t\t\tfilein.close()\n\t\t\t\treturn True\n\t\t\texcept IOError as err:\n\t\t\t\tfilein = None\n\n\t\t#see if there is a environmental setting for SAPLIB_BASE\n\t\tif (len(os.getenv(\"SAPLIB_BASE\")) > 0):\n\t\t\tfile_name = os.getenv(\"SAPLIB_BASE\") + \"/templates/\" + template_file_name\t\n\t\t\ttry:\n\t\t\t\tif (debug):\n\t\t\t\t\tprint \"attempting environmental variable SAPLIB_BASE\"\n\t\t\t\t\tprint file_name\n\t\t\t\tfilein = open(file_name, \"r\")\n\t\t\t\tjson_string = filein.read()\n\t\t\t\tself.template_tags = json.loads(json_string)\n\t\t\t\tfilein.close()\n\t\t\t\treturn True\n\t\t\texcept IOError as err:\n\t\t\t\tfilein = None\n\n\t\t#see if the sap_location was specified\n\t\tif (self.project_tags.has_key(\"sap_location\")):\n\t\t\tfile_name = self.project_tags[\"sap_location\"] + \"/templates/\" + template_file_name\n\t\t\ttry:\n\t\t\t\tif (debug):\n\t\t\t\t\tprint \"attempting to read from project tags\"\n\t\t\t\tfilein = open (file_name, \"r\")\n\t\t\t\tjson_string = filein.read()\n\t\t\t\tself.template_tags = json.loads(json_string)\n\t\t\t\tfilein.close()\n\t\t\t\treturn True\n\t\t\texcept IOError as err:\n\t\t\t\tfilein = None\n\n\t\t#try the default location\t\n\t\tfile_name = \"../templates/\" + template_file_name\n\t\ttry:\n\t\t\tif (debug):\n\t\t\t\tprint \"attemping to read from hard string\"\n\t\t\tfilein = open(file_name, \"r\")\n\t\t\tjson_string = filein.read()\n\t\t\tself.template_tags = json.loads(json_string)\n\t\t\tfilein.close()\n\t\t\treturn True\n\t\texcept IOError as err:\n\t\t\tfilein = None\n\t\t\n\t\treturn False", "def read(cls, filename: str) -> Objdict:\n obj = config.read(filename)\n return cls.from_obj(obj)", "def from_template(template, **extra_args):\n if hasattr(template, 'read') and callable(template.read):\n template_contents = template.read()\n elif os.path.exists(template):\n template_file = file(template, 'r')\n template_contents = template_file.read()\n template_file.close()\n else:\n # treat `template` as a string\n template_contents = template\n # substitute `extra_args` into `t` and return it\n return (template_contents % extra_args)", "def getFile(self):\n #try to redetect the filetype\n vim.command(\"filetype detect\")\n #return the filetype\n filetype = vim.eval(\"&ft\")\n #filetype = vim.command(\"&ft\")\n if filetype:\n for file in self.template_files:\n if filetype.lower() in file.lower():\n self.hasTemplate = True\n return open(self.template_folder + \"/\" + file, 'r')\n return None", "def _read_infile_with_tplfile(tpl_file, input_file):\n\n if not os.path.exists(input_file):\n raise Exception(\"input file '{0}' not found\".format(input_file))\n\n f_tpl = open(tpl_file, \"r\")\n f_in = open(input_file, \"r\")\n\n # read the tpl header\n _, marker = f_tpl.readline().split()\n itpl, iin = 1, 0\n pnames, pvals = [], []\n pdict = {}\n while True:\n tpl_line = f_tpl.readline()\n if tpl_line == \"\":\n break\n\n in_line = f_in.readline()\n if in_line == \"\":\n raise Exception(\n \"input file EOF, tpl file line {0}, in file line {1}\".format(itpl, iin)\n )\n\n if marker in tpl_line:\n idxs = [i for i, ltr in enumerate(tpl_line) if 
ltr == marker]\n if len(idxs) % 2 != 0:\n raise Exception(\"unbalanced markers on tpl line {0}\".format(itpl))\n\n for s, e in zip(idxs[0:-1:2], idxs[1::2]):\n tpl_str = tpl_line[s : e + 1]\n pname = tpl_str.replace(marker, \"\").strip().lower()\n if s > len(in_line):\n raise Exception(\n \"input file EOL line {0}, tpl line {1}, looking for {2}\".format(\n iin, itpl, tpl_str\n )\n )\n junk_val = \"Jennyigotunumber8675309\"\n tmp = tpl_line[:s] + \" {} \".format(junk_val) + tpl_line[e + 1 :]\n if len(tmp.split()) == len(in_line.split()):\n # treat this as whitespace delimited\n in_str = in_line.split()[tmp.split().index(junk_val)]\n else:\n # or we must assume the params are written using the same spacing as template file\n in_str = in_line[s : e + 1]\n try:\n v = float(in_str)\n except Exception as e:\n raise Exception(\n \"error casting '{0}' to float on in line {1}, tpl line {2} for {3}: {4}\".format(\n in_str, iin, itpl, tpl_str, str(e)\n )\n )\n\n if pname in pdict:\n eval = pdict[pname]\n if not np.isclose(eval, v, 1.0e-6):\n raise Exception(\n \"different values {0}:{1} for par {2} on in line {3}\".format(\n v, eval, pname, iin\n )\n )\n else:\n pnames.append(pname)\n pvals.append(v)\n pdict[pname] = v\n itpl += 1\n iin += 1\n df = pd.DataFrame({\"parnme\": pnames, \"parval1\": pvals}, index=pnames)\n return df", "def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }", "def _parse(\n self, source: str, name: t.Optional[str], filename: t.Optional[str]\n ) -> nodes.Template:\n return Parser(self, source, name, filename).parse()", "def _raw_read(self, filepath, dirpath=None):\n self.json_dict = super().read(filepath)\n return self.json_dict", "def get_translation_dict_from_file(path, lang, app):\n\tjson_content = {}\n\tif os.path.exists(path):\n\t\twith open(path, 'r') as f:\n\t\t\tjson_content = json.loads(f.read())\n\n\treturn json_content", "def json_reader(file_path):\n\n with open(file_path) as file:\n json_dict = json.load(file)\n\n return json_dict", "def get_key_from_file():\n json_data = request.get_json()\n \n is_reference = json_data['is_reference']\n filename = json_data['filename']\n key_name = json_data['key_name']\n\n \n settings.setOptionsFile(get_info('uid'))\n f = ROOT.TFile(filename)\n\n d = eval(cppyy.gbl.getDictionary(f,key_name))\n \n f.Close()\n return jsonify(d)", "def read(file_):\n if not os.path.isfile(file_):\n raise AssertionError()\n\n dict_ = {}\n for line in open(file_).readlines():\n\n list_ = shlex.split(line)\n\n is_empty = (list_ == [])\n\n if not is_empty:\n is_keyword = list_[0].isupper()\n else:\n is_keyword = False\n\n if is_empty:\n continue\n\n if is_keyword:\n keyword = list_[0]\n dict_[keyword] = {}\n continue\n\n process(list_, dict_, keyword)\n\n dict_ = auxiliary(dict_)\n\n # We perform some basic consistency checks regarding the user's request.\n check_initialization_dict(dict_)\n\n return dict_", "def render_template(\n template_name: str = \"index.html\", context: t.Dict[str, str] = {}\n):\n html_str: str\n with open(template_name, \"r\") as f:\n html_str = f.read()\n html_str = html_str.format(**context)\n return html_str\n # return f\"<h1>Hello {path=}</h1>\\n{template_name=}\"", "def process_feature_file(filename: str) -> Dict[str, Any]:\n feature = json.loads(open(filename).read())\n template = feature['query']\n name = feature['name']\n params = feature['params']\n feature_spec = {\n 'name': name,\n 'template': template,\n 'params': params\n }\n return 
feature_spec", "def read_template():\n\n text_msg = \"\"\"${PERSON_NAME} - Calling Campaign Summary - ${DATE}:\\n\n Total Called = ${TOTAL_CALLED}\\n\n Answered = ${ANSWERED}\\n\n Not Answered = ${NOT_ANSWERED}\\n\n Declines = ${DECLINES}\\n\n Remaining = ${REMAINING}\\n\n \\n\n Thank You.\"\"\"\n\n return Template(text_msg)", "def loading_strategy(self):\n try:\n if not self.file_allowed():\n raise Exception('File type {} is not allowed'.format(self.get_ext()))\n\n with open(self._file_path, 'r') as outfile:\n content = outfile.read()\n\n if self.is_json() or self.is_template():\n return json.loads(content)\n\n if self.is_yaml():\n if self._yaml_replacements:\n for key, value in self._yaml_replacements.iteritems():\n content = content.replace(key, value)\n\n return yaml.load(content)\n else:\n return content\n\n except Exception as e:\n Oprint.err(e)\n else:\n raise Exception('File type {} is not allowed'.format(self.get_ext()))", "def read_dict(txt_file_path):\n txt_file = open(txt_file_path,'r')\n txt_raw = txt_file.read()\n txt_as_dict = ast.literal_eval(txt_raw)\n txt_file.close()\n return txt_as_dict", "def fill_template_file(filename, value_dict):\n f = open(filename, 'r')\n text = f.read()\n f.close()\n f = open(filename, 'w')\n f.write(text % value_dict)\n f.close()", "def file_parser(file):\n\n # Copy of the file instance to save it\n new_file = file\n dict_file = {}\n # We find the right function depending on the extension of the file\n meta_func = find_meta_function(find_extension(file))\n if callable(meta_func):\n dict_file = meta_func(new_file)\n return dict_file", "def ReadFromFile(self):\n\n data = \"\"\n try:\n with open(self.fileLoc, \"r\") as file:\n data += file.read()\n except IOError:\n with open(self.fileLoc, \"w\") as file:\n file.write(\" \")\n return {}\n \n if len(data) == 0:\n return {}\n\n data = self.Decrypt(data)\n\n data = \"\".join(data.split())\n kvstrings = data.split(\"%\")\n kvstrings = filter(None, kvstrings)\n\n pairs = {}\n for x in kvstrings:\n kv = x.split(\":\")\n pairs[kv[0]] = kv[1]\n\n return pairs", "def _file_dict(self, fn_):\n if not os.path.isfile(fn_):\n err = \"The referenced file, {} is not available.\".format(fn_)\n sys.stderr.write(err + \"\\n\")\n sys.exit(42)\n with salt.utils.files.fopen(fn_, \"r\") as fp_:\n data = fp_.read()\n return {fn_: data}", "def get_template(template_file):\n\n template_paths = [os.path.abspath('./Templates'), os.path.abspath('.')]\n latex_jinja_env = jinja2.Environment(\n block_start_string='\\BLOCK{',\n block_end_string='}',\n variable_start_string='\\VAR{',\n variable_end_string='}',\n comment_start_string='\\#{',\n comment_end_string='}',\n line_statement_prefix='%%',\n line_comment_prefix='%#',\n trim_blocks=True,\n autoescape=False,\n loader=jinja2.FileSystemLoader(template_paths)\n )\n latex_jinja_env.filters['tex_escape'] = tex_escape\n print(\"Loading Template {}\".format(os.path.abspath(template_file)))\n template = latex_jinja_env.get_template(os.path.normpath(template_file))\n return template", "def parse(\n self,\n source: str,\n name: t.Optional[str] = None,\n filename: t.Optional[str] = None,\n ) -> nodes.Template:\n try:\n return self._parse(source, name, filename)\n except TemplateSyntaxError:\n self.handle_exception(source=source)", "def get_template(cls, template_id):\r\n dirname = cls.get_template_dir()\r\n if dirname is not None:\r\n path = os.path.join(dirname, template_id)\r\n for pkg in cls.template_packages:\r\n if resource_exists(pkg, path):\r\n template_content = 
resource_string(pkg, path)\r\n template = yaml.safe_load(template_content)\r\n template['template_id'] = template_id\r\n return template", "def read_config_file():\n # Read in the config file to get sensative (non-git) email info\n with open('assets/config.yaml', 'r') as f:\n dikt = yaml.safe_load(f)['email_config']\n\n # Allows to access this dict as if it were an object\n # TODO do we need this? Is there a better way?\n class ObjectView():\n def __init__(self, d):\n self.__dict__ = d\n return ObjectView(dikt)", "def try_read_input_file_with_tpl(tpl_file, input_file=None):\n\n if input_file is None:\n input_file = tpl_file.replace(\".tpl\", \"\")\n if not os.path.exists(input_file):\n return None\n # read the names first to see what we are dealing with\n # and also to do some basic error checking\n parnames = parse_tpl_file(tpl_file)\n try:\n df = _read_infile_with_tplfile(tpl_file, input_file)\n except Exception as e:\n print(\"error trying to read input file with tpl file:{0}\".format(str(e)))\n return None\n return df", "def _create_from_template(self):\n template_file = self._helper._get_template_file_path()\n self._engine.open_file_by_path(template_file)\n self._save_current_as_new()", "def read_html_template(self, configuration_file=\"./conf.txt\", section=\"html_template\"):\n htmls = self.read_section(configuration_file, section)\n\n if \"template_dir\" in htmls and htmls[\"template_dir\"]:\n template_dir = htmls[\"template_dir\"]\n\n if not os.path.isdir(template_dir):\n raise ConfException(\"{0} is not a directory in {1} file\".\n format(template_dir, configuration_file))\n\n htmls.pop(\"template_dir\")\n\n if \"/\" != template_dir[-1:]:\n template_dir += \"/\"\n\n for key in htmls:\n if htmls[key] is not None and \"\" != htmls[key]:\n htmls[key] = template_dir + htmls[key]\n return htmls", "def preprocess_template(template_file: str) -> None:\n LOGGER.info(\"Processing template %s\", template_file)\n\n with DFReader(open(template_file, \"rb\")) as reader:\n level = reader.read_level()\n\n doors = {}\n keys_needed: Dict[int, int] = collections.Counter()\n for eid, (_, _, entity) in level.entities.items():\n if not isinstance(entity, LevelDoor):\n continue\n\n doors[eid] = {\n \"level\": entity.file_name.decode(),\n \"door\": entity.door_set,\n }\n keys_needed[DOOR_INFO[entity.door_set][1]] += 1\n\n for door_data in doors.values():\n key_type = DOOR_INFO[door_data[\"door\"]][1]\n while key_type < 3 and keys_needed[key_type + 1] == 0:\n key_type += 1\n door_data[\"key_get\"] = key_type\n\n with open(template_file + \".json\", \"w\") as fout:\n json.dump(\n {\"doors\": doors},\n fout,\n )", "def read_dictionary(filepath):\n with open(filepath, 'r') as dict_file:\n return dict_file.read().splitlines()", "def get_file(file):\n with open(file) as data_file:\n return json.load(data_file)", "def get_templates(instrument=''):\n import os, json\n template_path = os.path.dirname(__file__)\n template_names = [fn\n for fn in os.listdir(template_path)\n if fn.endswith(\".json\") and fn.startswith(instrument)]\n templates = dict([(tn[len(instrument)+1:-5],\n json.loads(open(os.path.join(template_path, tn), 'r').read()))\n for tn in template_names])\n return templates", "def test_read_namespaced_template(self):\n pass", "def open_dict_file(self, fn: str) -> dict[str, str]:\n language = self.language\n if not fn or not language:\n return None\n if g.app.spellDict:\n return g.app.spellDict\n if not g.os_path_exists(fn):\n # Fix bug 1175013: leo/plugins/spellpyx.txt is\n # both source controlled 
and customized.\n self.create(fn)\n if g.os_path_exists(fn):\n # Merge the local and global dictionaries.\n try:\n self.clean_dict(fn)\n d = enchant.DictWithPWL(language, fn)\n except Exception:\n try:\n d = enchant.Dict(language)\n except Exception:\n d = {}\n else:\n # A fallback. Unlikely to happen.\n try:\n d = enchant.Dict(language)\n except Exception:\n d = {}\n # Commen exit, for traces.\n return d", "def load_template(\n dataset: DatasetManager, template_dir: str, template_name: str\n) -> NexusTemplate:\n if template_name == \"linear\":\n return LinearNexusTemplate()\n\n fullpath = os.path.join(template_dir, template_name)\n with open(fullpath + \".json\", \"r\") as fdata:\n data = json.load(fdata)\n\n level_doors = []\n other_doors = []\n for eid, door_data in data[\"doors\"].items():\n if door_data[\"level\"] in dataset.levels:\n level_doors.append(eid)\n else:\n other_doors.append(eid)\n\n return NexusTemplate(fullpath, template_name, data, level_doors, other_doors)", "def get_spec_file(*, path:str, format:str) -> dict:\n ext ={'json': json,\n 'yaml': yaml}\n try:\n with open(path, 'r') as f:\n return ext[format].load(f)\n except Exception as e:\n logger.error(f'file could not be loaded {path}')\n raise", "def get_template(loader, template_name):\n return loader.get_template(template_name)", "def load_umi_template(json_template):\n if os.path.isfile(json_template):\n with open(json_template) as f:\n dicts = json.load(f, object_pairs_hook=OrderedDict)\n\n return [{key: json_normalize(value)} for key, value in dicts.items()]\n else:\n raise ValueError(\"File {} does not exist\".format(json_template))", "def load_ftpl(self, file):\n\n logger_env.info(\" Loading ftpl file %s\" % file)\n lines = open(file).readlines()\n for line in lines:\n line = line.split(\"#\")[0]\n fields = line.split(\":\")\n if len(fields) != 2:\n continue\n\n key_string = fields[0].strip()\n keys = key_string.split(\",\")\n keys = [x.strip().strip(\"\\\"\") for x in keys]\n keys = [x for x in keys if x]\n keys = tuple(keys)\n\n value_string = fields[1].strip()\n if keys in self.tpl:\n if value_string == \"!!!\":\n # Special rule to negate a key, it is used by the last loaded ftpl file\n # to overwrite values that might have been defined before.\n del self.tpl[keys]\n else:\n self.tpl[keys] = self.tpl[keys] + \" , \" + value_string\n else:\n self.tpl[keys] = value_string\n return", "def read_file(file_path):\n\n output_dict = dict()\n try:\n if os.path.exists(file_path):\n with open(file_path) as fd:\n output = fd.readlines()\n for idx in range(len(output)):\n key_info = output[idx].split('=')[0].strip()\n value_info = output[idx].split('=')[1].strip()\n output_dict[key_info] = value_info\n return output_dict\n except Exception as e:\n SysTools.logger.warning(\"Read file:%s failed, reason:%s\" % (file_path, str(e)))", "def return_template_output(base_dir,filename,data_dict):\n templateLoader = jinja2.FileSystemLoader( searchpath=base_dir)\n templateEnv = jinja2.Environment( loader=templateLoader )\n template = templateEnv.get_template(filename)\n output = template.render(data_dict)\n return output", "def _file_loader(self) -> dict:\n cfg = None\n try:\n with open(self._path) as file:\n cfg = json.loads(file.read())\n except FileNotFoundError as e:\n print(e)\n exit(1)\n return cfg", "def get_data_model() -> Dict:\n root = Path(__file__).parent.parent.parent\n template_path = root / 'data_models/data_model.json'\n with template_path.open() as template:\n out = json.load(template)\n return out", "def 
__init__(self, template_name):\n # self.env = Environment(loader=PackageLoader(\n # package, path))\n # self.template = self.env.get_template(template_name)\n with open(template_name, 'r', encoding='UTF-8') as f:\n self.template = Template(f.read())" ]
[ "0.7782369", "0.76745313", "0.7547481", "0.74933225", "0.74301606", "0.738433", "0.7244087", "0.7218751", "0.71575886", "0.6929455", "0.69022644", "0.6875117", "0.68383133", "0.67652184", "0.659829", "0.65947074", "0.65571237", "0.6519493", "0.64934295", "0.649163", "0.6485917", "0.64532334", "0.6451852", "0.6404066", "0.6353623", "0.63397896", "0.6314211", "0.6241727", "0.6225553", "0.6224012", "0.61869335", "0.6136726", "0.6062407", "0.60375446", "0.6022057", "0.5979512", "0.5967998", "0.59330314", "0.59132147", "0.5906389", "0.5897128", "0.5888516", "0.5875579", "0.5867794", "0.5865039", "0.5862009", "0.5844031", "0.58370024", "0.5834547", "0.58240914", "0.58067775", "0.5805804", "0.5795068", "0.57924545", "0.57808334", "0.5779702", "0.57674736", "0.5765866", "0.57458997", "0.5743487", "0.5740194", "0.57219553", "0.57159734", "0.5713282", "0.57056725", "0.5697598", "0.5691927", "0.56866986", "0.5678013", "0.56762433", "0.5667476", "0.5645239", "0.564508", "0.56327343", "0.56307137", "0.5629547", "0.56270295", "0.5615731", "0.5599203", "0.55954474", "0.55823493", "0.55808634", "0.5577242", "0.5574971", "0.55720687", "0.5564626", "0.55641043", "0.5563324", "0.55581963", "0.5543746", "0.55390054", "0.55385447", "0.5535586", "0.55308914", "0.552336", "0.55088013", "0.5503278", "0.55032", "0.5498031", "0.5488191" ]
0.64322484
23
Load the hierarchy of templates.
def _GetTemplate(self): # First read default template. tmplt = self._LoadTemplate(c.preproc_template_default) tmplt['proc'] = self.topdir self.template_type = 'default' self.templates = [] if self.template_file is not None: tmplt.update(self._LoadTemplate(self.template_file)) self.template_type = 'command-line' self.templates.append(os.path.abspath(self.template_file)) found_template = True else: # Find a study specific template file. study_template_file = self._FindTemplateFile('%s/..' % self.topdir) if study_template_file is not None: # Merge study template into default, study template has precedence. if self.verbose: print "Using study template at " + study_template_file tmplt.update(self._LoadTemplate(study_template_file)) self.template_type = 'study-specific' self.templates.append(os.path.abspath(study_template_file)) found_template = True else: found_template = False # Now look for a subject-specific template file. subject_template_file = self._FindTemplateFile('%s' % self.topdir) if subject_template_file is not None: # Merge subject template, subject template has precedence. if self.verbose: print "Using subject-specific template at %s" % \ subject_template_file tmplt.update(self._LoadTemplate(subject_template_file)) self.template_type = 'study-specific' self.templates.append(os.path.abspath(subject_template_file)) found_template = True if not found_template: raise RuntimeError('Could not find template file.') if tmplt.get('subject','same') == 'same': # Default subdirectory is same as data directory. tmplt['subject'] = self.topdir.split('/')[-1] else: if not isinstance(tmplt['subject'],str): errstr = 'preprocess: Invalid subject number. Be sure to ' + \ 'enclose the subject number item with double quotes.' raise RuntimeError(errstr) # Keys that apply to all EPIs. self.fsl_flip = tmplt.get('fsl_flip', False) if self.fsl_flip: self.flip_opts = '-LT' else: self.flip_opts = '' # Replace strings with python types. for key in tmplt.keys(): if tmplt[key] == 'None': tmplt[key] = None elif key == 'True': tmplt[key] = True elif key == 'False': tmplt[key] = False return tmplt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def load_templates(self):\n TemplateHandler.templates = []\n for template in os.listdir(TemplateHandler.templates_path):\n template_config = self.load_template_conf(template)\n if template_config is None:\n continue\n TemplateHandler.templates.append(template_config)", "def load_templates(self):\n\n self.templates = []\n\n if os.path.exists(\"question_templates.txt\"):\n for line in open(\"question_templates.txt\", \"r\"):\n self.templates.append(line.replace(\"\\n\", \"\"))", "def load_template_files(self):\n templates = dict()\n template_path = settings.CUSTOM_VERTO_TEMPLATES\n templates.update(self.read_template_files(template_path))\n if hasattr(self, \"extra_converter_templates_directory\"):\n directory = self.extra_converter_templates_directory\n template_path = os.path.join(template_path, directory)\n templates.update(self.read_template_files(template_path))\n return templates", "def run():\r\n template_locations = settings.MAKO_TEMPLATES\r\n for namespace, directories in template_locations.items():\r\n clear_lookups(namespace)\r\n for directory in directories:\r\n add_lookup(namespace, directory)", "def get_hierarchy_loader(directories):\n template_loaders = OrderedDict()\n for app_name, template_dir in directories:\n # Pull FileSystemLoader from cache if it already exists for this directory,\n # or instanciate it if not\n if template_dir not in file_system_loaders:\n loader = FileSystemLoader(template_dir)\n file_system_loaders[template_dir] = loader\n else:\n loader = file_system_loaders[template_dir]\n template_loaders[app_name] = loader\n return HierarchyLoader(template_loaders)", "def load_all_templates(dataset, template_dir: str) -> Dict[str, NexusTemplate]:\n template_set = {\n template_name\n for template_name in os.listdir(template_dir)\n if not template_name.endswith(\".json\")\n }\n template_set.add(\"linear\")\n\n template_ord = []\n for template_name in TEMPLATE_PREFERRED_ORDER:\n try:\n template_set.remove(template_name)\n except KeyError:\n pass\n else:\n template_ord.append(template_name)\n template_ord.extend(sorted(template_set))\n\n return {\n template_name: load_template(dataset, template_dir, template_name)\n for template_name in template_ord\n }", "def get_templates(self):\n\n data = self.request_from_server('templates')\n self.templates = data", "def template_loader(self):\n return None", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def get_multi_hierarchy_loader(get_active_hierarchy_cb, hierarchies):\n template_loaders = {}\n for hierarchy_name, directories in hierarchies:\n template_loaders[hierarchy_name] = get_hierarchy_loader(directories)\n return MultiHierarchyLoader(get_active_hierarchy_cb, template_loaders)", "def load_template(\n dataset: DatasetManager, template_dir: str, template_name: str\n) -> NexusTemplate:\n if template_name == \"linear\":\n return LinearNexusTemplate()\n\n fullpath = 
os.path.join(template_dir, template_name)\n with open(fullpath + \".json\", \"r\") as fdata:\n data = json.load(fdata)\n\n level_doors = []\n other_doors = []\n for eid, door_data in data[\"doors\"].items():\n if door_data[\"level\"] in dataset.levels:\n level_doors.append(eid)\n else:\n other_doors.append(eid)\n\n return NexusTemplate(fullpath, template_name, data, level_doors, other_doors)", "def all_templates():\r\n # TODO use memcache to memoize w/ expiration\r\n templates = defaultdict(list)\r\n for category, descriptor in XBlock.load_classes():\r\n if not hasattr(descriptor, 'templates'):\r\n continue\r\n templates[category] = descriptor.templates()\r\n\r\n return templates", "def loadTemplate(self):\n\n\t\t# Change directory to template folder\n\n\t\toriginal_path = os.getcwd()\n\t\tos.chdir(self.template_path)\n\n\t\th.load_file(\"stdrun.hoc\")\n\t\tif self.verbose: print('- Loading constants')\t\t\n\t\th.load_file('import3d.hoc')\n\n\t\tconstants_loaded = h.load_file('constants.hoc')\n\t\tmorphology_loaded = h.load_file('morphology_%s.hoc'%self.template_name)\n\n\t\tbiophysics_loaded = h.load_file('biophysics_%s.hoc'%self.template_name)\n\n\t\terror = 'Can\\'t find hoc file! Did you create it and call it by the correct name?'\n\t\tif not constants_loaded:\n\t\t\tprint('WARNING: {} hoc file not loaded! Did you create it and call it by the correct name?'.format(constants))\n\t\tif not morphology_loaded:\n\t\t\tprint('WARNING: {} hoc file not loaded! Did you create it and call it by the correct name?'.format(morphology))\n\t\tif not biophysics_loaded:\n\t\t\t# pdb.set_trace()\n\t\t\tprint('WARNING: {} hoc file not loaded! Did you create it and call it by the correct name?'.format(biophysics))\n\n\n\t\tif self.verbose:\n\t\t\tprint('\\n- Making %s template from .hoc file'%self.template_name)\n\n\t\t# h.load_file('%s.hoc'%self.template_name)\n\t\th.load_file('template_%s.hoc'%self.template_name)\n\n\t\t# Return to original dir\n\t\tos.chdir(original_path)", "def load_views(dispatcher: Dispatcher):\n base_path = os.path.join(os.path.dirname(__file__), 'views')\n files = os.listdir(base_path)\n\n for file_name in files:\n handler_module, _ = os.path.splitext(file_name)\n if handler_module[:2] + handler_module[-2:] != '____':\n module = import_module(f'.{handler_module}', 'views')\n module.init(dispatcher)", "def test_nested_template_source_generation(self):\n sources = [source for source in self.loader.get_template_sources('component.child.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/child/child.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/child/child.html')\n\n sources = [source for source in self.loader.get_template_sources('deeply.nested.component.and.child.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/deeply/nested/component/and/child/child.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/deeply/nested/component/and/child/child.html')\n\n sources = [source for source in self.loader.get_template_sources('component.child/another.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/child/another.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/child/another.html')", "def load_network_templates(self) -> List:\n try:\n network_templates = self.api.get(host=self.host, endpoint=f\"/api/v1/orgs/{self.oid}/networktemplates\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting network 
templates:{TextColors.ENDC} {e}\")\n raise e\n self.network_templates = network_templates", "def test_filesystem_loader(self):\n\n self.assertEqual(\n list(\n template_finder.templates_for_engine({\n 'BACKEND': 'django.templates.backends.django.Djangotemplate.',\n 'APP_DIRS': False,\n 'DIRS': ['/tmp/project/templates/', '/tmp/project/other_templates/']\n })\n ),\n [\n ('base.html', '/tmp/project/templates/base.html'),\n ('foo/bar.html', '/tmp/project/templates/foo/bar.html'),\n ('baz.html', '/tmp/project/other_templates/baz.html'),\n ]\n )", "def _load_template(self):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, '__init__.ini')\n cf = ApplicationConf.get_instance()\n with comp_open(filename, mode='r') as fp:\n content = fp.read()\n content = content.format(**cf)\n conf = CompConfigParser(allow_no_value=True)\n conf.read_string(content, '__init__.ini')\n ini = {'dirs': [], 'files': [], 'binaries': []}\n if conf.has_section('dirs'):\n for key in conf.options('dirs'):\n ini['dirs'].append(key)\n if conf.has_section('files'):\n for key in conf.options('files'):\n ini['files'].append(self.__remap(key))\n if conf.has_section('binaries'):\n for key in conf.options('binaries'):\n ini['binaries'].append(self.__remap(key))\n if isinstance(self._ini, dict):\n self._ini.update(ini)\n else:\n self._ini = ini", "def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)", "def read_templates(folder):\n output = []\n for path, subdirs, files in os.walk(folder):\n for name in files:\n if name.endswith('.yml'):\n tpl = yaml.load(open(os.path.join(path, name)).read())\n tpl['template_name'] = name\n\n # Test if all required fields are in template:\n assert 'keywords' in tpl.keys(), 'Missing keywords field.'\n required_fields = ['date', 'amount', 'invoice_number']\n assert len(set(required_fields).intersection(tpl['fields'].keys())) == len(required_fields), \\\n 'Missing required key in template {} {}. 
Found {}'.format(name, path, tpl['fields'].keys())\n \n # Keywords as list, if only one.\n if type(tpl['keywords']) is not list:\n tpl['keywords'] = [tpl['keywords']]\n\n output.append(InvoiceTemplate(tpl))\n return output", "def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")", "def get_templates(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/templates\").json()", "def load(self, filename, relative_to=None, cls=None, encoding=None):\n # TODO: get the template extension from the config!!\n if not filename.endswith('.html'):\n filename = tg.config['pylons.app_globals'\n ].dotted_filename_finder.get_dotted_filename(\n template_name=filename,\n template_extension='.html')\n\n return TemplateLoader.load(self, filename,\n relative_to=relative_to, cls=cls, encoding=encoding)", "def init_templates( path=\"boilerplate\" ):\n global template_env\n template_loader = jinja2.FileSystemLoader(searchpath=\"boilerplate\" )\n template_env = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True\n )", "def get_templates_dirs(self): \n from pkg_resources import resource_filename\n return [ resource_filename(__name__, 'templates') ]\n # return []", "def get_all_templates(cls):\n raise NotImplementedError()", "def create_template_loader(self, template_path):\n raise NotImplementedError()", "def _LoadTemplate(self,fname):\n f = open(fname, 'r')\n lines = f.readlines()\n data = ''\n for line in lines:\n if not line.startswith('---'):\n data += line\n data = data.replace('\\t',' ')\n if '\\t' in data:\n errstr = \\\n 'Illegal tabs encountered in template file. Use spaces instead.'\n raise ScannerError(errstr)\n proc.LogErrors(errstr)\n tmplt = yaml.load(data)\n f.close()\n return tmplt", "def _load_dirs(self):\n rootdirs = self._docset.get_compounds(xml.Directory,\n lambda x: x.get_parent() is None)\n for dirdoc in rootdirs:\n self._load_dir(dirdoc, None)", "def load_fixtures(self):\n for fixture_dir in settings.FIXTURE_DIRS:\n fixture_dir = os.path.join(fixture_dir, self.filesystem_name)\n for (root, dirs, files) in os.walk(fixture_dir):\n for file in files:\n full_file_path = os.path.join(root, *dirs, file)\n with open(full_file_path, 'rb') as f:\n self.save(os.path.relpath(full_file_path, fixture_dir), f)", "def fetch_sample_templates():\n source_folder = Path(root, 'templates', 'sample_setup_files')\n Path('sample_templates').mkdir(parents=True, exist_ok=True)\n target_folder = Path().resolve()\n target_folder = Path(target_folder, 'sample_templates')\n\n copytree(source_folder, target_folder, dirs_exist_ok=True)\n logger.info(f'Sample templates can be found in directory {target_folder}')", "def read_in_templates(path, email_object=None):\n import os\n templates = {}\n\n for fle in os.listdir(path):\n with open(os.path.join(path, fle)) as _f:\n raw = \"\\n\".join(_f.readlines())\n templates[fle] = raw\n\n if email_object:\n email_object.use_templates(templates)\n else:\n return templates", "def loadTreeHandlers(self):\n #\n # Paths for key folders\n plugin_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\")\n self.handler_path = handler_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\", \"trees\")\n #\n if not g.os_path_isdir(handler_path):\n g.es(\"No tree handler folder found\", color=\"red\")\n else:\n g.es(\"Scanning for tree handlers\", color=\"blue\")\n #\n # Add folder locations to path\n old_path = sys.path[:]\n sys.path.insert(0, plugin_path)\n 
sys.path.insert(0, handler_path)\n #@+<< Get plugin manager module >>\n #@+node:ekr.20050329082101.135: *4* << Get plugin manager module >>\n # Get the manager\n try:\n self.plugin_manager = __import__(\"plugin_manager\")\n except ImportError as err:\n g.es(\"Autotrees did not load plugin manager: %s\" % (err,), color=\"red\")\n self.plugin_manager = None\n #@-<< Get plugin manager module >>\n #@+<< Find all handlers >>\n #@+node:ekr.20050329082101.136: *4* << Find all handlers >>\n # Find all handlers\n for filename in glob.glob(g.os_path_join(handler_path, \"*.py\")):\n handler_name = g.os_path_splitext(g.os_path_split(filename)[1])[0]\n g.es(\"... looking in %s\" % handler_name, color=\"blue\")\n try:\n self.loadHandlersFrom(handler_name)\n except BadHandler as err:\n g.es(\"... unable to load '%s' handler: %s\" % (handler_name, err), color=\"red\")\n #@-<< Find all handlers >>\n # Restore\n sys.path = old_path", "def load(self):\n\n if not os.path.isdir(self.path):\n raise TreeError('Not a directory: {}'.format(self.path))\n\n self.log.debug('{} load tree'.format(self.path))\n start = int(time.mktime(time.localtime()))\n\n super(Tree, self).load()\n self.paths = {}\n self.empty_dirs = []\n self.relative_dirs = []\n\n for (root, dirs, files) in os.walk(self.path, topdown=True):\n if os.path.basename(root) in IGNORED_TREE_FOLDER_NAMES:\n continue\n\n if files:\n self.files.extend((root, filename) for filename in files if filename != '')\n for filename in files:\n self.paths[os.path.join(root, filename)] = True\n\n elif not dirs:\n self.empty_dirs.append(root)\n\n self.relative_dirs = set(self.relative_path(x[0]) for x in self.files)\n self.files.sort(key=self.__cmp_file_path__())\n\n stop = int(time.mktime(time.localtime()))\n self.log.debug('loaded {:d} files in {:d} seconds'.format(\n len(self.files),\n (stop-start)\n ))", "def test_create_template_for_all_namespaces(self):\n pass", "def loadTheme(self, name = \"default\"):\n themes = os.listdir(\"themes\")\n if name in themes:\n self.templates = {}\n files = glob.glob(\"themes/%s/*.tpl\" % name)\n for file in files:\n f = open(file)\n data = \"\\n\".join(f.readlines())\n f.close()\n\n key = file.replace(\".tpl\", \"\").split(os.path.sep)[-1]\n self.templates[key] = data", "def preload_views():\n log(\"Starting `preload_views`\")\n modules = set()\n for pkg in find_packages(ROOT_DIR):\n pkgpath = ROOT_DIR + \"/\" + pkg.replace(\".\", \"/\")\n for info in pkgutil.iter_modules([pkgpath]):\n if info.ispkg:\n continue\n if info.name != \"views\":\n continue\n modules.add(pkg + \".\" + info.name)\n for module in modules:\n try:\n importlib.import_module(module)\n except Exception as e: # pylint: disable=broad-except\n log(\"{} failed to load: {}\".format(module, e))\n log(\"Done `preload_views`\")", "def find_user_templates(self):\n\n # a list to store file names in\n local_templates = []\n\n # loop through the directory content\n for name in os.listdir(self._template_directory):\n # check to see if it is a directory and not in the database\n if (os.path.isdir(os.path.join(self._template_directory, name)) and\n name not in self._templates):\n # add it to the list\n local_templates.append(name)\n\n return local_templates", "def load_template(cls, template_name):\n\n template_path = path.join(dirs.user_data_dir, 'template', '%s.yaml' % template_name)\n\n if not path.isfile(template_path):\n return {}\n\n with open(template_path, 'r') as gf:\n return yaml.safe_load(gf)", "def load():\n root = Path(__file__).parent\n for path in 
root.iterdir():\n if path.is_dir() and not path.name.startswith(\"_\"):\n subject = (path / \"subject.txt\").read_text()\n txt = (path / \"body.txt\").read_text()\n html = path / \"body.html\"\n if html.exists():\n html = html.read_text()\n else:\n html = None\n attachment = None\n pymodule = path / \"__init__.py\"\n if pymodule.exists():\n pymodule = importlib.import_module(f\"egapro.emails.{path.name}\")\n attachment = pymodule.attachment\n globals()[path.name] = Email(subject, txt, html, attachment)", "def LoadAllSubElements(self, recursive=False):\n pass", "def loadTemplatedCutFolder(filepath, inputsDir):\n cutfolder = QFramework.TQFolder(\"cuts\")\n\n filepath_abs = common.findConfigPath(filepath)\n\n with open(filepath_abs, 'r') as templateFile:\n template = templateFile.read()\n\n # Keywords found in the template file are used as the names of the files containing the inputs\n inputs = common.findFormatKeywords(template)\n\n # Reads the inputs and plugs them into the template\n inputDict = {}\n for input in inputs:\n with open(inputsDir+input+\".dent\", \"r\") as inputFile:\n inputDict[input] = inputFile.read()\n\n ok = cutfolder.importFromText(template % inputDict)\n if not ok:\n QFramework.BREAK(\"unable to load templated cuts from '{:s}' \".format(filepath.Data()))\n return cutfolder", "def _load_template(name: str) -> str:\n html_tpl = _read_text(name + '.html')\n import re\n\n # line breaks are not needed\n html_tpl = html_tpl.replace('\\n', '')\n # remove comments\n html_tpl = re.sub(r'<!--(.|\\s|\\n)*?-->', '', html_tpl)\n # remove space around special characters\n html_tpl = re.sub(r'\\s*([><])\\s*', r'\\1', html_tpl)\n return html_tpl", "def __init__(self, *paths, **kwargs):\n trajectories = load_trajectories(*paths, **kwargs)\n super().__init__(trajectories, **kwargs)", "def test_non_nested_template_source_generation(self):\n sources = [source for source in self.loader.get_template_sources('component.html')]\n\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/component.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/component.html')", "def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj", "def load_template_if_needed(self):\n\n class GeneratorProxy(object):\n \"\"\"\n An interface to templates and plugins for\n providing restricted access to the methods.\n \"\"\"\n\n def __init__(self, preprocessor=None, postprocessor=None,\n context_for_path=None):\n self.preprocessor = preprocessor\n self.postprocessor = postprocessor\n self.context_for_path = context_for_path\n\n if not self.template:\n logger.info(\"Generating site at [%s]\" % self.site.sitepath)\n self.template = 
Template.find_template(self.site)\n logger.debug(\"Using [%s] as the template\",\n self.template.__class__.__name__)\n\n logger.info(\"Configuring the template environment\")\n preprocessor = self.events.begin_text_resource\n postprocessor = self.events.text_resource_complete\n proxy = GeneratorProxy(context_for_path=self.context_for_path,\n preprocessor=preprocessor,\n postprocessor=postprocessor)\n self.template.configure(self.site,\n engine=proxy)\n self.events.template_loaded(self.template)", "def load_project_structure(self, startpath, tree):\n from PyQt5.QtWidgets import QTreeWidgetItem\n from PyQt5.QtGui import QIcon\n\n for element in os.listdir(startpath):\n path_info = startpath + \"/\" + element\n parent_itm = QTreeWidgetItem(tree, [os.path.basename(element)])\n if os.path.isdir(path_info):\n self.load_project_structure(path_info, parent_itm)\n parent_itm.setIcon(0, QIcon(\"assets/folder.ico\"))\n else:\n parent_itm.setIcon(0, QIcon(\"assets/file.ico\"))", "def load_elements(self):\n for path in self.element_paths:\n self.process_path(path)", "def load_template_with_location(self, template_path):\n if not template_path:\n raise exc.FrameworkError(\"Invalid template path '%s'.\" %\n template_path)\n\n # first attempt to load from file\n content, path = self._load_template_from_file(template_path)\n if content is None:\n # second attempt to load from module\n content, path = self._load_template_from_module(template_path)\n template_type = 'module'\n else:\n template_type = 'directory'\n\n # if content is None, that means we didn't find a template file in\n # either and that is an exception\n if content is None:\n raise exc.FrameworkError(\"Could not locate template: %s\" %\n template_path)\n\n return (content, template_type, path)", "def read_template_files(self, template_path):\n templates = dict()\n for file in listdir(template_path):\n template_file = re.search(r\"(.*?).html$\", file)\n if template_file:\n template_name = template_file.groups()[0]\n templates[template_name] = open(os.path.join(template_path, file)).read()\n return templates", "def use_templates(self, templates):\n self.htmls = templates", "def _load(self, directory):\n pass", "def list_templates(self):\n raise NotImplementedError()", "def _buildjinja2_templates(self):\n templates = self.embryo.templates\n\n # if templates is a module extract its public string attributes\n # into the templates dict expected below.\n if isinstance(templates, ModuleType):\n tmp_templates = {}\n for k in dir(templates):\n v = getattr(templates, k)\n if (not k.startswith('_')) and isinstance(v, (str, Template)):\n tmp_templates[k] = v\n templates = tmp_templates\n\n # load the jinja2 templates contained in the module, either in the form\n # of Template objects or strings.\n loaded_templates = {}\n jinja_env = build_env()\n\n if templates:\n for k, v in templates.items():\n say('loading template: {}'.format(k))\n if isinstance(v, Template):\n loaded_templates[k] = v\n elif isinstance(v, str):\n try:\n loaded_templates[k] = jinja_env.from_string(v)\n except Exception as exc:\n source = exc.source.split('\\n')[exc.lineno - 1]\n shout(f'error \"{exc.message}\", line {exc.lineno} {source}')\n\n self.jinja2_templates = loaded_templates", "def _load(name, paths):\n for base_path in paths:\n parts = name.split('.')\n number_of_parts = len(parts)\n\n for folder_parts in range(number_of_parts):\n folder = os.path.join(base_path, *parts[:folder_parts])\n filename = '.'.join(parts[folder_parts:]) + '.json'\n json_path = os.path.join(folder, 
filename)\n\n if os.path.isfile(json_path):\n with open(json_path, 'r') as json_file:\n LOGGER.debug('Loading %s from %s', name, json_path)\n return json.load(json_file)", "def get_templates_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [resource_filename(__name__, 'templates')]", "def render_templates(self):\n\n # dockerfile\n try:\n t = self.templates.get_template(\n 'docker/dockerfiles/{}.dockerfile.template'.format(self.repo)\n )\n except TemplateNotFound:\n t = self.templates.get_template(\n 'docker/dockerfiles/default.dockerfile.template'\n )\n\n self.files.append({\n 'name': 'Dockerfile',\n 'content': t.render(commit=self.commit),\n })\n\n # gunicorn\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.conf.py'\n )\n self.files.append({\n 'name': 'gunicorn.conf.py',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.sh'\n )\n self.files.append({\n 'name': 'gunicorn.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # nginx\n t = self.templates.get_template(\n 'docker/nginx/app.nginx.conf'\n )\n self.files.append({\n 'name': 'app.nginx.conf',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/nginx/nginx.sh'\n )\n self.files.append({\n 'name': 'nginx.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # cron/, etc/ iif there exists a `self.repo` directory\n def _filter(p):\n return (\"cron/\" in p or \"etc/\" in p) and (self.repo in p) and \\\n (not os.path.basename(p).startswith('.'))\n\n for t in self.templates.list_templates(\n filter_func=_filter):\n\n self.files.append({\n 'name': os.path.basename(t),\n 'content': self.templates.get_template(t).render(),\n })", "def load_subdirs(hub: pop.hub.Hub, sub: pop.hub.Sub, recurse: bool = False):\n if not sub._sub_virtual:\n return\n dirs = hub.pop.sub.get_dirs(sub)\n roots = {}\n for dir_ in dirs:\n for fn in os.listdir(dir_):\n if fn.startswith(\"_\"):\n continue\n if fn == \"contracts\":\n continue\n full = os.path.join(dir_, fn)\n if not os.path.isdir(full):\n continue\n if fn not in roots:\n roots[fn] = [full]\n else:\n roots[fn].append(full)\n for name, sub_dirs in roots.items():\n # Load er up!\n hub.pop.sub.add(\n subname=name,\n sub=sub,\n static=sub_dirs,\n virtual=sub._virtual,\n omit_start=sub._omit_start,\n omit_end=sub._omit_end,\n omit_func=sub._omit_func,\n omit_class=sub._omit_class,\n omit_vars=sub._omit_vars,\n mod_basename=sub._mod_basename,\n stop_on_failures=sub._stop_on_failures,\n )\n if recurse:\n if isinstance(getattr(sub, name), pop.hub.Sub):\n hub.pop.sub.load_subdirs(getattr(sub, name), recurse)", "def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()", "def get_templates_dirs(self):\n return [resource_filename(__name__, 'templates')]", "def get_templates_dirs(self):\n return [resource_filename(__name__, 'templates')]", "def load_dev_templates(settings, project_name):\n #Load json file\n base_path = settings['path.templates']\n template_path = 
os.path.join(base_path,\n \"{0}.json\".format(project_name)).replace(\"\\\\\", \"/\")\n file = open(template_path).read()\n template = json.loads(file)\n\n return template", "def add_user_templates(self):\n\n # get all the user's templates\n user_templates = self.find_user_templates()\n\n # loop through the templates\n for template in user_templates:\n # create a template object and add it to the database\n local_template = PhishingTemplate(template)\n self._templates[template] = local_template", "def LoadAllSubElements(self, recursive=False):\n for name in self.AllSubElements():\n element = self.LoadSubElement(name)\n if element and recursive:\n element.LoadAllSubElements(recursive=recursive)", "def _load_templates(self, tasks, skip_tasks=()):\n name2kwargs = {}\n dir_ = Path('data/prompts')\n paths = (dir_/t for t in tasks) if tasks else dir_.iterdir()\n if skip_tasks: paths = (p for p in paths if p.stem not in skip_tasks)\n for path in paths:\n if not path.is_dir():\n if tasks: warnings.warn(f'{path} is not a directory.')\n continue\n name2kwargs[path.stem] = load_prompt(path.stem,\n verbose=self.verbose)\n return name2kwargs", "def test_team_template_folders_id_templates_get(self):\n pass", "def _load_statements(self):\n home = Path(\".\")\n context = {\"table_name\": self.TABLE}\n self.sql = {}\n for path in home.glob(\"./sql/*\"):\n with open(path) as f:\n template = Template(f.read().strip())\n self.sql[path.stem] = template.render(context)", "def readTemplates():\n\n # Compile HTML templates.\n templates = {}\n for tt in [ 'image', 'dirindex', 'allindex', 'trackindex', 'sortindex' ]:\n fn = 'template-%s' % tt + opts.htmlext\n ttext = readTemplate(fn)\n templates[ tt ] = compileTemplate(ttext, fn)\n\n fn = 'template-css.css'\n ttext = readTemplate(fn)\n templates[ 'css' ] = compileTemplate(ttext, fn)\n\n # Compile user-specified rc file.\n rcsfx = 'rc'\n templates[ rcsfx ] = []\n if opts.rc:\n try:\n tfile = open(opts.rc, \"r\")\n orc = tfile.read()\n tfile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't open user rc file:\", opts.rc\n sys.exit(1)\n\n o = compileCode('', orc, opts.rc)\n templates[ rcsfx ] += [ o ]\n\n # Compile user-specified code.\n if opts.rccode:\n o = compileCode('', opts.rccode, \"rccode option\")\n templates[ rcsfx ] += [ o ]\n\n # Compile global rc file without HTML tags, just python code.\n code = readTemplate('template-%s' % rcsfx + '.py')\n o = compileCode('', code, tt)\n templates[ rcsfx ] += [ o ]\n\n return templates", "def _fill_template(self, name, superclass):\n template = Template(self.template)\n template = template.substitute(namespace=self.module.namespace,\n module_name=self.module.name,\n name=name,\n superclass=superclass)\n return template", "def readTemplate(tfn):\n\n if opts.verbose: print \"fetching template\", tfn\n\n found = 0\n foundInRoot = 0\n\n # check in user-specified template root.\n if opts.templates:\n fn = join(opts.templates, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n found = 1\n\n # check in hierarchy root\n if not found:\n fn = join(opts.root, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n foundInRoot = 1\n found = 1\n\n # look for it in the environment var path\n if not found:\n try:\n curatorPath = os.environ[ 'CURATOR_TEMPLATE' ]\n pathlist = string.split(curatorPath, os.pathsep)\n for p in pathlist:\n fn = join(p, tfn)\n if opts.verbose: print \" looking in %s\" % fn\n if exists(fn):\n found = 1\n break\n except KeyError:\n pass\n\n if found == 
1:\n # read the file\n try:\n tfile = open(fn, \"r\")\n t = tfile.read()\n tfile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't open image template file:\", fn\n sys.exit(1)\n if opts.verbose: print \" succesfully loaded template\", tfn\n\n else:\n # bah... can't load it, use fallback templates\n if opts.verbose:\n print \" falling back on simplistic default templates.\"\n global fallbackTemplates\n try:\n t = fallbackTemplates[ splitext(tfn)[0] ]\n except KeyError:\n t = ''\n\n # Save templates in root, if it was requested.\n if opts.save_templates and foundInRoot == 0:\n rootfn = join(opts.root, tfn)\n if opts.verbose: print \" saving template in %s\" % rootfn\n\n # saving the file template\n if exists(rootfn):\n bakfn = join(opts.root, tfn + '.bak')\n if opts.verbose: print \" making backup in %s\" % bakfn\n import shutil\n try:\n shutil.copy(rootfn, bakfn)\n except:\n print >> sys.stderr, \\\n \"Error: can't copy backup template %s\", bakfn\n\n try:\n ofile = open(rootfn, \"w\")\n ofile.write(t)\n ofile.close()\n except IOError, e:\n print >> sys.stderr, \"Error: can't save template file to\", rootfn\n\n return t", "def preload_all_problems(self):\n for _, _, filenames in os.walk(self.problemDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n self.load_problem_file(filename[0:-3])", "def layout (self, lydef):\n\n # Categorize files\n fout = self.categorize()\n\n ly = defaultdict(list)\n\n # For each template path, attempt to map all files in that category\n # and add any files that renders completely to the layout.\n for tmplsrc, category in lydef.items():\n tmpl = Template(tmplsrc)\n for a, f in fout[category]:\n # print('%s: Try %s matched to %s in %s' % (category, tmplsrc, f, a))\n try:\n path = os.path.join(tmpl.substitute(a.info),\n os.path.basename(f))\n ly[path].append((a, f))\n except KeyError as e:\n print(' -- %s info key %s not found' % (a, e))\n pass\n\n # Sort providing sources for each path.\n # E.g., prefer .redist. 
before .symbols., etc.\n for path in ly:\n ly[path].sort(reverse=True)\n\n return ly", "def include_templates(self):\n if self._include_templates is None:\n result = []\n for inc in self._includes:\n result.append(self.manager.get_template(inc))\n self._include_templates = result\n return self._include_templates", "def test_app_loader(self):\n\n with mock.patch('template_tree.template_finder.apps', new=self.mock_apps):\n self.assertEqual(\n list(template_finder.templates_for_engine(self.engine_config)),\n [\n ('abc.html', '/tmp/project/project/templates/abc.html'),\n ('my_app/def.html', '/tmp/project/my_app/templates/my_app/def.html'),\n ('your_app/def.html', '/tmp/project/your_app/templates/your_app/def.html'),\n ]\n )", "def __init__(self, template_dirs=[], include_comments=False,\n include_introspection_server=False):\n # Flag to enable rendering of header and footer comments in templates\n self._include_comments = include_comments\n\n # Flag to enable inclusion of introspection server (for use with\n # smach_viewer)\n self._include_introspection_server = include_introspection_server\n\n # Create list of any custom user-defined template dirs + default\n # template dir\n self._template_dirs = (\n template_dirs +\n [os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'templates')])\n\n # Create template loader for the template directories\n template_loaders = [\n jinja2.FileSystemLoader(template_dir)\n for template_dir in self._template_dirs]\n self._template_loader = jinja2.ChoiceLoader(template_loaders)\n\n # Create an environment for reading and parsing templates, including\n # the SkipBlockExtension class to allow for skipping certain blocks.\n self._template_env = (\n jinja2.Environment(loader=self._template_loader,\n extensions=[jinja2.ext.do,\n SkipBlockExtension],\n trim_blocks=False,\n lstrip_blocks=True))\n\n # Skip comment blocks as required\n if not self._include_comments:\n self._template_env.skip_blocks.append('upper_comments')\n self._template_env.skip_blocks.append('lower_comments')\n\n # Skip introspection server blocks as required\n if not self._include_introspection_server:\n self._template_env.skip_blocks.append('introspection_server')\n self._template_env.skip_blocks.append('spin')\n\n # Register custom tests with the environment\n self._template_env.tests['expression'] = expression\n self._template_env.tests['not_string'] = not_string\n\n # Register custom filters with the environment\n self._template_env.filters['exptostr'] = exptostr\n\n # Create a template references cache dictionary\n # to be indexed by template names.\n self._template_ref_names_cache = {}\n\n # Create a template block names cache dictionary\n # to be indexed by template names.\n self._template_block_names_cache = {}\n\n # Create a template block cache dictionary\n # to be indexed by tuples of the form (template_name, block_name)\n self._template_block_cache = {}\n\n pass", "def populate_models(tree) :\n\te_root = tree.getroot()\n\tpopulate_crisis(e_root)\n\tpopulate_person(e_root)\n\tpopulate_org(e_root)", "def copy_templates(root_directory, dist_directory, sdk_directory,\n cpus, families, boards):\n\n def _process(when, contexts):\n for context in contexts:\n for template in configuration.TEMPLATES:\n if template[\"when\"] == when:\n context.update({\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n })\n\n source = templates.from_string(template[\"source\"], context)\n target = templates.from_string(template[\"target\"], context)\n target = 
os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Processing '%s'\\n\" % source)\n\n if template[\"type\"] == \"file\":\n templates.from_file(source, target, context)\n elif template[\"type\"] == \"glob\":\n for source_file in glob.glob(source):\n if os.path.isfile(source_file):\n target_file = os.path.join(\n target, os.path.basename(source_file))\n\n templates.from_file(\n source_file, target_file, context)\n else:\n raise Exception(\"Not supported\")\n\n _process(\"per_family\", families)\n _process(\"per_cpu\", cpus)\n _process(\"per_board\", boards)\n _process(\"per_once\", [{\n \"families\": [family[\"family\"] for family in families],\n \"cpus\": [cpu[\"cpu\"] for cpu in cpus],\n \"boards\": [board[\"board\"] for board in boards]\n }])", "def _set_templates(spm_dir=SPM_DIR):\n global EPI_TEMPLATE, T1_TEMPLATE, GM_TEMPLATE, WM_TEMPLATE, CSF_TEMPLATE\n\n spm_version = _get_version_spm(SPM_DIR)\n\n # Set the tpm and template paths according to SPM version\n if spm_version == 'spm12':\n template_path = 'toolbox/OldNorm'\n tpm_path = 'toolbox/OldSeg'\n else:\n template_path = 'templates'\n tpm_path = 'tpm'\n\n # configure template images\n EPI_TEMPLATE = os.path.join(SPM_DIR, template_path, 'EPI.nii')\n SPM_T1_TEMPLATE = os.path.join(SPM_DIR, template_path, 'T1.nii')\n T1_TEMPLATE = \"/usr/share/data/fsl-mni152-templates/avg152T1.nii\"\n if not os.path.isfile(T1_TEMPLATE):\n T1_TEMPLATE += '.gz'\n if not os.path.exists(T1_TEMPLATE):\n T1_TEMPLATE = SPM_T1_TEMPLATE\n GM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'grey.nii')\n WM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'white.nii')\n CSF_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'csf.nii')", "def load_full(self):\n\t\tfor filename in self.FILENAMES:\n\t\t\tself.load(filename)\n\t\tself.reverse_dicts()", "def __init__(self, base=None, sub_files=None):\n self.categories = dict()\n self.base = base\n self.files = list()\n if not sub_files:\n sub_files = self.sub_node_paths\n for sub in sub_files:\n loaded = import_property(sub)(self)\n self.files.append(loaded)\n if loaded.category not in self.categories:\n self.categories[loaded.category] = list()\n self.categories[loaded.category].append(loaded)\n for contents in self.categories.values():\n contents.sort(key=lambda h: h.key)", "def load_template(name, stdout = sys.stdout, stderr = sys.stderr):\n path = \"weblab\" + os.sep + \"admin\" + os.sep + \"config_templates\" + os.sep + name\n try:\n f = file(path, \"r\")\n template = f.read()\n f.close()\n except:\n print(\"Error: Could not load template file %s. 
Probably couldn't be found.\" % path, file=stderr)\n return template", "def templates(self):\n if self._templates is None:\n self._templates = self.get_all_templates()\n return self._templates", "def load_chain_rules(tpl_name, project_name, quiet):\n tpls = get_available_templates()\n if tpl_name not in tpls:\n raise CloneError(\"Parent template %s was not found\" % tpl_name)\n tpl_path = tpls[tpl_name]\n tpl_file = join(tpl_path, PARENT_TPL_FILE)\n parent_rules = None\n rules = load_rules(tpl_path, project_name, quiet)\n data = getattr(rules, \"data\", {})\n file_name = getattr(rules, \"file_name\", None)\n scripts = list(getattr(rules, \"postclone_scripts\", []))\n if rules and not getattr(rules, \"extend\", False):\n return rules\n if exists(tpl_file):\n with open(tpl_file) as fil:\n tpl_name = re.sub(r'[^\\w-]', '', fil.read())\n parent_rules = load_chain_rules(tpl_name, project_name, quiet)\n merged_rules = MergedRules()\n parent_data = getattr(parent_rules, \"data\", {})\n parent_data.update(data)\n merged_rules.data = parent_data\n merged_rules.file_name = file_name or getattr(parent_rules, \"file_name\", None)\n parent_scripts = list(getattr(parent_rules, \"postclone_scripts\", []))\n merged_rules.postclone_scripts = list(set(parent_scripts + scripts))\n return merged_rules", "def test_list_template_for_all_namespaces(self):\n pass", "def __init__(self, ctx, verbose=0) -> None:\n\n self.__ctx__ = ctx\n TemplateHandler.templates_path = ctx.obj['TEMPLATES_FOLDER']\n self.verbose = verbose\n\n self.validate_templates_path()\n if len(TemplateHandler.templates) == 0:\n self.load_templates()", "def initialise_languages():\n is_language_folder = r\"^[^\\\\\\.]*\" # Cannot have backslash or dot.\n language_folder_path = os.path.join(definitions.ROOT_DIR, \"languages\")\n\n for root, dirs, files in os.walk(language_folder_path):\n for name in files:\n if name.startswith(\"_NEW_\") or name.startswith(\"_CHANGED_\"):\n # Files that are auto-generated will not be added.\n continue\n\n full_dir = os.path.join(root, name)\n relative_dir = full_dir.replace(language_folder_path + \"\\\\\", \"\")\n\n match = re.match(is_language_folder, relative_dir)\n language_id = match.group(0)\n\n language = None\n if language_id != \"cache\":\n language = definitions.LANGUAGES[language_id]\n else:\n language = definitions.CACHE_LANGUAGE\n\n if relative_dir == \"{id}\\\\commands.json\".format(id=language_id):\n # Take the commands.\n definitions.COMMANDS.add_command_localisation(full_dir, language_id)\n\n elif relative_dir == \"{id}\\\\languages.json\".format(id=language_id):\n # Take the languages.\n language.add_languages(full_dir)\n\n elif relative_dir == \"{id}\\\\meta.json\".format(id=language_id):\n # Take the metadata.\n language.add_meta(full_dir)\n\n elif relative_dir == \"{id}\\\\permissions.json\".format(id=language_id):\n # Take the permissions.\n language.add_permission_names(full_dir)\n\n elif relative_dir == \"{id}\\\\units.json\".format(id=language_id):\n language.add_units(full_dir)\n\n else:\n # Take the keys\n language.add_keys_from_path(full_dir)\n\n for name in dirs:\n full_dir = os.path.join(root, name)\n relative_dir = full_dir.replace(language_folder_path + \"\\\\\", \"\")\n\n match = re.fullmatch(is_language_folder, relative_dir)\n if match is not None:\n if name != \"cache\":\n definitions.LANGUAGES[name] = Language(name)\n else:\n definitions.CACHE_LANGUAGE = Language(name)", "def __init__(self, settings):\n self.mylookup = TemplateLookup(\n directories = [join(settings['root'],rpath) 
for rpath in settings['view_paths']],\n output_encoding='utf-8')\n \n self.routes = []\n self._part_matcher = re.compile(r'{.*?}')\n self.error_handler = noop", "def test_read_namespaced_template(self):\n pass", "def get_templates_dirs(self):\n from pkg_resources import resource_filename\n return [resource_filename(__name__, 'templates')]", "def test_theme_template_loading_by_prefix():\n app = create_ctfd()\n with app.test_request_context():\n tpl1 = render_template_string(\"{% extends 'core/page.html' %}\", content=\"test\")\n tpl2 = render_template(\"page.html\", content=\"test\")\n assert tpl1 == tpl2", "def load_systems(self, systems_dir: Path) -> None:\n system_settings:list = systems_dir.glob(\"*.yaml\")\n dependencies = defaultdict(list)\n\n for settings_file in system_settings:\n loaded = quiet_parse(settings_file)\n if loaded is None:\n continue\n\n system_name = next(iter(loaded))\n loaded_contents = loaded[system_name]\n\n if \"extends\" in loaded_contents:\n dependencies[loaded_contents[\"extends\"]].append(loaded)\n continue\n\n self.merge_data(loaded, namespace=\"npc.systems\")\n\n def load_dependencies(deps: dict):\n \"\"\"Handle dependency loading\n \n Unrecognized parents are stored away for the next iteration. Otherwise, children are merged with \n their parent's attributes, then merged into self.\n\n If the dependencies do not change for one iteration, then the remaining systems cannot be loaded \n and are skipped.\n \n Args:\n deps (dict): Dict mapping parent system keys to child system configs\n \"\"\"\n new_deps = {}\n for parent_name, children in deps.items():\n if parent_name not in self.get(\"npc.systems\"):\n new_deps[parent_name] = children\n continue\n\n for child in children:\n child_name = next(iter(child))\n parent_conf = dict(self.get(f\"npc.systems.{parent_name}\"))\n combined = merge_data_dicts(child[child_name], parent_conf)\n self.merge_data(combined, namespace=f\"npc.systems.{child_name}\")\n if not new_deps:\n return\n if new_deps == deps:\n logger.error(f\"Some systems could not be found: {deps.keys()}\")\n return\n load_dependencies(new_deps)\n\n load_dependencies(dependencies)", "def load(self):\n self.root = self._load()\n\n if self.ignore_case_in_keys:\n self.root = self._convert_keys_to_lower(self.root)", "def get_templates(instrument=''):\n import os, json\n template_path = os.path.dirname(__file__)\n template_names = [fn\n for fn in os.listdir(template_path)\n if fn.endswith(\".json\") and fn.startswith(instrument)]\n templates = dict([(tn[len(instrument)+1:-5],\n json.loads(open(os.path.join(template_path, tn), 'r').read()))\n for tn in template_names])\n return templates", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def test_starting_template(checker):\n contents = labeled.contents(label=\"template\")\n _ = tomllib.loads(contents)", "def create_templates(self):\n for name, params in list_registered_templates():\n if self['templates'].filter(theme=self, name=name).count() == 0:\n self['templates'].create(theme=self, name=name)", "def load_template(format_: str) -> Template:\n template_path = Path(TEMPLATES_PATH).joinpath(f'{format_}{TEMPLATE_SUFFIX}')\n template = Template(template_path.read_text())\n return template", "def load_dependencies(deps: dict):\n new_deps = {}\n for parent_name, children in deps.items():\n if parent_name not in self.get(\"npc.systems\"):\n new_deps[parent_name] = children\n 
continue\n\n for child in children:\n child_name = next(iter(child))\n parent_conf = dict(self.get(f\"npc.systems.{parent_name}\"))\n combined = merge_data_dicts(child[child_name], parent_conf)\n self.merge_data(combined, namespace=f\"npc.systems.{child_name}\")\n if not new_deps:\n return\n if new_deps == deps:\n logger.error(f\"Some systems could not be found: {deps.keys()}\")\n return\n load_dependencies(new_deps)" ]
[ "0.727779", "0.7196964", "0.65432745", "0.64201707", "0.6378715", "0.6374151", "0.6279858", "0.6175564", "0.6173083", "0.60840946", "0.60698014", "0.5997303", "0.5987318", "0.5981226", "0.59653664", "0.5951422", "0.58668286", "0.58246344", "0.58026916", "0.576884", "0.5767016", "0.5723341", "0.571188", "0.5685532", "0.5623685", "0.5613112", "0.56080484", "0.559998", "0.559293", "0.5583293", "0.557433", "0.5518951", "0.55124813", "0.55038077", "0.54963523", "0.54941434", "0.5479352", "0.5470005", "0.5456973", "0.5453805", "0.54398793", "0.54289097", "0.54215384", "0.5414817", "0.5412801", "0.5412511", "0.539131", "0.5373803", "0.5364548", "0.53591746", "0.5351719", "0.5346815", "0.53465647", "0.53410715", "0.53340626", "0.5332676", "0.532819", "0.53168774", "0.5307067", "0.528952", "0.5287919", "0.52824795", "0.52824795", "0.5274192", "0.5270386", "0.52703464", "0.5265913", "0.5256262", "0.52480966", "0.5220849", "0.5215494", "0.52086484", "0.5201326", "0.52000993", "0.5175693", "0.51707006", "0.51671696", "0.516357", "0.5163521", "0.5159488", "0.5155694", "0.515175", "0.51503986", "0.5148937", "0.5144406", "0.51254493", "0.51244724", "0.51164657", "0.511576", "0.51140743", "0.5113163", "0.5111763", "0.5108701", "0.510694", "0.51023483", "0.5098449", "0.50930554", "0.5090716", "0.5089782", "0.5077941" ]
0.5642072
24
Process the data in the templates and set attributes accordingly.
def _ProcessTemplate(self,topdir):
    self.dicomdir = "%s/anatomicals" % self.topdir
    self.rawdir = "%s/raw" % topdir
    self.rawdirs = {}
    tmplt = self._GetTemplate()
    if self.opts.outdir is not None:
        # Override template output directory.
        tmplt['top_outdir'] = self.opts.outdir
    self.tmplt = tmplt
    if len(tmplt['top_outdir']) == 0:
        tmplt['top_outdir'] = os.path.realpath(self.topdir)
        raise RuntimeError('Template file must specify an output directory.')
    tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])
    if '/home' in tmplt['top_outdir'][:7]:
        raise RuntimeError('Image data cannot be stored in the /home partition. Change the "top_outdir" entry in the template file: %s.' % (' '.join(self.templates)))
    # tmplt['subject'] = 'orig'
    self.procdir = os.path.abspath("%s/%s" % \
                    (tmplt['top_outdir'],tmplt['subject']))
    target = os.path.abspath('%s/../..' % tmplt['top_outdir'])
    if not ismounted(target):
        raise RuntimeError('Could not access partition at %s' % target)
    self.anatdir = "%s/anat" % self.procdir
    self.fmapdir = "%s/%s" % (self.procdir,tmplt['fmap']['outdir'])
    self.dtidir = "%s/%s" % (self.procdir,tmplt['dti']['outdir'])
    self.logdir = "%s/%s" % (self.procdir,tmplt['logdir'])
    self.skip = tmplt.get('skip', DEFAULT_SKIP)
    self.acq_tr = tmplt.get('acq_tr',None)
    self.episetup_dir = "%s/%s" % (self.procdir,tmplt['first_epi'])
    self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)
    self.epi_file_format = self.tmplt['epi_file_format']
    self.censor_thresh = tmplt.get('censor_threshold', 2.)
    self.censor_interleave = tmplt.get('censor_interleave', True)
    # self.server_userid = self.tmplt.get('server_userid','default')

    # Overide flags for aligning EPIs and skull-stripping with command-
    # line options.
    if self.opts.align_fmaps:
        self.align_fmaps = True
    else:
        self.align_fmaps = self.tmplt.get('epi_align', False)

    if self.opts.no_align_fmaps:
        self.no_align_fmaps = True
    else:
        self.no_align_fmaps = self.tmplt.get('no_epi_align', False)

    if self.opts.skull_strip:
        self.skull_strip = True
    else:
        self.skull_strip = self.tmplt.get('skull_strip', False)

    # Create log file now so it can be used immediately.
    if not os.path.exists(self.logdir):
        if self.verbose:
            print 'mkdir %s' % self.logdir
        if not self.opts.fake_opts:
            self.MakeDir(self.logdir)

    self._ProcessTemplateEpiInfo()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def prepare(self):\n self.parse_template()\n self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def run(self):\n self.load_template()\n self.load_data()\n self.load_files()\n self.render_content()\n self.process()\n # pprint(self.data)", "def 
prepare_data(self):", "def get_template_data(cls, pydata, view):\n return dict(previewdata=cls.get_previewdata(pydata),\n content_types=view.content_types,\n title=cls.html_title,\n brandingtitle=cls.html_brandingtitle,\n heading=cls.html_heading)", "def _process(self):\n # choose the correct transform model before processing TI data\n self._select_transform()\n\n # process type first, fail early\n self._process_type()\n\n # process type specific data\n if isinstance(self.transform, GroupTransformModel):\n self._process_group()\n elif isinstance(self.transform, IndicatorTransformModel):\n self._process_indicator()\n\n # self.process_associations(self.transform.associations)\n self._process_associated_group(self.transform.associated_groups)\n self._process_attributes(self.transform.attributes or [])\n self._process_security_labels(self.transform.security_labels or [])\n self._process_tags(self.transform.tags or [])\n\n # date added\n self._process_metadata_datetime('dateAdded', self.transform.date_added)\n\n # last modified\n self._process_metadata_datetime('lastModified', self.transform.last_modified)\n\n # xid\n self._process_metadata('xid', self.transform.xid)", "def apply_to(self, template):\n pass", "def _template_data(self):\n return {\"form\": self.form.render()}", "def _fill_user_specific_attributes(self, template_dictionary):\n template_dictionary[KEY_INCLUDE_TOOLTIP] = False\n template_dictionary[KEY_WRAP_CONTENT_IN_MAIN_DIV] = True\n template_dictionary[KEY_CURRENT_TAB] = 'none'\n\n return template_dictionary", "def render(data_dict, *args, **kwargs):", "def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template", "def prepare_woo_template_vals(self, template_data, odoo_template_id, import_for_order,\n woo_instance, common_log_book_id):\n if import_for_order:\n woo_category_ids = self.sync_woo_categ_with_product_v1_v2_v3(woo_instance,\n common_log_book_id,\n template_data[\n \"woo_categ_ids\"],\n woo_instance.sync_images_with_product)\n woo_tag_ids = self.sync_woo_tags_with_product_v1_v2_v3(woo_instance,\n template_data[\"woo_tag_ids\"])\n else:\n woo_category_ids = []\n woo_tag_ids = []\n for woo_category in template_data[\"woo_categ_ids\"]:\n woo_categ = self.env[\"woo.product.categ.ept\"].search(\n [(\"woo_categ_id\", \"=\", woo_category.get(\"id\")),\n ('woo_instance_id', '=', woo_instance.id)], limit=1).id\n woo_categ and woo_category_ids.append(woo_categ)\n for woo_tag in template_data[\"woo_tag_ids\"]:\n product_tag = self.env[\"woo.tags.ept\"].search(\n [(\"woo_tag_id\", \"=\", woo_tag.get(\"id\")),\n ('woo_instance_id', '=', woo_instance.id)], limit=1).id\n product_tag and 
woo_tag_ids.append(product_tag)\n\n template_data.update({\n \"product_tmpl_id\":odoo_template_id,\n \"exported_in_woo\":True,\n \"woo_categ_ids\":[(6, 0, woo_category_ids)],\n \"woo_tag_ids\":[(6, 0, woo_tag_ids)]\n })\n return template_data", "def prepare(self, **template_parameters: Any) -> None:\n\n self._body_plain = render_template(self._body_template_base_path + '.txt', **template_parameters)\n self._body_html = render_template(self._body_template_base_path + '.html', **template_parameters)", "def get_context_data(self, **kwargs):\n\n context = super(MonFinView, self).get_context_data()\n if test_on == False:\n rezult_1, rezult_2, rezult_3, proc_error, title_1, title_2, titles_10, data_p, critical = processing_r()\n context['table_1'] = rezult_1\n context['table_2'] = rezult_2\n context['table_3'] = rezult_3\n context['titles_10'] = titles_10\n context['data_p'] = data_p\n context['critical'] = critical\n else: # Test data for the template\n table_1, table_23, proc_error, title_1, title_2, critical = templates_test()\n context['table_1'] = table_1\n context['table_2'] = table_23\n context['table_3'] = table_23\n context['critical'] = critical\n context['data_p'] = {'company': '',\n 'position': '',\n 'name': 'Копенок Виктор',\n 'phone': '',\n 'email': 'vkopenok@mail.ru'\n }\n context['titles_1'] = title_1\n context['titles'] = title_2\n context['connect'] = proc_error\n context['version'] = VERSION\n return context", "def generate_extra_data(self):\n self.data[\"male_initial\"], self.data[\"female_initial\"] = \\\n self.get_initial_student_count()\n \n date_line = '<p class=\"report-title\"> %s</p>' \\\n %(self.start_date.strftime(\"%B %Y\"))\n row1 = \"\"\"\n <table>\n <tr class=\"tblRow\"><td>%s</td><td>Enrollment For Year</td>\n <td>Male:</td><td>%d</td><td>Female:</td><td>%d</td>\n <td>Total:</td><td>%d</td></tr>\n \"\"\" %(unicode(self.school), self.data[\"male_initial\"], \n self.data[\"female_initial\"], \n self.data[\"male_initial\"] + self.data[\"female_initial\"])\n row2 = \"\"\"\n <tr class=\"tblOddRow\"><td>%s</td><td>Enrollment For Month</td>\n <td>Male:</td><td>%d</td><td>Female:</td><td>%d</td>\n <td>Total:</td><td>%d</td></tr>\n \"\"\" %(unicode(self.section), self.data[\"male_current\"], \n self.data[\"female_current\"],\n self.data[\"male_current\"] + self.data[\"female_current\"])\n row3 = \"\"\"\n <tr class=\"tblRow\"><td>%s</td><td>Average Attendance</td>\n <td>Male:</td><td>%.1f</td><td>Female:</td><td>%.1f</td>\n <td>Total:</td><td>%.1f</td></tr>\n \"\"\" %(\"Secondary\", self.data[\"aa_male\"], self.data[\"aa_female\"] ,\n self.data[\"aa_combined\"])\n row4 =\"\"\"\n <tr class=\"tblOddRow\"><td>%s</td><td>Percentage of Attendance</td>\n <td>Male:</td><td>%.1f %% </td><td>Female:</td><td>%.1f %% </td>\n <td>Total:</td><td>%.1f %% </td></tr>\n \"\"\" %(unicode(self.school.municipality), self.data[\"pa_male\"], \n self.data[\"pa_female\"], self.data[\"pa_combined\"])\n row5 = \"\"\"\n <tr class=\"tblRow\"><td>School Days: %d</td><td>Percentage of Enrollment</td>\n <td>Male:</td><td>%.1f %% </td><td>Female:</td><td>%.1f %% </td>\n <td>Total:</td><td>%.1f %% </td></tr>\n </table>\n \"\"\" %(self.data[\"num_school_days\"], \n self.data[\"male_current\"] * 100.0 / self.data[\"male_initial\"],\n self.data[\"female_current\"] * 100.0 / \n self.data[\"female_initial\"],\n (self.data[\"male_current\"] + self.data[\"female_current\"]) * \n 100.0 /\n (self.data[\"male_initial\"] + self.data[\"female_initial\"]))\n self.extra_data = date_line + row1 + row2 + row3 + row4 + 
row5", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def __init__(self, template):\n\n self.template = template\n self.parsed_template = {}", "def setData(self, params):\n\n input_template = None\n if \"id\" in params and params[\"id\"]:\n input_template = InputTemplate.objects.get(pk=params[\"id\"])\n else:\n input_template = InputTemplate.objects.get(code=params[\"code\"])\n\n self._checkInputTemplateSecurity(input_template)\n return self._setData(input_template, params)", "def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id", "def _render(self, f_target, data):\n \n evt = self.plugin.dispatch(\n jink.plugin.Event('onBeforeRender', self, data=data, target=f_target),\n permit_cancel=True)\n if evt.isCancelled(): return\n data = evt.extra.data\n f_target = evt.extra.target\n \n # check if inheritance is specified\n refs = jinja2.meta.find_referenced_templates(self.engine.parse(data))\n try:\n refs.next()\n except StopIteration, e:\n # no, so insert default template\n t = self._filter(f_target, self.templates)\n if t: data = ( '{%% extends \"%s\" %%}\\n' % t ) + data\n \n # render\n data = self.engine.from_string(data).render({ 'config' : self.config.clone({ 'jink.path' : f_target}).get })\n self.log(2, '------------------------------')\n self.log(2, data)\n self.log(2, '------------------------------')\n return data", "def handle_data(self, data):\n\n # Medium export files have a footer with junk that's not part of the original post.\n # Stop processing entirely if we hit the document footer.\n if self.seen_footer:\n return\n\n # If this text is part of an image caption, slap that caption on the last Image card so the caption\n # ends up in the right place and bail out.\n if \"figcaption\" in self.tag_stack:\n self.cards[-1][1][\"caption\"] = data\n return\n\n # If we are nested inside a <pre>, we are dealing with code content. 
Just append it to the current code\n # card and bail.\n if \"pre\" in self.tag_stack:\n self.cards[-1][1][\"code\"] += data\n return\n\n # If we got this fair, we have regular HTML text that may or may not be nested inside a <strong>, <em>, etc tag.\n # In Mobiledoc, the easiest way to to annotate each text string with all the formats (strong, em) etc that apply\n # to it.\n # So let's loop through the html tag stack and see all the formatting tags that apply to this piece of text.\n markups_for_data = []\n markup_count = 0\n\n body = []\n if \"a\" in self.tag_stack:\n markups_for_data.append(len(self.markups) - 1)\n markup_count += 1\n if \"em\" in self.tag_stack:\n markups_for_data.append(0)\n markup_count += 1\n if \"strong\" in self.tag_stack:\n markups_for_data.append(1)\n markup_count += 1\n\n # Finally, generate a Mobiledoc tag containing the text and all the formatting tags that apply to it.\n body = [0, markups_for_data, markup_count, data]\n\n self.current_markers.append(body)", "def _setup_report_data(self):\n # current_app.logger.debug('Setup report data template starting.')\n template = self._get_template()\n current_app.logger.debug('Setup report data template completed, setup data starting.')\n data = {\n 'reportName': self._get_report_filename(),\n 'template': template,\n 'templateVars': self._get_template_data()\n }\n current_app.logger.debug('Setup report data completed.')\n return data", "def write_data(self,template=None,**data):\n \n if not self.api_call:\n if template is None:\n template = self.default_template\n \n self.render(template,**data)\n return\n\n def _parsed():\n for k,v in data.iteritems():\n if hasattr(v,\"as_type\"):\n yield (k,v.as_type(self.api_type))\n else:\n yield (k,v)\n\n data = dict(_parsed())\n\n\n if self.api_type == \"json\":\n self.write(json.dumps(data))\n self.finish()", "def prepare_template(self, rest_handler, key=''):\n template_values = {}\n template_values['page_title'] = self.format_title('Edit Question')\n template_values['main_content'] = self.get_form(rest_handler, key=key)\n\n return template_values", "def parse_arguments(self):\n self.args = self.argparser.parse_args(self.template_args) # noqa: T484\n\n # get values from args or defaults\n for name, (categ, rest) in self.data.items():\n if categ not in '<>?':\n continue\n val = getattr(self.args, name)\n if rest.get('type') == 'flag':\n val = str(rest.get('val')) if val else ''\n else:\n val = val if val is not None else rest.get('default')\n self.variables[name] = val\n\n # possibly fill in substitutions in the template variables\n findreplace = re.compile(r'{{\\s*(\\w+)\\s*}}')\n for name, val in self.variables.items():\n if findreplace.search(val):\n t = jinja2.Template(val)\n self.variables[name] = t.render(self.variables)", "def parse_template(data, template):\n img_html = \"\"\"<div class=\"thumb-wrap\"><div class=\"thumb-holder\"></div><a href=\"{{URL}}\" target=\"_top\"><div class=\"thumb-img\" style=\"background-image:url('{{IMG}}');\"></div></a></div>\"\"\"\n template = template.replace('{{URL}}', data['link'].replace('http:','https:'))\n template = template.replace('{{URLX}}', data['link'])\n template = template.replace('{{TITLE}}', data['title'])\n #template = template.replace('{{BLURB}}', data['summary'])\n img_html = img_html.replace('{{URL}}', data['link'].replace('http:','https:'))\n if hasattr(data, 'tags') and len(data['tags']) > 0:\n template = template.replace('{{SECTION}}', data['tags'][0]['term'])\n else:\n template = template.replace('<h2><a 
href=\"{{URL}}\" target=\"_top\">{{SECTION}}</a></h2>', '')\n if hasattr(data, 'media_content') and len(data['media_content']) > 0:\n template = template.replace('{{IMG}}', '%s?w=150' % data['media_content'][0]['url'].replace('http:','https:'))\n else:\n template = template.replace(img_html, '')\n\n return template", "def create(self, validated_data):\n\n # Create the Attribute instance\n # Creates an instance regardless errors happen later\n template = ProductTemplate.objects.create(\n name=validated_data['name']\n )\n\n # Create each AttributeProduct instance\n if validated_data.get('attribute_product'):\n for item in validated_data['attribute_product']:\n AttributeProduct.objects.create(\n attribute=Attribute(item['attribute']['id']),\n product_template=template\n )\n\n # Create each AttributeProduct instance\n if validated_data.get('attribute_variant'):\n for item in validated_data['attribute_variant']:\n AttributeVariant.objects.create(\n attribute=Attribute(item['attribute']['id']),\n product_template=template\n )\n\n return template", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def prepare_product_update_data(self, template, update_image, update_basic_detail, data):\n instance = template.woo_instance_id\n flag = False\n tmpl_images = []\n if update_image:\n tmpl_images += self.get_gallery_images(instance, template, template.product_tmpl_id)\n data.update({\"images\":tmpl_images})\n flag = True\n\n if update_basic_detail:\n\n weight = self.convert_weight_by_uom(template.product_tmpl_id.weight, instance)\n\n description = ''\n short_description = ''\n if template.woo_description:\n woo_template_id = template.with_context(lang=instance.woo_lang_id.code)\n description = woo_template_id.woo_description\n\n if template.woo_short_description:\n woo_template_id = template.with_context(lang=instance.woo_lang_id.code)\n short_description = woo_template_id.woo_short_description\n data.update({\n 'name':template.name,\n 'enable_html_description':True,\n 'enable_html_short_description':True, 'description':description,\n 'short_description':short_description,\n 'weight':str(weight),\n 'taxable':template.taxable and 'true' or 'false'\n })\n woo_categ_ids = list(map(int,template.woo_categ_ids.mapped(\"woo_categ_id\")))\n if all(woo_categ_ids):\n categ_ids = [{'id': cat_id} for cat_id in woo_categ_ids]\n data.update({'categories':categ_ids})\n\n woo_tag_ids = list(map(int,template.woo_tag_ids.mapped(\"woo_tag_id\")))\n if all(woo_tag_ids):\n tag_ids = [{'id': tag_id} for tag_id in woo_tag_ids]\n data.update({'tags':tag_ids})\n\n return flag, data", "def process(self, date_generator=None, **kwargs):\n template = Template(self.template)\n\n if date_generator is None:\n date_generator = self.date_generator\n\n # Only the passed objects will be accessible from the template\n # the next built-in needs to be passed for next(date_generator) to work\n return 
template.render(fake=self.fake, datetime=datetime,\n date_generator=date_generator,\n next=next, **self.providers, **kwargs)", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def common_template_data(request, revision=None, mime_type=None):\n\n cfg = request.cfg\n\n # Initialize data dictionary members (sorted alphanumerically)\n data = TemplateData(\n {\n \"annotate_href\": None,\n \"cfg\": cfg,\n \"docroot\": (\n cfg.options.docroot is None\n and request.script_name + \"/\" + docroot_magic_path\n or cfg.options.docroot\n ),\n \"download_href\": None,\n \"download_text_href\": None,\n \"graph_href\": None,\n \"home_href\": request.script_name or \"/\",\n \"kv\": request.kv,\n \"lockinfo\": None,\n \"log_href\": None,\n \"nav_path\": nav_path(request),\n \"pathtype\": None,\n \"prefer_markup\": ezt.boolean(0),\n \"queryform_href\": None,\n \"rev\": None,\n \"revision_href\": None,\n \"rootname\": (request.rootname and request.server.escape(request.rootname) or None),\n \"rootpath\": request.rootpath,\n \"roots_href\": None,\n \"roottype\": request.roottype,\n \"rss_href\": None,\n \"tarball_href\": None,\n \"up_href\": None,\n \"username\": request.username,\n \"view\": _view_codes[request.view_func],\n \"view_href\": None,\n \"vsn\": __version__,\n \"where\": request.server.escape(request.where),\n }\n )\n\n rev = revision\n if not rev:\n rev = request.query_dict.get(\"annotate\")\n if not rev:\n rev = request.query_dict.get(\"revision\")\n if not rev and request.roottype == \"svn\":\n rev = request.query_dict.get(\"pathrev\")\n try:\n data[\"rev\"] = hasattr(request.repos, \"_getrev\") and request.repos._getrev(rev) or rev\n except vclib.InvalidRevision:\n raise ViewVCException(\"Invalid revision\", \"404 Not Found\")\n\n if request.pathtype == vclib.DIR:\n data[\"pathtype\"] = \"dir\"\n elif request.pathtype == vclib.FILE:\n data[\"pathtype\"] = \"file\"\n\n if request.path_parts:\n dir = _path_join(request.path_parts[:-1])\n data[\"up_href\"] = request.get_url(\n view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1\n )\n\n if \"roots\" in cfg.options.allowed_views:\n data[\"roots_href\"] = request.get_url(view_func=view_roots, escape=1, params={})\n\n if request.pathtype == vclib.FILE:\n fvi = get_file_view_info(request, request.where, data[\"rev\"], mime_type)\n data[\"view_href\"] = fvi.view_href\n data[\"download_href\"] = fvi.download_href\n data[\"download_text_href\"] = fvi.download_text_href\n data[\"annotate_href\"] = fvi.annotate_href\n data[\"revision_href\"] = fvi.revision_href\n data[\"prefer_markup\"] = fvi.prefer_markup\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n if request.roottype == \"cvs\" and cfg.options.use_cvsgraph:\n data[\"graph_href\"] = request.get_url(view_func=view_cvsgraph, params={}, escape=1)\n file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {})\n entries = [item for item in file_data if item.name == request.path_parts[-1]]\n if len(entries) == 1:\n request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {})\n data[\"lockinfo\"] = entries[0].lockinfo\n elif request.pathtype == vclib.DIR:\n data[\"view_href\"] = request.get_url(view_func=view_directory, params={}, escape=1)\n if \"tar\" in cfg.options.allowed_views:\n data[\"tarball_href\"] = request.get_url(view_func=download_tarball, params={}, escape=1)\n if request.roottype == \"svn\":\n data[\"revision_href\"] = request.get_url(\n view_func=view_revision, 
params={\"revision\": data[\"rev\"]}, escape=1\n )\n\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n\n if is_querydb_nonempty_for_root(request):\n if request.pathtype == vclib.DIR:\n params = {}\n if request.roottype == \"cvs\" and request.pathrev:\n params[\"branch\"] = request.pathrev\n data[\"queryform_href\"] = request.get_url(\n view_func=view_queryform, params=params, escape=1\n )\n data[\"rss_href\"] = request.get_url(\n view_func=view_query, params={\"date\": \"month\", \"format\": \"rss\"}, escape=1\n )\n elif request.pathtype == vclib.FILE:\n parts = _path_parts(request.where)\n where = _path_join(parts[:-1])\n data[\"rss_href\"] = request.get_url(\n view_func=view_query,\n where=where,\n pathtype=request.pathtype,\n params={\"date\": \"month\", \"format\": \"rss\", \"file\": parts[-1], \"file_match\": \"exact\"},\n escape=1,\n )\n return data", "def post_process_request(req, template, data, content_type):\n\n return (template, data, content_type)", "def perform(self, context):\n\n engine = PySTEngine()\n engine.init(context[CTX_KEY_COMMON_COLLECTD_JMX_ATTR_TEMPLATE_FILE])\n replaced_template_name = engine.apply({'attribute':self._attribute}, None, True)\n context[CTX_KEY_COLLECTD_GENERIC_JMX_TEMPLATE_FILE] = replaced_template_name\n context[CTX_KEY_COLLECTD_GENERIC_JMX_ATTRIBUTE_BLOCK] = self._attribute\n\n super().perform(context)\n\n jmxType = context[CTX_KEY_COMMON_COLLECTD_JMX_TYPE]\n logger.debug(\"======================================================================\")\n logger.debug('[Transifig] Collectd-[%s] replaced template name [%s]', jmxType,\n replaced_template_name)\n logger.debug(\"======================================================================\")", "def fill_default_attributes(self, template_dictionary, escape_db_operations=False):\n template_dictionary = self._populate_user_and_project(template_dictionary, escape_db_operations)\n template_dictionary = self._populate_message(template_dictionary)\n template_dictionary = self._populate_menu(template_dictionary)\n\n if KEY_ERRORS not in template_dictionary:\n template_dictionary[KEY_ERRORS] = {}\n if KEY_FORM_DATA not in template_dictionary:\n template_dictionary[KEY_FORM_DATA] = {}\n if KEY_SUB_SECTION not in template_dictionary and KEY_SECTION in template_dictionary:\n template_dictionary[KEY_SUB_SECTION] = template_dictionary[KEY_SECTION]\n if KEY_SUBMENU_LIST not in template_dictionary:\n template_dictionary[KEY_SUBMENU_LIST] = None\n\n template_dictionary[KEY_CURRENT_VERSION] = cfg.BASE_VERSION\n return template_dictionary", "def render_html_with_data(self):\n self.logger.info('Builder {} prepares html data for {} file'.format(self.id, self.html_template_file()))\n return self \\\n .template_env.get_template(self.html_template_file()) \\\n .render(self.build_document().prepare_html_dict())", "def _config(self):\n tmpl = self._template_interface\n for p in tmpl._params:\n setattr(self, p._name, p.get_value())", "def build(self):\n self.logger.debug(\"run\")\n\n self.onInit()\n self.work()\n \n self.afterWork()\n\n template = Templateengine(self.currenttemplate)\n template.readTemplateFile()\n contenttype = self.settings.contenttype \n self.defaultTemplateParameter()\n \n try:\n self.content = template.get(self.tplparam)\n except Exception as ex:\n Emergency.stop(ex)\n\n self.onDone()\n \n self.logger.debug(\"done\")", "def __init__(self, jinja_template, report_generator):\n self._jinja_template = jinja_template\n self._report_generator = report_generator\n self._outp = 
{} # all section-specific information for the template\n # will be stored here\n self._config = self._report_generator.config", "def _update_attributes(self, data):\n self._set_avatar(data)\n self.boosts_since = parse_boosts_since(data)\n self.flags = parse_flags(data)\n self.nick = parse_nick(data)\n self.pending = parse_pending(data)\n self.role_ids = parse_role_ids(data)\n self.timed_out_until = parse_timed_out_until(data)", "def apply_instance_data(instance_data):\n from defcon import Font\n\n instance_ufos = []\n for path, data in instance_data:\n ufo = Font(path)\n set_weight_class(ufo, data)\n set_width_class(ufo, data)\n set_custom_params(ufo, data=data)\n ufo.save()\n instance_ufos.append(ufo)\n return instance_ufos", "def process_requirements(self):\r\n # Use local dicts and sets so that if there are exceptions, we don't\r\n # end up in a partially-initialized state.\r\n loaded = {}\r\n to_render = set()\r\n for attribute in self.get_attributes():\r\n loaded[attribute.name] = attribute.parse_from_xml(self.xml)\r\n if attribute.render:\r\n to_render.add(attribute.name)\r\n\r\n self.loaded_attributes = loaded\r\n self.to_render = to_render", "def sync_data(self):\n self.template_code = self.data_file.data_as_string", "def prepare_template_vals(self, woo_instance, product_response):\n template_info_vals = {\n \"name\":product_response.get(\"name\"),\n \"woo_tmpl_id\":product_response.get(\"id\"),\n \"woo_instance_id\":woo_instance.id,\n \"woo_short_description\":product_response.get(\"short_description\", \"\"),\n \"woo_description\":product_response.get(\"description\", \"\"),\n \"website_published\":True if product_response[\"status\"] == \"publish\" else False,\n \"taxable\":True if product_response[\"tax_status\"] == \"taxable\" else False,\n \"woo_categ_ids\":product_response.get(\"categories\"),\n \"woo_tag_ids\":product_response.get(\"tags\"),\n \"total_variants_in_woo\":len(product_response[\"variations\"]),\n \"woo_product_type\":product_response[\"type\"],\n \"active\":True\n }\n if product_response.get(\"date_created\"):\n template_info_vals.update(\n {\"created_at\":product_response.get(\"date_created\").replace(\"T\", \" \")})\n if product_response.get(\"date_modified\"):\n template_info_vals.update(\n {\"updated_at\":product_response.get(\"date_modified\").replace(\"T\", \" \")})\n return template_info_vals", "def prepare_product_data(self, woo_template, publish, update_price,\n update_image, basic_detail, common_log_id, model_id):\n template = woo_template.product_tmpl_id\n instance = woo_template.woo_instance_id\n data = {}\n if basic_detail:\n description = ''\n short_description = ''\n if woo_template.woo_description:\n woo_template_id = woo_template.with_context(lang=instance.woo_lang_id.code)\n description = woo_template_id.woo_description\n\n if woo_template.woo_short_description:\n woo_template_id = woo_template.with_context(lang=instance.woo_lang_id.code)\n short_description = woo_template_id.woo_short_description\n\n weight = self.convert_weight_by_uom(template.weight, instance)\n\n data = {\n 'enable_html_description':True, 'enable_html_short_description':True,\n 'type':'simple', 'name':woo_template.name,\n 'description':description, 'weight':str(weight),\n 'short_description':short_description,\n 'taxable':woo_template.taxable and 'true' or 'false',\n 'shipping_required':'true'\n }\n woo_categ_ids = list(map(int,woo_template.woo_categ_ids.mapped(\"woo_categ_id\")))\n if all(woo_categ_ids):\n categ_ids = [{'id': cat_id} for cat_id in woo_categ_ids]\n 
data.update({'categories':categ_ids})\n\n woo_tag_ids = list(map(int,woo_template.woo_tag_ids.mapped(\"woo_tag_id\")))\n if all(woo_tag_ids):\n tag_ids = [{'id': tag_id} for tag_id in woo_tag_ids]\n data.update({'tags':tag_ids})\n\n attributes, is_variable = self.get_product_attribute(template, instance, common_log_id,\n model_id)\n if is_variable:\n data.update({'type':'variable'})\n\n if template.attribute_line_ids:\n variations = []\n for variant in woo_template.woo_product_ids:\n variation_data = {}\n product_variant = self.get_variant_data(variant, instance, update_image)\n variation_data.update(product_variant)\n if update_price:\n if data.get('type') == 'simple':\n data.update(self.get_product_price(instance, variant))\n else:\n variation_data.update(self.get_product_price(instance, variant))\n variations.append(variation_data)\n default_att = variations and variations[0].get('attributes') or []\n data.update({\n 'attributes':attributes, 'default_attributes':default_att,\n 'variations':variations\n })\n if data.get('type') == 'simple':\n data.update({'sku':str(variant.default_code),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n variant = woo_template.woo_product_ids\n data.update(self.get_variant_data(variant, instance, update_image))\n if update_price:\n data.update(self.get_product_price(instance, variant))\n\n if publish == 'publish':\n data.update({'status':'publish'})\n else:\n data.update({'status':'draft'})\n\n if update_image:\n tmpl_images = []\n tmpl_images += self.get_gallery_images(instance, woo_template, template)\n tmpl_images and data.update({\"images\":tmpl_images})\n return data", "def __init__(self, pageName):\n self.pageName = pageName\n self.updateFileData()\n self.template = pystache.parse(unicode(self.fileData, 'utf-8'))", "def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. 
\"\n \"\".format(template, e.args[0]))\n return template", "def _add_attributes(self, this): # noqa: C901\n for v in this.data_vars:\n if \"TEMP\" in v and \"_QC\" not in v:\n this[v].attrs = {\n \"long_name\": \"SEA TEMPERATURE IN SITU ITS-90 SCALE\",\n \"standard_name\": \"sea_water_temperature\",\n \"units\": \"degree_Celsius\",\n \"valid_min\": -2.0,\n \"valid_max\": 40.0,\n \"resolution\": 0.001,\n }\n if \"ERROR\" in v:\n this[v].attrs[\"long_name\"] = (\n \"ERROR IN %s\" % this[v].attrs[\"long_name\"]\n )\n\n for v in this.data_vars:\n if \"PSAL\" in v and \"_QC\" not in v:\n this[v].attrs = {\n \"long_name\": \"PRACTICAL SALINITY\",\n \"standard_name\": \"sea_water_salinity\",\n \"units\": \"psu\",\n \"valid_min\": 0.0,\n \"valid_max\": 43.0,\n \"resolution\": 0.001,\n }\n if \"ERROR\" in v:\n this[v].attrs[\"long_name\"] = (\n \"ERROR IN %s\" % this[v].attrs[\"long_name\"]\n )\n\n for v in this.data_vars:\n if \"PRES\" in v and \"_QC\" not in v:\n this[v].attrs = {\n \"long_name\": \"Sea Pressure\",\n \"standard_name\": \"sea_water_pressure\",\n \"units\": \"decibar\",\n \"valid_min\": 0.0,\n \"valid_max\": 12000.0,\n \"resolution\": 0.1,\n \"axis\": \"Z\",\n }\n if \"ERROR\" in v:\n this[v].attrs[\"long_name\"] = (\n \"ERROR IN %s\" % this[v].attrs[\"long_name\"]\n )\n\n for v in this.data_vars:\n if \"DOXY\" in v and \"_QC\" not in v:\n this[v].attrs = {\n \"long_name\": \"Dissolved oxygen\",\n \"standard_name\": \"moles_of_oxygen_per_unit_mass_in_sea_water\",\n \"units\": \"micromole/kg\",\n \"valid_min\": -5.0,\n \"valid_max\": 600.0,\n \"resolution\": 0.001,\n }\n if \"ERROR\" in v:\n this[v].attrs[\"long_name\"] = (\n \"ERROR IN %s\" % this[v].attrs[\"long_name\"]\n )\n\n for v in this.data_vars:\n if \"_QC\" in v:\n attrs = {\n \"long_name\": \"Global quality flag of %s profile\" % v,\n \"convention\": \"Argo reference table 2a\",\n }\n this[v].attrs = attrs\n\n if \"CYCLE_NUMBER\" in this.data_vars:\n this[\"CYCLE_NUMBER\"].attrs = {\n \"long_name\": \"Float cycle number\",\n \"convention\": \"0..N, 0 : launch cycle (if exists), 1 : first complete cycle\",\n }\n\n if \"DATA_MODE\" in this.data_vars:\n this[\"DATA_MODE\"].attrs = {\n \"long_name\": \"Delayed mode or real time data\",\n \"convention\": \"R : real time; D : delayed mode; A : real time with adjustment\",\n }\n\n if \"DIRECTION\" in this.data_vars:\n this[\"DIRECTION\"].attrs = {\n \"long_name\": \"Direction of the station profiles\",\n \"convention\": \"A: ascending profiles, D: descending profiles\",\n }\n\n if \"PLATFORM_NUMBER\" in this.data_vars:\n this[\"PLATFORM_NUMBER\"].attrs = {\n \"long_name\": \"Float unique identifier\",\n \"convention\": \"WMO float identifier : A9IIIII\",\n }\n\n return this", "def generatePredictorDataTemplate(self):\n self.__pdir = Predictor.directory\n self.__predictorData = PredictorData(None)\n self.save()", "def _postprocessing(self):\n # (in)validate\n if len(self._var_names) == 0:\n self.invalidate()\n else:\n self.put_param('is_valid', True)\n \n # set type\n self.put_param('type', 'Generic')", "def process(self, doc_data):\n self.doc_data = doc_data\n self.process_text(self.auto_link_messages)\n self.process_text(self.auto_link_xips)\n self.add_type_sizes()\n return self.doc_data", "def process(self, request):\n if \"type\"+self.id in request.args:\n self.lms.type = request.args[\"type\"+self.id][0]\n if \"studentpost\"+self.id in request.args:\n self.lms.studentpost = request.args[\"studentpost\"+self.id][0]\n if \"groupmode\"+self.id in request.args:\n 
self.lms.groupmode = request.args[\"groupmode\"+self.id][0]\n if \"visible\"+self.id in request.args:\n self.lms.visible = request.args[\"visible\"+self.id][0]\n if \"subscription\"+self.id in request.args:\n self.lms.subscription = request.args[\"subscription\"+self.id][0]\n if \"other\"+self.id in request.args:\n self.lms.otherLabel = request.args[\"other\"+self.id][0]\n if \"url\"+self.id in request.args:\n self.lms.otherUrl = request.args[\"url\"+self.id][0]", "def __init__(self, tmp_json):\n super(Template, self).__init__(tmp_json)", "def prepare_data(self, config: TreeConfigParser) -> None:\n self.data = Data(config)\n self.data.prepare_input()\n self.data.prepare_output()", "def fit_to_template(self, templates: EquivDict) -> None:\n label_values = set(seginfo.label_value for seginfo in self.infos.values())\n for segment in self.infos.values():\n other_label_values = label_values - set((segment.label_value,))\n for equivalents, updated_attrs in templates.items():\n if segment.name in equivalents:\n # update the SegmentInfo instance describing the\n # semantic class\n assert 'name' in updated_attrs, 'Requiring name to identify segment!'\n segment.name = updated_attrs['name']\n # We have to dispatch relabel or swaplabel if label values are changed\n # to ensure that the numerical data member (numpy.ndarray) and the\n # describing SegmentInfo instances are synchronized \n for attr_name, new_attr_val in updated_attrs.items():\n if new_attr_val is None:\n continue\n elif attr_name == 'label_value':\n if new_attr_val == segment.label_value:\n # no change of label_value as value is identical\n continue\n elif new_attr_val in other_label_values:\n # use swaplabel to avoid label value clash\n self.swaplabel(segment.label_value, new_attr_val)\n else:\n # easy relabel as new label_value is not pre-existing\n self.relabel(segment.label_value, new_attr_val)\n else:\n setattr(segment, attr_name, new_attr_val)\n break\n # propagate state changes\n self._update_state_from_infos()", "def _process_plugin_data(self, fields, fetch_related_data=False):\n for field, default_value in fields:\n try:\n setattr(\n self.data,\n field,\n self.plugin_data.get(field, default_value)\n )\n except Exception:\n setattr(self.data, field, default_value)", "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n # changes\n changes: Dict[str, Any] = {\n 'ram': False,\n 'cpu': False,\n 'storages': False,\n }\n updates = vm_data['history'][0]\n try:\n if updates['ram_quantity'] is not None:\n # RAM is needed in MB for the updater but we take it in in GB (1024, not 1000)\n changes['ram'] = vm_data['ram'] * 1024\n except KeyError:\n pass\n try:\n if updates['cpu_quantity'] is not None:\n changes['cpu'] = vm_data['cpu']\n except KeyError:\n pass\n # Fetch the drive information for the update\n try:\n if len(updates['storage_histories']) != 0:\n Windows.logger.debug(f'Fetching drives for VM #{vm_id}')\n child_span = opentracing.tracer.start_span('fetch_drive_updates', child_of=span)\n changes['storages'] = Windows.fetch_drive_updates(vm_data)\n child_span.finish()\n except KeyError:\n pass\n # Add changes to data\n data['changes'] = changes\n data['storage_type'] = vm_data['storage_type']\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n # Get the host name of the 
server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host ip address not found for the server # {vm_data[\"server_id\"]}.'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n # Add the host information to the data\n data['host_name'] = host_name\n # Determine restart\n data['restart'] = vm_data['restart']\n return data", "def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)", "def prepare_data(self):\n\n # Get days abbrs and month names lists\n self.month_names = cal_data.get_month_names()\n self.month_names_eng = cal_data.get_month_names_eng()\n self.days_abrs = cal_data.get_days_abbrs()\n\n # Today date\n self.active_date = cal_data.today_date_list()\n # Set title\n self.title = \"%s - %s\" % (self.month_names[self.active_date[1] - 1],\n self.active_date[2])\n\n # Quarter where current month in the self.quarter[1]\n self.get_quarter()", "def get_data(self):\n self.data = dict()\n # list to save all the attributes we are going to create\n self.attr = []\n # list to save all the groups available in the incomming input\n self.groups.extend(self.values.keys())\n # Grouping\n self.parse_data()", "def _fill_template(self, name, superclass):\n end = name.lower().split(\"_\")[-1]\n id_field_name = self.id_field_name or end + \"_id\"\n template = Template(self.template)\n template = template.substitute(namespace=self.module.namespace,\n module_name=self.module.name,\n name=name,\n superclass=superclass,\n group=self.module.name.lower(),\n name_lower=name.lower(),\n end=end,\n id_field_name=id_field_name)\n return template", "def _set_attributes(self):", "def __getattr__(self, key):\n if key == 'user_data':\n if key in self.data:\n return self.data[key]\n api = ApiClient()\n user_json = api.rest(self.url + '/user_data.json')\n self.user_data = UserData(json.loads(user_json), self.url)\n return self.user_data\n\n return super(Template, self).__getattr__(key)", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def load_subject_attr(attrib_data):\n module_logger.info(\"Creating a template %s.\", __name__)\n attrib = SubjectAttribute()\n\n module_logger.debug(\"Filling in %s details.\", __name__)\n attrib._set_id(attrib_data['id'])\n attrib.links = attrib_data['linkage']\n attrib.version = attrib_data['ver']\n\n # Required fields\n attrib.study = attrib_data['meta']['study']\n attrib.tags = attrib_data['meta']['tags']\n\n # Handle optional properties\n if 'aerobics' in attrib_data['meta']:\n attrib.aerobics = 
attrib_data['meta']['aerobics']\n\n if 'alcohol' in attrib_data['meta']:\n attrib.alcohol = attrib_data['meta']['alcohol']\n\n if 'allergies' in attrib_data['meta']:\n attrib.allergies = attrib_data['meta']['allergies']\n\n if 'asthma' in attrib_data['meta']:\n attrib.asthma = attrib_data['meta']['asthma']\n\n if 'cad' in attrib_data['meta']:\n attrib.cad = attrib_data['meta']['cad']\n\n if 'chf' in attrib_data['meta']:\n attrib.chf = attrib_data['meta']['chf']\n\n if 'comment' in attrib_data['meta']:\n attrib.comment = attrib_data['meta']['comment']\n\n if 'contact' in attrib_data['meta']:\n attrib.contact = attrib_data['meta']['contact']\n\n if 'diabetes' in attrib_data['meta']:\n attrib.diabetes = attrib_data['meta']['diabetes']\n\n if 'education' in attrib_data['meta']:\n attrib.education = attrib_data['meta']['education']\n\n if 'family_history' in attrib_data['meta']:\n attrib.family_history = attrib_data['meta']['family_history']\n\n if 'father' in attrib_data['meta']:\n attrib.father = attrib_data['meta']['father']\n\n if 'ga_at_delivery' in attrib_data['meta']:\n attrib.ga_at_delivery = attrib_data['meta']['ga_at_delivery']\n\n if 'gallbladder' in attrib_data['meta']:\n attrib.gallbladder = attrib_data['meta']['gallbladder']\n\n if 'hyperlipidemia' in attrib_data['meta']:\n attrib.hyperlipidemia = attrib_data['meta']['hyperlipidemia']\n\n if 'hypertension' in attrib_data['meta']:\n attrib.hypertension = attrib_data['meta']['hypertension']\n\n if 'illicit_drug' in attrib_data['meta']:\n attrib.illicit_drug = attrib_data['meta']['illicit_drug']\n\n if 'kidney' in attrib_data['meta']:\n attrib.kidney = attrib_data['meta']['kidney']\n\n if 'liver' in attrib_data['meta']:\n attrib.liver = attrib_data['meta']['liver']\n\n if 'lmp' in attrib_data['meta']:\n attrib.lmp = attrib_data['meta']['lmp']\n\n if 'mother' in attrib_data['meta']:\n attrib.mother = attrib_data['meta']['mother']\n\n if 'occupation' in attrib_data['meta']:\n attrib.occupation = attrib_data['meta']['occupation']\n\n if 'osa' in attrib_data['meta']:\n attrib.osa = attrib_data['meta']['osa']\n\n if 'pancreatitis' in attrib_data['meta']:\n attrib.pancreatitis = attrib_data['meta']['pancreatitis']\n\n if 'postmenopausal' in attrib_data['meta']:\n attrib.postmenopausal = attrib_data['meta']['postmenopausal']\n\n if 'preg_term' in attrib_data['meta']:\n attrib.preg_term = attrib_data['meta']['preg_term']\n\n if 'pvd' in attrib_data['meta']:\n attrib.pvd = attrib_data['meta']['pvd']\n\n if 'rx' in attrib_data['meta']:\n attrib.rx = attrib_data['meta']['rx']\n\n if 'siblings' in attrib_data['meta']:\n attrib.siblings = attrib_data['meta']['siblings']\n\n if 'subproject' in attrib_data['meta']:\n attrib.subproject = attrib_data['meta']['subproject']\n\n if 'survey_id' in attrib_data['meta']:\n attrib.survey_id = attrib_data['meta']['survey_id']\n\n if 'tobacco' in attrib_data['meta']:\n attrib.tobacco = attrib_data['meta']['tobacco']\n\n module_logger.debug(\"Returning loaded %s.\", __name__)\n return attrib", "def _transform_data(self):\n parser = self.parse_xml(self._data_src, 'Chemical')\n for chemical in parser:\n if 'displayName' not in chemical.attrib:\n continue\n\n # initial setup and get label\n display_name = chemical.attrib['displayName']\n if not display_name or not re.search(TAGS_REGEX, display_name):\n continue\n label = re.sub(TAGS_REGEX, '', display_name)\n params = {\n 'label': label\n }\n\n # get concept ID\n reg_no = chemical.find('NumberList').find(\"CASRegistryNumber\")\n if not reg_no:\n continue\n 
params['concept_id'] = \\\n f'{NamespacePrefix.CASREGISTRY.value}:{reg_no.text}'\n\n # get aliases\n aliases = []\n label_l = label.lower()\n name_list = chemical.find('NameList')\n if name_list:\n for name in name_list.findall('NameOfSubstance'):\n text = name.text\n if text != display_name and text.lower() != label_l:\n aliases.append(re.sub(TAGS_REGEX, '', text))\n params['aliases'] = aliases\n\n # get xrefs and associated_with\n params['xrefs'] = []\n params['associated_with'] = []\n locator_list = chemical.find('LocatorList')\n if locator_list:\n for loc in locator_list.findall('InternetLocator'):\n if loc.text == 'DrugBank':\n db = f'{NamespacePrefix.DRUGBANK.value}:' \\\n f'{loc.attrib[\"url\"].split(\"/\")[-1]}'\n params['xrefs'].append(db)\n elif loc.text == 'FDA SRS':\n fda = f'{NamespacePrefix.FDA.value}:' \\\n f'{loc.attrib[\"url\"].split(\"/\")[-1]}'\n params['associated_with'].append(fda)\n\n # double-check and load full record\n assert Drug(**params)\n self._load_therapy(params)", "def postprocessData(meta, units, data):\n\n data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])\n units['time'] = 's'\n\n meta, units, data = self.calculateForce(meta, units, data)\n\n data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)\n units['distance'] = 'nm'\n\n return meta, units, data", "def create_page(self, data):\n env = Environment(loader=FileSystemLoader(self.template_folder), trim_blocks=True, lstrip_blocks=True)\n template = env.get_template(self.template_file_name)\n template_vars = {'class_name': self.get_class_name(data['name']), 'page': data}\n output = template.render(template_vars)\n formatted_output = output.encode('utf8').strip()\n file_name = data['name'] + self.get_output_file_type()\n result_html = open(os.path.join(self.output_folder, file_name), 'w')\n result_html.write(formatted_output)\n result_html.close()", "def __call__(self, template, obj=None):\n for engine in self.engines:\n filename = engine.find_template_filename(template)\n if filename:\n if obj:\n self.res.locals.update(obj)\n html = engine.render_source(filename, self.res.locals)\n self.res.send_html(html)\n break\n else:\n raise ValueError(\"Could not find a template with name '%s'\" % template)", "def _compile(self,datafile):\n\t\ttemplate_py = 'def displayself(tpldata):\\n';\n\t\ttemplate_py += '\\tpage=[]\\n';\n\t\ttemplate_py += '\\tappend=page.append\\n';\n\t\tself.lineno = 1\n\t\tself.error_found = 0\n\t\tfor line in datafile:\n\t\t\tif self.error_found:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tline=line.rstrip()\n\t\t\t\t#escape special chars\n\t\t\t\tline=line.replace(\"\\\\\",\"\\\\\\\\\").replace(\"'\",\"\\\\'\")\n\t\t\t\ttline = '\\t'*self.tab_dep + \"append('\" + line\n\t\t\t\ttemplate_py = template_py + self._compile_var_tags(tline)+'\\n'\n\t\t\t\tself.lineno += 1\n\t\t\t\n\t\ttemplate_py = template_py + '\\treturn page\\n'\n\t\treturn template_py", "def apply_custom_template(self, name, filename, context):\n with open(os.path.join(self.build_path, filename), 'r') as f:\n template = actions.ActionsTemplate.from_dict(json.loads(f.read()))\n\n outputs = template.apply(context, self)\n\n for key, value in six.iteritems(outputs):\n context[key] = value", "def run(self, template_data, data_cont):\n\n tprops = acalib.core.transform.fits_props(template_data)\n scaled = acalib.core.transform.scale(data_cont, tprops['major'])\n rotated, angles = acalib.core.transform.rotate(scaled, tprops['angle'])\n aligned = acalib.core.transform.crop_and_align(rotated, angles)\n result = mean(aligned , 
axis=0)\n return result", "def _process(self):\n self.output[\"data\"] = get_form_data(self.kwargs[\"collect\"].ona_scan_form_pk)", "def set_properties(self):\n\n # assign feed entries from the root of the parsed data\n if hasattr(self.parsed_data, \"entries\"):\n self.items = self.parsed_data.entries\n\n # check if it is a feed root or feed element\n if hasattr(self.parsed_data, \"feed\"):\n source_data = self.parsed_data.feed\n else:\n source_data = self.parsed_data\n\n # assign available properties not listed in keymap\n self.title = source_data.title\n self.link = source_data.link\n\n for key in self.parsed_data.keymap.keys():\n if hasattr(self, key) and not getattr(self, key):\n attr_value = source_data.get(key)\n if isinstance(attr_value, struct_time):\n attr_value = self.serialize_datetime(attr_value)\n\n setattr(self, key, attr_value)", "def setup(self):\r\n self.text_input_values = {}\r\n if self.tag == 'radiotextgroup':\r\n self.html_input_type = \"radio\"\r\n elif self.tag == 'checkboxtextgroup':\r\n self.html_input_type = \"checkbox\"\r\n else:\r\n raise Exception(\"ChoiceGroup: unexpected tag {0}\".format(self.tag))\r\n\r\n if self.value == '':\r\n # Make `value` an empty dictionary, if it currently has an empty\r\n # value. This is necessary because the template expects a\r\n # dictionary.\r\n self.value = {}\r\n self.choices = self.extract_choices(self.xml)", "def setvariables(self, request, contextvars, thevars):\n postdata = {}\n if request.POST:\n postdata = dict(request.POST.dict())\n for var in thevars:\n if postdata.get(\"custom_\"+var):\n contextvars[var] = postdata.get(\"custom_\"+var)\n else:\n try:\n contextvars[var] = thevars[var]\n except Exception:\n pass\n return contextvars", "def template_data(self) -> Any:\n return pulumi.get(self, \"template_data\")", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def prepareData(self, *data):\n arguments = 8\n (self.X, self.X_name, self.Y, self.Y_name, self.alignment,\n self.model, self.annotations, self.args) = tuple(data[:arguments])\n \n self.width = self.args.beam_width\n self.mathType = self.args.mathType\n self.io_files = {\n 'input': self.args.intermediate_input_files,\n 'output': self.args.intermediate_output_files\n }\n self.repeat_width = self.args.repeat_width\n self.cons_count = self.args.cons_count\n self.posterior_processors = self.args.posterior_processors \n\n self.positionGenerator = \\\n list(AlignmentBeamGenerator(self.alignment, self.width))\n \n for i in range(len(self.model.states)):\n self.model.states[i].computeHints(self)\n\n return data[arguments:]", "def run(self):\n\n for line in self.template:\n match = self._regex.match(line)\n if match:\n self._process(match)\n return self.parsed_template", "def __post_init__(self):\n # Only do this if source_data 
already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def process_data(self, data):\n\n for feat, vals in zip(data.names, data.feats):\n for val in vals:\n self.fvals[feat][val][1] = Literal(feature=feat, value=val)", "def template(self, data, variables, fail_on_undefined=False):\n try:\n templar = Templar(loader=self.data_loader, variables=variables)\n return templar.template(data, fail_on_undefined=fail_on_undefined)\n except AnsibleError as ansible_error:\n # Sometime we need to export\n if fail_on_undefined:\n raise\n self.display.warning(ansible_error)\n return data", "def init_context_data(self):\n pass", "def gen_compute_data(self):\n\n print \"\\t* Generating combined nova and neutron data\"\n self.init_compute_clients()\n self.compute_data[\"heat_template_version\"] = \"2013-05-23\"\n self.compute_data[\"description\"] = \"Generated Template %s on Project %s\" % \\\n (str(datetime.datetime.now().strftime(\"%A, %d. %B %Y %I:%M%p\")), str(self.tenant_name))\n self.compute_data[\"parameters\"] = {}\n self.compute_data[\"resources\"] = {}\n self.gen_parameters()\n self.gen_resources()\n self.compute_template = self.compute_data", "def set_data(self, case: Case, **kwargs: Any) -> None:\n context = kwargs[\"context\"]\n self.set_parameters(case, context)\n self.set_body(case, context)\n case.set_source(context.response, context.case)", "def _process_data(self):\n assert not hasattr(self, 'changes'), '_process_data called twice.'\n assert hasattr(self, 'errors'), (\n '_process_data not called by is_valid().')\n r_by_t = Collection.resource_by_type\n\n # Create and load collection of new data\n new_collection = Collection()\n for rtype, items in self.data.items():\n resource_cls = r_by_t.get(rtype)\n if resource_cls:\n for seq, json_api_item in enumerate(items):\n item = json_api_item.copy()\n links = item.pop('links', {})\n item.update(links)\n resource = self.load_resource(resource_cls, item)\n resource._seq = seq\n new_collection.add(resource)\n\n # Create native representation of current feature data\n current_collection = Collection(DjangoResourceClient())\n feature_serializer = ViewFeatureSerializer(context=self.context)\n current_feature = feature_serializer.to_representation(self.feature)\n current_extra = current_feature.pop('_view_extra')\n del current_extra['meta']\n\n # Load feature into new and current collection\n current_feature_resource = self.load_resource(\n r_by_t['features'], current_feature)\n current_collection.add(current_feature_resource)\n current_feature.update(self.feature._in_extra)\n current_feature['id'] = str(current_feature['id'])\n resource_feature = self.load_resource(\n r_by_t['features'], current_feature)\n resource_feature._seq = None\n new_collection.add(resource_feature)\n\n # Populate collection of current data\n for rtype, items in current_extra.items():\n resource_cls = r_by_t[rtype]\n for item in items:\n resource = self.load_resource(resource_cls, item)\n current_collection.add(resource)\n\n # Add existing items not explicit in PUT content\n # This avoids 'delete' changes\n new_items = new_collection.get_all_by_data_id()\n for data_id, item in current_collection.get_all_by_data_id().items():\n if data_id not in new_items:\n rtype = item._resource_type\n resource = r_by_t[rtype]()\n json_api_rep = item.to_json_api()\n json_api_rep[rtype]['id'] = item.id.id\n resource.from_json_api(json_api_rep)\n 
resource._seq = None\n new_collection.add(resource)\n\n # Add existing items used in new collection to current collection\n # This avoids incorrect 'new' changes\n existing_items = current_collection.get_all_by_data_id()\n for data_id, item in new_collection.get_all_by_data_id().items():\n if item.id:\n item_id = item.id.id\n int_id = None\n existing_item = existing_items.get(data_id)\n try:\n int_id = int(item_id)\n except ValueError:\n pass\n if int_id and (existing_item is None):\n rtype = item._resource_type\n resource_cls = r_by_t[rtype]\n model_cls, serializer_cls = view_cls_by_name[rtype]\n obj = model_cls.objects.get(id=int_id)\n serializer = serializer_cls()\n data = serializer.to_representation(obj)\n resource = self.load_resource(resource_cls, data)\n current_collection.add(resource)\n\n # Load the diff\n self.changeset = CollectionChangeset(\n current_collection, new_collection)\n assert not self.changeset.changes.get('deleted'), (\n 'Existing items were not added, so deletions found:\\n%s'\n % self.changes['deleted'])", "def template_attribute_process(self, woo_instance, odoo_template, variant, template_title,\n common_log_book_id, data, product_data_queue_line,\n order_queue_line):\n common_log_line_obj = self.env[\"common.log.lines.ept\"]\n model_id = common_log_line_obj.get_model_id(self._name)\n if odoo_template.attribute_line_ids:\n # If the new variant has other attribute than available in odoo template, then exception\n # activity will be generated.\n # else it will add new value in current attribute, and will relate with the new odoo\n # variant.\n woo_attribute_ids = []\n odoo_attributes = odoo_template.attribute_line_ids.attribute_id.ids\n for attribute in variant.get(\"attributes\"):\n attribute = self.env[\"product.attribute\"].get_attribute(\n attribute[\"name\"])\n woo_attribute_ids.append(attribute.id)\n woo_attribute_ids.sort()\n odoo_attributes.sort()\n if odoo_attributes != woo_attribute_ids:\n message = \"- Product %s has tried adding a new attribute for sku '%s' in Odoo.\\n- \" \\\n \"System will not allow adding new attributes to a product.\" % (\n template_title, variant.get(\"sku\"))\n common_log_line_obj.woo_create_product_log_line(message, model_id,\n product_data_queue_line if not\n order_queue_line else order_queue_line,\n common_log_book_id)\n\n if not order_queue_line:\n product_data_queue_line.state = \"failed\"\n line_failed = True\n if woo_instance.is_create_schedule_activity:\n common_log_book_id.create_woo_schedule_activity()\n return False\n\n template_attribute_value_domain = self.find_template_attribute_values(\n data.get(\"attributes\"), variant.get(\"attributes\"),\n odoo_template, woo_instance)\n if not template_attribute_value_domain:\n for woo_attribute in variant.get(\"attributes\"):\n attribute_id = self.env[\"product.attribute\"].get_attribute(\n woo_attribute[\"name\"],\n type=\"radio\",\n create_variant=\"always\",\n auto_create=True)\n value_id = self.env[\n \"product.attribute.value\"].get_attribute_values(\n woo_attribute[\"option\"],\n attribute_id.id,\n auto_create=True)\n attribute_line = odoo_template.attribute_line_ids.filtered(\n lambda x:x.attribute_id.id == attribute_id.id)\n if not value_id.id in attribute_line.value_ids.ids:\n attribute_line.value_ids = [(4, value_id.id, False)]\n odoo_template._create_variant_ids()\n template_attribute_value_domain = self.find_template_attribute_values(\n data.get(\"attributes\"), variant.get(\"attributes\"),\n odoo_template, woo_instance)\n template_attribute_value_domain.append(\n 
(\"product_tmpl_id\", \"=\", odoo_template.id))\n odoo_product = self.env[\"product.product\"].search(\n template_attribute_value_domain)\n if not odoo_product.default_code:\n odoo_product.default_code = variant[\"sku\"]\n return odoo_product\n\n template_vals = {\"name\":template_title, \"type\":\"product\",\n \"default_code\":variant[\"sku\"]}\n if self.env[\"ir.config_parameter\"].sudo().get_param(\n \"woo_commerce_ept.set_sales_description\"):\n template_vals.update({\"description_sale\":variant.get(\"description\", \"\")})\n\n self.env[\"product.template\"].create(template_vals)\n odoo_product = self.env[\"product.product\"].search(\n [(\"default_code\", \"=\", variant[\"sku\"])])\n return odoo_product", "def applyData(self):\n c.setUserName(self.textname.GetValue())\n c.setPhone(self.textphon.GetValue())\n c.setCarrier(self.textcarr.GetValue())\n c.setSmsFinished(self.smsfin.GetValue())\n c.setSmsError(self.smserr.GetValue())", "def make_cake_templates():\n tmpl = dict()\n\n # Attributes\n tmpl['Cooking time'] = ConditionTemplate(\n name=\"Cooking time\",\n description=\"The time elapsed during a cooking process\",\n bounds=RealBounds(0, 7 * 24.0, \"hr\")\n )\n tmpl[\"Oven temperature setting\"] = ParameterTemplate(\n name=\"Oven temperature setting\",\n description=\"Where the knob points\",\n bounds=RealBounds(0, 2000.0, \"K\")\n )\n tmpl[\"Oven temperature\"] = ConditionTemplate(\n name=\"Oven temperature\",\n description=\"Actual temperature measured by the thermocouple\",\n bounds=RealBounds(0, 2000.0, \"K\")\n )\n\n tmpl[\"Tastiness\"] = PropertyTemplate(\n name=\"Tastiness\",\n description=\"Yumminess on a fairly arbitrary scale\",\n bounds=IntegerBounds(lower_bound=1, upper_bound=10)\n )\n\n # Objects\n tmpl[\"Baking in an oven\"] = ProcessTemplate(\n name=\"Baking in an oven\",\n description='Using heat to promote chemical reactions in a material',\n allowed_labels=['precursor'],\n conditions=[(tmpl[\"Oven temperature\"], RealBounds(0, 700, \"degF\"))],\n parameters=[(tmpl[\"Oven temperature setting\"], RealBounds(100, 550, \"degF\"))]\n )\n\n tmpl[\"Taste test\"] = MeasurementTemplate(\n name=\"Taste test\",\n properties=[tmpl[\"Tastiness\"]]\n )\n\n tmpl[\"Dessert\"] = MaterialTemplate(\n name=\"Dessert\",\n properties=[tmpl[\"Tastiness\"]]\n )\n\n tmpl[\"Generic Material\"] = MaterialTemplate(name=\"Generic\")\n tmpl[\"Icing\"] = ProcessTemplate(name=\"Icing\",\n description='Applying a coating to a substrate',\n allowed_labels=['coating', 'substrate'])\n tmpl[\"Mixing\"] = ProcessTemplate(name=\"Mixing\",\n description='Physically combining ingredients',\n allowed_labels=['wet', 'dry', 'leavening', 'seasoning',\n 'sweetener', 'shortening', 'flavoring'])\n tmpl[\"Procurement\"] = ProcessTemplate(name=\"Procurement\",\n description=\"Buyin' stuff\")\n\n return tmpl", "def render(self, data, *args, **kwargs):\n pass # pragma: nocover", "def _set_data(self, new_data):\n for name, field in self._get_fields().items():\n if name in new_data:\n try:\n setattr(self, f\"__{name}\", field.from_raw(new_data[name]))\n except (fields.ValidationError, ValueError):\n # should at least log validation and value errors\n # this can happen in case of e.g. 
fields type change\n pass", "def __attrs_post_init__(self):\n\n self.jobdate = parse(to_str(self.jobdate).strip())\n self.vehicleid = to_str(self.vehicleid).strip()\n self.reason = to_str(self.reason).strip()\n self.notes = to_str(self.notes).strip()\n self.costparts = Decimal(to_str(self.costparts).strip())\n self.costlabor = Decimal(to_str(self.costlabor).strip())\n self.costtotal = Decimal(to_str(self.costtotal).strip())\n\n # Apply additional data transformations\n self.yearmon = datetime.strftime(self.jobdate, '%Y-%m')", "def render_template(self):\n # create and expand commandline template\n tmpl_r1 = self.finditem.sub(r'{{\\2}}', self.raw_template)\n tmpl_r2 = jinja2.Template(tmpl_r1).render(self.variables)\n self.relation.script = tmpl_r2\n self.relation.template_sha256 = self.variables['template_sha256']", "def handle_data(self, data):\n if data.strip():\n self._content_list.append((self._current_tag, data))\n self._html += f\"{{{'placeholder_'+str(self._index)}}}\"\n self._index += 1", "def render_template(self, template_txt, model, res_ids, post_process=False):\n multi_mode = True\n if isinstance(res_ids, (int, long)):\n multi_mode = False\n res_ids = [res_ids]\n\n results = dict.fromkeys(res_ids, u\"\")\n\n # try to load the template\n try:\n mako_env = mako_safe_template_env if self.env.context.get('safe') else mako_template_env\n template = mako_env.from_string(tools.ustr(template_txt))\n except Exception:\n _logger.info(\"Failed to load template %r\", template_txt, exc_info=True)\n return multi_mode and results or results[res_ids[0]]\n\n # prepare template variables\n records = self.env[model].browse(filter(None, res_ids)) # filter to avoid browsing [None]\n res_to_rec = dict.fromkeys(res_ids, None)\n for record in records:\n res_to_rec[record.id] = record\n variables = {\n 'format_date': lambda date, format=False, context=self._context: format_date(self.env, date, format),\n 'format_tz': lambda dt, tz=False, format=False, context=self._context: format_tz(self.env, dt, tz, format),\n 'format_amount': lambda amount, currency, context=self._context: format_amount(self.env, amount, currency),\n 'user': self.env.user,\n 'ctx': self._context, # context kw would clash with mako internals\n }\n for res_id, record in res_to_rec.iteritems():\n variables['object'] = record\n try:\n render_result = template.render(variables)\n except Exception:\n _logger.info(\"Failed to render template %r using values %r\" % (template, variables), exc_info=True)\n raise UserError(_(\"Failed to render template %r using values %r\")% (template, variables))\n if render_result == u\"False\":\n render_result = u\"\"\n results[res_id] = render_result\n\n if post_process:\n for res_id, result in results.iteritems():\n results[res_id] = self.render_post_process(result)\n\n return multi_mode and results or results[res_ids[0]]" ]
[ "0.6795622", "0.62453735", "0.6222564", "0.6101384", "0.6039472", "0.5970306", "0.5819297", "0.57768494", "0.57472324", "0.5730784", "0.5694763", "0.5607646", "0.5585788", "0.5584792", "0.5528987", "0.5528372", "0.5523228", "0.54737645", "0.5458935", "0.5447084", "0.5441327", "0.5422623", "0.5422028", "0.5419556", "0.54164386", "0.54120433", "0.54089606", "0.5404749", "0.53949666", "0.5391593", "0.5387013", "0.53751606", "0.53688496", "0.5359548", "0.5345921", "0.5286804", "0.5271743", "0.5269325", "0.5264303", "0.52602255", "0.52369124", "0.52305627", "0.52282816", "0.5223789", "0.52219516", "0.5210882", "0.5205274", "0.5204566", "0.5194963", "0.5189258", "0.51694894", "0.51673883", "0.51444685", "0.5140394", "0.5140268", "0.5131661", "0.51183504", "0.5117773", "0.5113444", "0.51059806", "0.51058084", "0.5101627", "0.50950426", "0.5094853", "0.50864273", "0.50834", "0.5083027", "0.50709593", "0.5068852", "0.50536215", "0.5052928", "0.50491774", "0.5044277", "0.50390846", "0.5036464", "0.5036437", "0.50361854", "0.50338084", "0.50320077", "0.5030414", "0.5019563", "0.50179756", "0.50086534", "0.5007589", "0.50048673", "0.5001725", "0.49987552", "0.4996805", "0.49932724", "0.49930206", "0.49821767", "0.498014", "0.49791643", "0.49727678", "0.497197", "0.4969672", "0.49682498", "0.49666485", "0.49664137", "0.49657285" ]
0.5548209
14
Synthesize yaml header filename from directory name.
def _yaml_filename(self, path):
    fullpath = os.path.abspath(path)
    if not os.path.isdir(fullpath):
        dirname = os.path.dirname(fullpath)
    else:
        dirname = path
    if dirname.endswith('/'):
        dirname = dirname[:-1]
    fname = dirname.split('/')[-1] + '.yaml'
    return dirname, fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def file_title(self):\n basename = os.path.basename(self.__path)\n index_dot = basename.rfind(\".\")\n if index_dot == 0:\n return basename[1:]\n return basename if index_dot < 0 else basename[:index_dot]", "def _filename(self, key):\n return os.path.join(self.root, key[:2], key)", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def generate_header(name: str) -> str:\n return MARKDOWN_HEADER.format(name.capitalize(), date.today())", "def ifdef_name(filename):\n return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\"", "def filename(self):\n return self.config.get('filename', self.id) + f'_{self.file_suffix}'", "def make_filename(key, extension):\n key = unicode(key.strip())\n return '{}.{}'.format(slugify(key), extension)", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def _get_filename(self, type_: str, name: str) -> str:\n if not os.path.isdir(self._datadir):\n os.mkdir(self._datadir, mode=0o700)\n\n type_dir = os.path.join(self._datadir, type_)\n if not os.path.isdir(type_dir):\n os.mkdir(type_dir, mode=0o700)\n\n fn = os.path.join(type_dir, name) + '.yaml'\n return fn", "def format_filename(prefix, suffix, seq_len, uncased):\n seq_str = \"seq-{}\".format(seq_len)\n if uncased:\n case_str = \"uncased\"\n else:\n case_str = \"cased\"\n\n file_name = \"{}.{}.{}.{}\".format(prefix, seq_str, case_str, suffix)\n\n return file_name", "def root_name(file_name, file_id):\n if file_id is not None:\n return \"{}{}\".format(R_DIR, file_name.format(file_id))\n else:\n return \"{}{}\".format(R_DIR, file_name)", "def genBaseName(fileName):\n return fileName.split(\"_\")[0].split(\".\")[0]", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def get_file_name(replay_dir, template_name):\n suffix = '.json' if not template_name.endswith('.json') else ''\n file_name = f'{template_name}{suffix}'\n return os.path.join(replay_dir, file_name)", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def format_name(name_dir):\n if(name_dir.endswith('/')):\n name_dir = name_dir.rstrip('/')\n return(name_dir)", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def title(self):\n return os.path.basename(self.__path) if self.isdir() else self.file_title", "def 
content_file_name(self, filename):\n ext = filename.split('.')[-1]\n filename = \"%s_%s.%s\" % (filename, self.id, ext)\n return os.path.join('pictures/static/pictures/', filename)", "def filename(lang):\n filename = lang.replace('-', '_')\n filename = filename.lower()\n return filename", "def _title(self, path):\n title = os.path.basename(os.path.splitext(path)[0])\n return title", "def extract_dir_name(input_file):\r\n fname = PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def collected_filename(cfg, collect_dir, i=None):\n if i is not None:\n file = cfg[\"files\"][i]\n else:\n file = cfg[\"file\"]\n ext = path.splitext(file)[1]\n name = cfg[\"id\"]\n if i is not None:\n name += \"_\" + str(i)\n return path.join(collect_dir, name + ext)", "def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)", "def _make_fname(song, ext=None, av=None, subdir=None):\n # pylint: disable=E1103\n # Instance of 'bool' has no 'extension' member (some types not inferable)\n ddir = os.path.join(Config.DDIR.get, subdir) if subdir else Config.DDIR.get\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n if ext:\n extension = ext\n\n else:\n stream = streams.select(streams.get(song),\n audio=av == \"audio\", m4a_ok=True)\n extension = stream['ext']\n\n # filename = song.title[:59] + \".\" + extension\n filename = song.title + \".\" + extension\n filename = os.path.join(ddir, mswinfn(filename.replace(\"/\", \"-\")))\n filename = filename.replace('\"', '')\n return filename", "def filename(self):\n # Just the name of the file\n filename = self.use_name\n if self.extension:\n filename = \"{0}.{1}\".format(self.use_name, self.extension)\n # Architecture sub-folder\n arch_folder_conf = spack.config.get(\"modules:%s:arch_folder\" % self.conf.name, True)\n if arch_folder_conf:\n # include an arch specific folder between root and filename\n arch_folder = str(self.spec.architecture)\n filename = os.path.join(arch_folder, filename)\n # Return the absolute path\n return os.path.join(self.dirname(), filename)", "def GetHeaderName(name):\n name = os.path.splitext(name)[0] + '.h'\n name = name.replace(os.sep, '/')\n return 'ppapi/c/' + name", "def _gen_basename(param_dict, clargs):\n if param_dict['output_basename'] in ['', 'auto']:\n return clargs.input_fname.lower().split('.json')[0]\n\n else:\n return param_dict['output_basename']", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)", "def dir_key(dirname):\n return DIR_PREFIX + dirname", "def _proto_filename_to_generated_header(proto_file: str) -> str:\n filename = os.path.splitext(proto_file)[0]\n return f'{filename}.rpc{PROTO_H_EXTENSION}'", "def subs_filename(subs_id, lang='en'):\r\n if lang == 'en':\r\n return 
u'subs_{0}.srt.sjson'.format(subs_id)\r\n else:\r\n return u'{0}_subs_{1}.srt.sjson'.format(lang, subs_id)", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def get_conf_filename (self, directory):\n return os.path.join(directory, \"_%s_configdata.py\" % self.get_name())", "def content_file_name(instance, filename):\r\n return '/'.join([str(instance.app.publisher.id), str(instance.app.id), filename])", "def _get_uml_filename(module_filename) -> str:\n return Path(module_filename).stem", "def get_title(self):\n\n if self.title: return self.title\n path = self.get_path()\n if str(path) == \"\": \n Settings.err_print(\"missing file title\")\n return \"\"\n title, ext = os.path.splitext(path)\n self.ext = ext\n self.title = \"{}{}\".format(os.path.basename(title), ext)\n return self.title", "def create_filename(value):\n return '%s.mp3' % slugify(value, u'_')", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def data_filename(self) -> str: # type: ignore[return-value]\n return os.path.abspath(self.name) # type: ignore", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def _format_filename(filename: str) -> str:\n stdlib = (\n f\"{sys.prefix}/lib/python{sys.version_info.major}.{sys.version_info.minor}/\"\n )\n site_pkg = f\"{sys.prefix}/lib/python{sys.version_info.major}.{sys.version_info.minor}/site-packages/\"\n home = f\"{Path.home()}/\"\n cwd = f\"{Path.cwd()}/\"\n if filename.startswith(site_pkg):\n return \"<sitepkg>/\" + filename[len(site_pkg) :]\n if filename.startswith(stdlib):\n return \"<stdlib>/\" + filename[len(stdlib) :]\n if filename.startswith(cwd):\n return \"<cwd>/\" + filename[len(cwd) :]\n if filename.startswith(home):\n return \"<home>/\" + filename[len(home) :]\n return filename", "def friendly_name(self):\n if not self.collection:\n print('YIKES no filepath %s %s' % (self.title,self.slug))\n print (self)\n return ''\n fpath = self.collection.filePath\n if '/' in fpath:\n bg = fpath.index('/')+1\n return fpath[bg:]+ '/' + self.fileName\n return fpath+'/'+self.fileName", "def file_name(path):\n return os.path.basename(path).split('.')[0]", "def fileSetup(path):\n past_fnames = next(os.walk(path))[2]\n f_nums = []\n for f in past_fnames:\n if f.startswith('semi'):\n f_nums.append(int(f[4:8]))\n\n if not f_nums:\n now_fname = '/semi0001.h264'\n else:\n now_fname = '/semi%04d.h264' % (max(f_nums)+1, )\n\n now_fname = path+now_fname\n print 'Current filename: %s' % now_fname\n\n return now_fname", "def naming_convention(file_dir, file_name):\n long_hash = sha1sum(os.path.join(file_dir, file_name))\n file_prefix, file_sufix = file_name.split('.')\n new_name = '{file_prefix}-{short_hash}.{file_sufix}'.format(\n file_prefix=file_prefix,\n short_hash=long_hash[:8],\n file_sufix=file_sufix)\n return new_name, long_hash", "def _prettyfilename(self):\n return f'{self.grandparentTitle} 
- {self.seasonEpisode} - {self.title}'", "def _create_dir_name(date, dir_structure='ymd', is_exif=True):\n if is_exif:\n date_split = date.split(' ')[0].split(':')\n else:\n date_split = date.split(' ')[0].split('-')\n dir_name = '\\\\'\n if 'y' in dir_structure:\n dir_name += date_split[0] + '\\\\'\n if 'm' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:2]) + '\\\\'\n if 'd' in dir_structure:\n dir_name += '_'.join(d for d in date_split[:3]) + '\\\\'\n return dir_name", "def default_filename(entry):\n template = \"{title}-{speaker}.pdf\"\n return template.format(**entry._asdict())", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def get_filename(out_dir, file_date, extension):\n return path.join(out_dir, f'CrossrefCitations_{file_date}.{extension}')", "def filename(self, url, default_file = \"index.html\"):\n purl = urlparse(url)\n file_name = purl[1] + purl[2] \n folder_name = (purl[1] + purl[2])\n \n if purl[2] == '':\n folder_name += ('/' + default_file)\n file_name += ('/' + default_file)\n elif purl[2] == '/':\n folder_name += default_file\n file_name += default_file\n elif (purl[2])[-1] == '/':\n file_name += ('/' + default_file)\n\n folder_path = dirname(folder_name)\n \n if not isdir(folder_path): # create archive dir if nec.\n if not exists(folder_path): \n makedirs(folder_path)\n return file_name", "def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))", "def displayname(self):\n if self.path.is_dir():\n if (is_uuid(self.path.parts[-1])):\n self.is_uuid_folder = True\n return self.path.name + '/'\n elif is_proj(self.path.parts[-1]):\n return f'{bcolors.BOLD}' + self.path.name + f'{bcolors.ENDC}'\n return self.path.name", "def _prettyfilename(self):\n return self.title", "def _make_header(title: str, category: int, description: str, slug: str, image_file_name: Optional[str] = None) -> str:\n\n current_date = _get_current_time()\n category = _get_category(category)\n social_image = SOCIAL_IMAGE_TEMPLATE.format(image_file_name) if image_file_name else \"\"\n header = HEADER_TEMPLATE.format(title, current_date, slug, category, description, social_image)\n\n if social_image:\n figure_template = FIGURE_TEMPLATE.format(social_image)\n header += figure_template\n\n return header", "def generateFileName(self):\n return 'Covid' + self.map_type + '.html'", "def normalized_export_filename(title, extension):\n filename = timezone.localtime().strftime('%Y-%m-%d_%H-%M-%S__') + slugify(title)\n if extension.startswith(os.path.extsep):\n filename += extension\n else:\n filename += os.path.extsep + extension\n return filename", "def get_filename(cls):\n return osp.join(cls.dir_location, *cls.file_path)", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def niceName(self, path):\n logger.debug(\"Func: niceName\")\n\n basename = os.path.split(path)[1]\n return os.path.splitext(basename)[0]", "def format_filename(self, s):\n valid_chars = \"-_ %s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in s if c in valid_chars)\n filename = filename.replace(' ', '_') # I don't like spaces in filenames.\n return filename", "def file_key(filename):\n return FILE_PREFIX + filename", "def get_file_name(self):\n 
return self.path.name[6:]", "def GetFileSaveName(WaveMetaData):\n FileName = WaveMetaData.SourceFilename()\n sanitizeFile = FileName.replace(\" \",\"_\")\n ext = HDF5Util.DEFAULT_HDF5_EXTENSION\n return \"X{:s}_{:d}_{:s}{:s}\".format(sanitizeFile,\n int(WaveMetaData.TimeCreated()),\n WaveMetaData.name,ext)", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n # return Path(dir) / filename\n return filename", "def distutils_dir_name(dname):\n f = \"{dirname}.{platform}-{version[0]}.{version[1]}\"\n return f.format(dirname=dname,\n platform=sysconfig.get_platform(),\n version=sys.version_info)", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def get_title(f):\n return os.path.basename(f)", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename", "def get_filename(name):\n return osp.join(osp.dirname(osp.abspath(__file__)), name)", "def name_sans_ext(self) -> str:\n return os.path.splitext(self.path)[0]", "def build_target_dir_name(topicdirname):\n if topicdirname.startswith(\"_\"):\n return topicdirname[1:]\n return \"__\" + topicdirname", "def outputNamingBase(video, audio):\n outbase = os.path.join(\n outfolder,\n \"{}_{}_{}_{}_{}\".format(\n os.path.splitext(basev)[0],\n '___',\n os.path.splitext(basea)[0],\n '___',\n now,\n ))\n return outbase", "def _generate_header_template() -> str:\n return LICENCE_HEADER_TEMPLATE.format(\n licence_identifier=configuration.get_value(ConfigurationVariable.FILE_LICENCE_IDENTIFIER),\n author=\"${owner}\",\n date=\"${years}\",\n )", "def get_filename(self):\n name, ext = self.fkit.filename.rsplit('.', 1)\n if self._field.extension():\n ext = self._field.extension()\n return '.'.join((name, ext))", "def GetOutputFilename(self, directory=None):\n if self.forced_filename:\n logging.debug('Forced filename or pre-computed file name = %s', self.filename)\n return self.filename\n\n tags = dict()\n\n # Base tag\n tags['base'] = f\"{self['ARTIST']} - {self['DATE_RECORDED']} - {self['TITLE']}\"\n\n # Setup version subinfo\n tags['version'] = f\" ({self['VERSION']})\" if self[\"VERSION\"] else \"\"\n\n # Setup label / release subinfo\n channels = self.channels if self.channels != '2.0' else ''\n if self[\"ORIGINAL_MEDIUM\"] == \"CD\":\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {channels}\"\n else:\n labeltag = f\"{self['LABEL']} {self['ISSUE_DATE']} {self['ORIGINAL_MEDIUM']} {channels}\"\n labeltag = labeltag.strip()\n tags['label'] = labeltag and f\" ({labeltag})\"\n\n # Setup disc tag\n if self[\"PART_NUMBER\"]:\n disctag = f\" (Disc {self['PART_NUMBER']}) {self['DISC_NAME']}\"\n else:\n disctag = f\" {self['DISC_NAME']}\"\n tags['disc'] = disctag.rstrip()\n\n # Merge into filename\n filename = f\"{tags['base']}{tags['version']}{tags['disc']}{tags['label']}{ext.WAV}\"\n # Replace invalid characters with either a dash or remove them\n filename = re.compile(\"[<>:/\\\\\\\\]\").sub(\"-\", filename)\n filename = re.compile(\"[|?*]\").sub(\"\", filename)\n # Replace invalid double quotes with valid single quotes\n filename = filename.replace('\"', \"'\")\n\n if directory:\n return os.path.join(directory, filename)\n return filename", "def to_html_filename(source_file_name: str) -> str:\n def escape_char(m: re.match) -> str:\n c = m.group()\n if c == '/':\n return '--'\n elif c == '-':\n return '-m'\n else:\n return '-' + 
hex(ord(c))[2:]\n\n return 'source-' + re.sub(r'[^.\\w]', escape_char, source_file_name) + '.html'", "def build_portrait_filename(self, key):\n\t\tname = self.source.get_name()\n\t\tif name == None:\n\t\t\treturn None\n\t\treturn \"portrait_\" + name + \"_\" + key + \".bmp\"", "def filename(self):\n return self.hfile.GetName()", "def _create_id(self):\r\n buildfile_relpath = os.path.dirname(self.address.buildfile.relpath)\r\n if buildfile_relpath in ('.', ''):\r\n return self.name\r\n else:\r\n return \"%s.%s\" % (buildfile_relpath.replace(os.sep, '.'), self.name)", "def unique_filename(data):\n file = data\n get_ext = file.filename.split(\".\")[-1]\n new_name = \"%s.%s\" % (uuid.uuid4().hex, get_ext)\n return new_name", "def sim_dir_name(fdir, nsteps, stim_pars, hc_pars, gs_pars, str_pars):\n\n fn = ('nsteps_{}_GSHCsharp_{}'.format(nsteps, hc_pars['gs_hc_sharp']) +\n '_hcpow_{}'.format(hc_pars['ro_pow']) +\n '_msig_{}'.format(int(stim_pars['mot_sig'])) +\n '_vbeta_{}'.format(stim_pars['vis_beta']) +\n '_lambda_{:.1f}'.format(str_pars['gamma']))\n fn = fdir + 'navigation/' + utils.format_to_fname(fn) + '/'\n return fn", "def sed_template_filename(sedtype):\n path = datapath.sed_template_path()\n filename = 'SEDtemplate_'+sedtype.lower()+'.fits'\n return join(path, filename)", "def data_filename_create(movie_filename):\n path, filename = os.path.split(movie_filename)\n filename_stub, ext = os.path.splitext(filename)\n if os.path.splitext(movie_filename)[1] in ['.png','.jpg','.tiff','.JPG']: \n data_filename = os.path.join(path, ''.join([letter for letter in filename_stub if letter.isalpha()]) + '.hdf5')\n else:\n data_filename = os.path.join(path, filename_stub + '.hdf5')\n return data_filename", "def generate_filename(\n self, directory=os.getcwd(), prefix=\"tile\", format=\"png\", path=True\n ):\n filename = prefix + \"_{col:02d}_{row:02d}.{ext}\".format(\n col=self.column, row=self.row, ext=format.lower().replace(\"jpeg\", \"jpg\")\n )\n if not path:\n return filename\n return os.path.join(directory, filename)", "def make_file_name(name):\n expanded_path = os.path.expandvars(make_fp_rel(name))\n return expanded_path", "def get_filename(self, base_filename: str) -> str:\n folder = self.prepare_folder()\n i = 0\n cartridge_number = self.config['info']['cartridge_number']\n while os.path.isfile(os.path.join(folder, base_filename.format(\n cartridge_number=cartridge_number,\n i=i))):\n i += 1\n\n return os.path.join(folder, base_filename.format(cartridge_number=cartridge_number, i=i))", "def filename_for_key(self, key, extension=None):\n if extension is None:\n extension = self.file_extension\n f = self.key2basename(key) + extension\n return os.path.join(self.basepath, f)", "def get_name(name):\n return \"/\".join([settings.CONF['path'], name])", "def generate_header_collection(self):\n\n header_collection_writer = CppHeaderCollectionWriter(self.package_info,\n self.wrapper_root)\n header_collection_writer.write()\n header_collection_path = self.wrapper_root + \"/\"\n header_collection_path += header_collection_writer.header_file_name\n\n return header_collection_path", "def get_header():\n try:\n yml_iter = cfg.yml_config[\"header\"]\n except:\n # Probably no \"comments\" section in the yml-file.\n return \"\"\n\n return (\"\\n\".join(yml_iter) + \"\\n\\n\") if yml_iter is not None else \"\\n\"", "def name(self):\n\n if self.package:\n directory = self.package.directory\n if self.package.resolve_root:\n directory = directory.joinpath(self.package.resolve_root)\n rel = None\n try:\n rel = 
self.filename.with_suffix('').relative_to(directory)\n except ValueError as e:\n if self.package.resolve_root:\n # Possibly this module is required from a directory outside of\n # the package's resolve_root, and Path.relative_to() will raise a\n # ValueError if the file is not inside the specified directory.\n try:\n rel = type(self.filename)(os.path.relpath(str(self.filename.with_suffix('')), str(directory)))\n except ValueError as e:\n pass # On a different drive\n pass\n if rel:\n parts = filter(bool, utils.path.lparts(rel))\n return self.package.name + '/' + '/'.join(parts)\n\n return self.filename.stem", "def fs_generate_entry_name(self, sDirPath, sFilenamePrefix = '', sFilenameSuffix = '', sIndexDelimiter = ''):\n\t\treturn Job(SDK.PrlSrv_FsGenerateEntryName(self.handle, sDirPath, sFilenamePrefix, sFilenameSuffix, sIndexDelimiter)[0])" ]
[ "0.63299215", "0.6177597", "0.6154436", "0.6145138", "0.61193216", "0.6117771", "0.60900265", "0.6084294", "0.59812474", "0.59652424", "0.5949886", "0.59462756", "0.5918559", "0.59006333", "0.5891024", "0.58728975", "0.5864215", "0.5860745", "0.5852123", "0.58312166", "0.5803451", "0.5797999", "0.5788537", "0.5786971", "0.5777936", "0.5775427", "0.5753256", "0.57517606", "0.5700192", "0.56959397", "0.56610876", "0.5640611", "0.563658", "0.56218946", "0.5618178", "0.5609829", "0.5609451", "0.5604566", "0.55933887", "0.55922306", "0.5582503", "0.5582153", "0.5555282", "0.5547431", "0.552905", "0.5527645", "0.55228823", "0.5517765", "0.5517274", "0.5515443", "0.5511755", "0.5494741", "0.54943454", "0.54921234", "0.5489921", "0.54885393", "0.5486711", "0.54649043", "0.5464398", "0.5463051", "0.5461842", "0.5460043", "0.545837", "0.54442054", "0.5442486", "0.5434638", "0.5434046", "0.5427739", "0.5422498", "0.5418603", "0.54160345", "0.5412793", "0.540455", "0.539808", "0.5392642", "0.53882724", "0.53798985", "0.5376069", "0.5374146", "0.5361393", "0.5357583", "0.53536165", "0.53487015", "0.5343802", "0.5342622", "0.5339064", "0.5335082", "0.53261936", "0.5323333", "0.5321235", "0.53208876", "0.53206706", "0.5319599", "0.5316539", "0.53147846", "0.53073597", "0.5306274", "0.5303207", "0.5301208", "0.529918" ]
0.6754749
0
Get T1 and T2 weighted structural image info.
def _AnatInfo(self, info, path):
    if info['data_filetype'] == 'ge_data':
        return ERROR
    outdir = '%s/%s' % (self.procdir, self.tmplt['anat']['outdir'])
    info['InversionTime'] = self.hdr['native_header']['InversionTime']
    if info['psdname'] == 'efgre3d' or info['psdname'] == 'bravo':
        # Structural scans are 3d inversion-recovery.
        if self.hdr['native_header']['InversionTime'] < 1.:
            # Only inversion recovery used for anatomy. Must be calibration.
            return None
        elif self.hdr['zsize'] > 1.25:
            # Only one slab acquired. Assume thick slices.
            name = 'T1Low_%d' % self.n_t1low
            self.n_t1low += 1
        else:
            if self.n_t1high == 0:
                name = 'T1High'
            else:
                name = 'T1High_%d' % self.n_t1high
            self.n_t1high += 1
    else:
        psdname = info['psdname']
        name = self.imgtype.get(psdname, info['psdname'])
        if self.ntype.has_key(psdname):
            self.ntype[psdname] += 1
            name = '%s_%0d' % (name, self.ntype[psdname])
        else:
            self.ntype[psdname] = 1
    info['norm_src'] = False
    info['outdir'] = outdir
    info['filetype'] = self.tmplt['anat']['format']
    info['imgfile'] = '%s/%s' % (info['outdir'], name)
    self.entry_map['anat'].append(self.current_entry)
    return OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def getWeights(self):\n return self.W1, self.W2", "def get_model_and_tile_weights(model):\n weight = model.weight.data.detach().cpu().numpy()\n bias = model.bias.data.detach().cpu().numpy()\n analog_weight, analog_bias = model.analog_tile.get_weights()\n analog_weight = analog_weight.detach().cpu().numpy().reshape(weight.shape)\n analog_bias = analog_bias.detach().cpu().numpy()\n return weight, bias, analog_weight, analog_bias", "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data", "def _get_t1w_mgh(fs_subject, fs_subjects_dir):\n import nibabel as nib\n\n fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=True)\n t1_fname = Path(fs_subjects_dir) / fs_subject / \"mri\" / \"T1.mgz\"\n if not t1_fname.exists():\n raise ValueError(\n \"Freesurfer recon-all subject folder \"\n \"is incorrect or improperly formatted, \"\n f\"got {Path(fs_subjects_dir) / fs_subject}\"\n )\n t1w_img = _load_image(str(t1_fname), name=\"T1.mgz\")\n t1w_mgh = nib.MGHImage(t1w_img.dataobj, t1w_img.affine)\n return t1w_mgh", "def miscinfo(self):\n return _image.image_miscinfo(self)", "def finish_weight(output_dir, imtype='intbgsub', wttype='rrhr'):\n image_file = os.path.join(output_dir, '{}_mosaic.fits'.format(imtype))\n wt_file = os.path.join(output_dir, '{}_mosaic.fits'.format(wttype))\n \n im, hdr = astropy.io.fits.getdata(image_file, header=True)\n wt = astropy.io.fits.getdata(wt_file)\n newim = im / wt\n\n newfile = os.path.join(output_dir, 'image_mosaic.fits')\n astropy.io.fits.writeto(newfile, newim, hdr)\n\n return newfile, wt_file", "def GetMetadata(IMAGE):\n SPACING = IMAGE.GetSpacing()\n ORIGIN = IMAGE.GetOrigin()\n DIRECTION = IMAGE.GetDirection()\n METADATA = [SPACING,ORIGIN,DIRECTION]\n return METADATA", "def build_img_info(img_root):\n imgs = []\n feats = []\n K = []\n for i, name in enumerate(os.listdir(img_root)):\n if '.jpg' in name or '.JPG' in name:\n path = os.path.join(img_root, name)\n img = cv2.imread(path)\n imgs.append(img)\n feature_process = FeatureProcess(img)\n kpt, des = feature_process.extract_features()\n photo_info = PhotoExifInfo(path)\n photo_info.get_tags()\n K.append(photo_info.get_intrinsic_matrix())\n A = photo_info.get_area()\n D = photo_info.get_diam()\n feats.append({'kpt': kpt, 'des': des, 'A': A, 'D': D})\n return imgs, feats, K", "def get_vtk_image_attrib(image):\n data = vtk_image_to_numpy(image)\n return (data.shape, data.dtype)", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", 
"def _info(self) -> tfds.core.DatasetInfo:\n features = {\n # sequence of [RGB, depth] images\n \"image\": tfds.features.Sequence(\n tfds.features.Image(shape=(600, 800, 3)), length=2,\n ),\n # sequence of image features for [RGB, depth]\n \"images\": tfds.features.Sequence(\n tfds.features.FeaturesDict(\n {\n \"file_name\": tfds.features.Text(),\n \"height\": tf.int64,\n \"width\": tf.int64,\n \"id\": tf.int64,\n },\n ),\n length=2,\n ),\n # both modalities share the same categories\n \"categories\": tfds.features.Sequence(\n tfds.features.FeaturesDict(\n {\n \"id\": tf.int64, # {'pedstrian':1, 'vehicles':2, 'trafficlight':3, 'patch':4}\n \"name\": tfds.features.Text(),\n }\n )\n ),\n # both modalities share the same objects\n \"objects\": tfds.features.Sequence(\n {\n \"id\": tf.int64,\n \"image_id\": tf.int64,\n \"area\": tf.int64, # un-normalized area\n \"boxes\": tfds.features.BBoxFeature(), # normalized bounding box [ymin, xmin, ymax, xmax]\n \"labels\": tfds.features.ClassLabel(num_classes=5),\n \"is_crowd\": tf.bool,\n }\n ),\n # these data only apply to the \"green screen patch\" objects, which both modalities share\n \"patch_metadata\": tfds.features.FeaturesDict(\n {\n \"gs_coords\": tfds.features.Sequence(\n tfds.features.Tensor(\n shape=[2], dtype=tf.int64\n ), # green screen vertices in (x,y)\n length=4, # always rectangle shape\n ),\n \"cc_ground_truth\": tfds.features.Tensor(\n shape=[24, 3], dtype=tf.float32\n ), # colorchecker color ground truth\n \"cc_scene\": tfds.features.Tensor(\n shape=[24, 3], dtype=tf.float32\n ), # colorchecker colors in a scene\n # binarized segmentation mask of patch.\n # mask[x,y] == 1 indicates patch pixel; 0 otherwise\n \"mask\": tfds.features.Tensor(shape=[600, 800, 3], dtype=tf.uint8),\n \"shape\": tfds.features.Text(),\n }\n ),\n }\n\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(features),\n citation=_CITATION,\n )", "def info(image, mask=None, Comment=\"\"):\n\tif(Comment): print \" *** \", Comment\n\te = get_image(image)\n\t[mean, sigma, imin, imax] = Util.infomask(e, mask, True)\n\tnx = e.get_xsize()\n\tny = e.get_ysize()\n\tnz = e.get_zsize()\n\tif (e.is_complex()):\n\t\ts = \"\"\n\t\tif e.is_shuffled():\n\t\t\ts = \" (shuffled)\"\n\t\tif (e.is_fftodd()):\n\t\t\tprint \"Complex odd image%s: nx = %i, ny = %i, nz = %i\" % (s, nx, ny, nz)\n\t\telse:\n\t\t\tprint \"Complex even image%s: nx = %i, ny = %i, nz = %i\" % (s, nx, ny, nz)\n\n\telse:\n\t\tprint \"Real image: nx = %i, ny = %i, nz = %i\" % (nx, ny, nz)\n\n\tprint \"avg = %g, std dev = %g, min = %g, max = %g\" % (mean, sigma, imin, imax)\n\treturn mean, sigma, imin, imax, nx, ny, nz", "def get_weights(self):", "def get_perfect_information(self):\n raise NotImplementedError", "def print_weight_info(weights):\n print(\"Length: {}\\nw:\\n{}\\nb:\\n{}\".format(\n len(weights[\"w\"]),\n [ w.shape for w in weights[\"w\"] ],\n [ b.shape for b in weights[\"b\"] ]))", "def get_image_characteristics(self):\r\n self.image_height, self.image_width, self.image_channels = self.image.shape\r\n\r\n # Estimate the cell size to be around a ninth of the width of the screenshot area\r\n self.cell_size = int(self.image_width / 9) | 1\r\n\r\n # Cell size should be at most a ninth of the width and at least a twentieth of the width of the screenshot\r\n # Since a typical grid is 9x9, so it should be at most a ninth of the image width, and it shouldn't be too small\r\n self.min_cell_size = int(self.image_width / 20 * self.image_width / 
20)\r\n self.max_cell_size = int(self.image_width / 9 * self.image_width / 9)", "def _get_weights(layer_name, weights):\n W = weights[layer_name][0]\n b = weights[layer_name][1]\n return W, b", "def extract_feat(self, img):\n x = self.backbone(img)\n y = self.backbone_gan(img)\n if self.with_feature_selection:\n x, y = self.feature_selection(x, y)\n if self.with_neck:\n x = self.neck(x)\n return x, y", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def HeaderBmpinfo( self, bWidth, bHeight, wBitCount, imgdata, bColorCount ):\n ## (4bytes)biSize - (4bytes)biWidth - (4bytes)biHeight - (2bytes)biPlanes - (2bytes)biBitCount -\n ## - (4bytes)biCompression - (4bytes)biSizeImage -\n ## - (4bytes)biXPelsPerMeter - (4bytes)biYPelsPerMeter - (4bytes)biClrused - (4bytes)biClrImportant.\n biSize = calcsize('3I2H2I2i2I')\n biWidth = bWidth\n biHeight = bHeight * 2 # include the mask height\n biPlanes = 1 # color planes must be 1 \n biBitCount = wBitCount # 1, 2, 4, 8, 16, 24, 32 \n biCompression = 0 # only uncompressed images BI_RGB.\n biSizeImage = len(imgdata) + self.CalcRowSize( 1, bWidth ) * 
abs(bHeight) # calculate pixel array size\n biXPelsPerMeter = 0\n biYPelsPerMeter = 0\n biClrUsed = bColorCount\n biClrImportant = 0\n \n bmpinfoheader = pack('3I2H2I2i2I', biSize, biWidth, biHeight, biPlanes, biBitCount, biCompression, biSizeImage,\n biXPelsPerMeter, biYPelsPerMeter, biClrUsed, biClrImportant)\n return bmpinfoheader", "def __getitem__(self, index):\n im, gt, h, w = self.pull_item(index)\n if self.return_image_info:\n return im, gt, h, w\n return im, gt", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n 
image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def fileInfo(tif: TiffFile):\n print(tif.flags)\n print(tif.geotiff_metadata)\n for page in tif.pages:\n print(page.tags)\n print(page.geotiff_tags)\n print(page.shape)\n print(page.dtype)\n print(page.flags)", "def _extract_weights(self,W):\n wl1_size = self._D*self._hidden_layer_size\n bl1_size = self._hidden_layer_size\n \n wl2_size = self._hidden_layer_size*self._output_size\n bl2_size = self._output_size\n\n \n weights_L1 = W[0:wl1_size].reshape((self._D,self._hidden_layer_size))\n bias_L1 = W[wl1_size:wl1_size+bl1_size]\n \n start_l2 = wl1_size+bl1_size\n\n weights_L2 = W[start_l2: start_l2 + wl2_size].reshape((self._hidden_layer_size,self._output_size))\n bias_L2 = W[start_l2 + wl2_size : start_l2 + wl2_size + bl2_size]\n \n \n \n return weights_L1,bias_L1,weights_L2,bias_L2", "def get_image_attributes(self, element):", "def get_weights(vmin, vmax, pvmin, pvmax, weight_type):\n if weight_type == 'trilinear':\n return cell_weights_trilinear(vmin, vmax, pvmin, pvmax)\n elif weight_type == 'none':\n return 1.0, vmin, vmax\n else:\n raise ValueError(\"Invalid weight_type, must be one of 'trilinear' or 'none'\")", "def get_example(self, i):\n id_ = self.ids[i]\n bbox = list()\n label = list()\n difficult = list()\n depth = list()\n y_rot = list()\n \n label_f = os.path.join(self.data_dir, 'label_2', id_ + '.txt')\n lines = open(label_f).readlines()\n items = [x.strip(' ').split(' ') for x in lines]\n for i in range(len(lines)):\n name = items[i][0]\n '''\n ingore the DontCare part\n '''\n if name == 'DontCare':\n continue\n xmin, ymin, xmax, ymax = items[i][4:8]\n bbox.append([int(float(ymin)), int(float(xmin)), int(float(ymax)), int(float(xmax))])\n label.append(KITTI_LABEL_NAMES.index(name))\n difficult.append(False)\n \n depth_ = float(items[i][13])/70.0\n if abs(depth_) > 1:\n depth_ = 1\n depth.append(depth_)\n \n y_rot.append(float(items[i][3]))\n\n\n\n bbox = np.stack(bbox).astype(np.float32)\n label = np.stack(label).astype(np.int32)\n depth = np.stack(depth).astype(np.float32)\n y_rot = np.stack(y_rot).astype(np.float32)\n # When `use_difficult==False`, all elements in `difficult` are False.\n difficult = np.array(difficult, dtype=np.bool).astype(np.uint8) # PyTorch don't support np.bool\n\n # Load a image\n img_file = os.path.join(self.data_dir, 'image_2', id_ + '.png')\n img = read_image(img_file, color=True)\n\n # if self.return_difficult:\n # return img, bbox, label, difficult\n return img, bbox, label, difficult, depth, y_rot", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def struct_sim(image1: np.ndarray, image2: np.ndarray, **kwargs) -> np.ndarray:\n n, h, w = image1.shape\n assert (n, h, w) == image2.shape\n ssim = np.zeros(n)\n for ii in range(n):\n ssim[ii] = structural_similarity(image1[ii], image2[ii], **kwargs)\n return ssim", "def get_only_target():\r\n\ttype1_img, type1_label, type2_img, type2_label = load_data_all()\r\n\ttype1_imgs, type1_labels = type1_makeup(type1_img, type1_label, v1 = 100, v2 = 160, masking = True)\r\n\ttype2_imgs, type2_labels = type2_makeup(type2_img, type2_label, v1 = 100, v2 = 55, masking = True)\r\n\r\n\tnew_type1_imgs, new_type1_labels = find_contain_target(type1_imgs, type1_labels)\r\n\tnew_type2_imgs, new_type2_labels = 
find_contain_target(type2_imgs, type2_labels)\r\n\r\n\treturn {'type1_img' : new_type1_imgs, 'type1_label' : new_type1_labels,\r\n\t\t'type2_img':new_type2_imgs, 'type2_label':new_type2_labels}", "def get_visual_attrib_template():\n return {\"conaffinity\": \"0\", \"contype\": \"0\", \"mass\": \"1e-8\", \"group\": \"1\"}", "def get_image_pair(self):\n pooled_images = self.real_images + self.fake_images\n img1_info = random.choice(self.real_images)\n if self.enable_fake_pairs:\n img1_info = random.choice(pooled_images)\n img2_info = random.choice(pooled_images)\n img1 = Image.open(img1_info[0])\n img2 = Image.open(img2_info[0])\n label1 = np.array([img1_info[1][0]])\n label2 = np.array([img2_info[1][0]])\n\n return img1, img2, label1, label2", "def getTiffInfo(path):\n # py 2/3 comp\n first_file = glob.glob(os.path.join(path, '*.tif'))[0]\n if ScanImageTiffReader is not None and ScanImageTiffReader(first_file).metadata() != '':\n string = ScanImageTiffReader(first_file).metadata()\n else:\n tfh = tifffile.TiffFile(first_file)\n # If software key is in dict tags --> SI2016\n if 'software' in tfh.pages[0].tags:\n string = tfh.pages[0].tags['software'].value.decode('utf-8')\n else:\n string = tfh.pages[0].tags['image_description'].value.decode('utf-8')\n string = \" \".join(string.split()).replace('\\\\', ' ')\n string = string.replace(')', '')\n string = string.replace('(', '')\n return string", "def return_weights(self):\n w0 = self.comparator.weight.data.numpy()\n b0 = self.comparator.bias.data.numpy()\n\n w1 = self.matcher.weight.data.numpy()\n b1 = self.matcher.bias.data.numpy()\n\n w2 = self.head.weight.data.numpy()\n b2 = self.head.bias.data.numpy()\n\n return w0, b0, w1, b1, w2, b2", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def test_get_weight_parameter(self):\n self.assertIsNotNone(model_utils.get_weight_parameter(MaskConv2d(32, 32, 3)))\n\n weight_groups = model_utils.get_weight_parameter(\n GroupConv2d(32, 64, 3, groups=2)\n )\n self.assertIsNotNone(weight_groups)\n self.assertIsInstance(weight_groups, torch.Tensor)\n self.assertEqual(weight_groups.shape[0], 64)\n self.assertEqual(weight_groups.shape[1], 16)", "def make_img_gt_pair(self, idx):\n path=os.path.join(self.img_list[idx])\n #print(path)\n img = cv2.imread(path)\n #print(img.shape)\n if self.labels[idx] is not None:\n label = cv2.imread(os.path.join(self.labels[idx]))\n else:\n gt = np.zeros(img.shape[:-1], dtype=np.uint8)\n\n if self.inputRes is not None:\n img = imresize(img, self.inputRes)\n if self.labels[idx] is not None:\n label = imresize(label, self.inputRes, interp='nearest')\n\n img = np.array(img, dtype=np.float32)\n img = np.subtract(img, np.array(self.meanval, dtype=np.float32))\n\n if self.labels[idx] is not None:\n gt = np.array(label, dtype=np.float32)\n gt = gt/np.max([gt.max(), 1e-8])\n\n return img, gt", "def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n 
root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info", "def calc_tdict(band_names, sar_image=None, segments=None, thresh_file=None, directory_figure=None, source=['pixels', 'segments'], approach=['tiled', 'global'], t_method=['KI', 'Otsu'], tile_dim=[200, 200], n_final=5, hand_matrix=None, hand_t=100):\n # Calculate threshold values\n t_dict = {}\n for source_value in source:\n t_dict[source_value] = {}\n if source_value == 'pixels':\n for approach_value in approach:\n t_dict[source_value][approach_value] = {}\n if \"KI\" in t_method:\n t_dict[source_value][approach_value]['KI'] = {}\n if \"Otsu\" in t_method:\n t_dict[source_value][approach_value]['Otsu'] = {}\n if approach_value == 'tiled':\n for ib, band in enumerate(band_names):\n t_values, _ = tiled_thresholding(sar_image[ib], selection='Martinis', t_method=['KI', 'Otsu'], tile_dim=tile_dim, n_final=n_final, hand_matrix=hand_matrix, hand_t=hand_t, directory_figure=directory_figure, incomplete_tile_warning=True)\n if \"KI\" in t_method:\n t_dict[source_value][approach_value]['KI'][band] = t_values[0]\n if \"Otsu\" in t_method:\n t_dict[source_value][approach_value]['Otsu'][band] = t_values[1]\n elif approach_value == 'global':\n if \"KI\" in t_method:\n for ib, band in enumerate(band_names):\n t_dict[source_value][approach_value][\"KI\"][band] = apply_ki(sar_image[ib], accuracy=200)\n if \"Otsu\" in t_method:\n for ib, band in enumerate(band_names):\n t_dict[source_value][approach_value][\"Otsu\"][band] = apply_otsu(sar_image[ib], accuracy=200)\n else: # if source_value == 'segments'\n approach_value = 'global'\n t_dict[source_value][approach_value] = {}\n if \"KI\" in t_method:\n t_dict[source_value][approach_value][\"KI\"] = {}\n for band in band_names:\n t_dict[source_value][approach_value][\"KI\"][band] = apply_ki(segments[band], accuracy=200)\n if \"Otsu\" in t_method:\n t_dict[source_value][approach_value][\"Otsu\"] = {}\n for band in band_names:\n t_dict[source_value][approach_value][\"Otsu\"][band] = apply_otsu(segments[band], accuracy=200)\n # Save to file\n if thresh_file:\n with open(thresh_file, \"wb\") as handle:\n pickle.dump(t_dict, handle)\n # Make plot of all threshold values\n if directory_figure:\n for band in band_names:\n leghandles = []\n leglabels = []\n fig, ax = plt.subplots()\n xmin = []\n xmax = []\n if segments:\n h, bins = np.histogram(segments[band], bins=200, density=True)\n g = np.arange(bins[0]+(bins[1]-bins[0])/2, bins[-1], (bins[1]-bins[0]))\n leghandles.append(ax.plot(g, h, color='blue')[0])\n leglabels.append(\"h segments\")\n xmin.append(np.min(segments[band]))\n xmax.append(np.max(segments[band]))\n if sar_image is not None:\n h, bins = np.histogram(sar_image[band_names.index(band)].ravel(), bins=200, density=True)\n g = np.arange(bins[0]+(bins[1]-bins[0])/2, bins[-1], (bins[1]-bins[0]))\n leghandles.append(ax.plot(g, h, color='lightblue')[0])\n leglabels.append(\"h pixels\")\n xmin.append(np.min(sar_image[band_names.index(band)]))\n 
xmax.append(np.max(sar_image[band_names.index(band)]))\n ax.set_xlim(np.min(np.array(xmin)), np.max(np.array(xmax)))\n M = 0.95 * ax.get_ylim()[1]\n for source_value in source:\n if source_value == 'pixels':\n for approach_value in approach:\n for t_method_value in t_method:\n if t_method_value == 'KI':\n color = 'red'\n else:\n color = 'green'\n if approach_value == 'global':\n linestyle = 'dashed'\n else: \n linestyle = 'dotted'\n leghandles.append(ax.plot([t_dict[source_value][approach_value][t_method_value][band], \\\n t_dict[source_value][approach_value][t_method_value][band]], [0, M], color=color, linestyle=linestyle)[0])\n leglabels.append(source_value + ' ' + approach_value + ' ' + t_method_value)\n else:\n approach_value = 'global'\n for t_method_value in t_method:\n if t_method_value == 'KI':\n color = 'red'\n else:\n color = 'green'\n leghandles.append(ax.plot([t_dict[source_value][approach_value][t_method_value][band], \\\n t_dict[source_value][approach_value][t_method_value][band]], [0, M], color=color)[0])\n leglabels.append(source_value + ' ' + approach_value + ' ' + t_method_value)\n fig.legend(leghandles, leglabels, framealpha=1)\n plt.savefig(os.path.join(directory_figure, \"Thresholding_HistWithTs_{}.png\".format(band)))\n # Return\n return t_dict", "def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)", "def print_image_info(image, resize=rsz_default, kernel=kernel_size):\n\tprint \"Image Size: {0}\".format(image.shape)\n\tprint \"Image Max: {0}\".format(image.max())\n\tprint \"Image Min: {0}\".format(image.min())\n\tprint \"Image Mean: {0}\".format(image.mean())\n\tprint \"Image dtype: {0}\\n\".format(image.dtype)\n\timage = to_uint8(image)\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\tsecond_m = ['m20', 'm11', 'm02', 'm30', 'm21', 'm12', 'm03']\n\tprint \"Zero Order Moment: {0}\".format(M['m00'])\n\tprint \"First Order Moments: {0}, {1}\".format(M['m10'], M['m01'])\n\tprint \"Second Order Moments:\"\n\tsecond_m_str = ''\n\tfor m2 in second_m:\n\t\tsecond_m_str += \"{0},\".format(M[m2])\n\tprint second_m_str[:-1]", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def get_weight(self, item1: Any, item2: Any) -> Union[int, float]:\n v1 = self._vertices[item1]\n v2 = self._vertices[item2]\n return v1.neighbours.get(v2, 0)", "def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n return (a + (1 - a) * b) * c * d", "def extract_feat(self, imgs):\n pass", "def extract_feat(self, imgs):\n pass", "def get_sched_info(image):\n sched_info = SchedulingInfo()\n\n metadata_dir = find_metadata_dir(None, image)\n if metadata_dir:\n info_files = glob.glob('{}/schedblock-info-*[0-9].txt'.format(metadata_dir))\n for 
filename in info_files:\n in_cal = False\n with open(filename, 'r') as f:\n line_num = 0\n for line in f:\n if line.startswith('==') or line.startswith('--'):\n continue\n line_num += 1\n # print (line.strip())\n if line_num == 2:\n parts = line.split(' ')\n print (parts)\n if parts[1] == 'bandpass':\n in_cal = True\n sched_info.calid = parts[0]\n else:\n sched_info.sbid = parts[0]\n sched_info.field_name = parts[1]\n \n if not in_cal and line.startswith('common.target.src%d.corrmode'):\n sched_info.corr_mode = line.split(' = ')[1].strip()\n if not in_cal and line.startswith('common.target.src%d.footprint.name'):\n sched_info.footprint = line.split(' = ')[1].strip()\n if not in_cal and line.startswith('common.target.src%d.footprint.pitch'):\n sched_info.pitch = line.split(' = ')[1].strip()\n if in_cal and line.startswith('common.target.src%d.field_name'):\n sched_info.cal_src = line.split(' = ')[1].strip()\n\n return sched_info", "def get_image_params(image_path):\n image = cv2.imread(image_path)\n\n return image, image.shape", "def extract_feat(self, img):\n xb = self.backbone(img)\n if self.with_neck:\n xn = self.neck(xb)\n #for xx in xb:\n # print(xx.shape)\n # print(xb[2].shape)\n return [xb[2]], xn", "def _get_model_attr(model_dir):\n global MODEL_URL\n coco_utils.maybe_download_and_extract(model_dir, MODEL_URL)\n filename = MODEL_URL.split(\"/\")[-1]\n filepath = os.path.join(model_dir, filename)\n if not os.path.exists(filepath):\n raise IOError(\"VGG Model not found.\")\n \n data = scipy.io.loadmat(filepath)\n mean = data['normalization']['averageImage'][0,0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n weights = np.squeeze(data['layers'])\n\n return weights, mean_pixel", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = 
ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def diff_image_feature(image0, image1):\n return 0", "def calculate_tumor(filename, verbose = False):\n img = nibabel.load(filename)\n data = img.get_data()\n pixdim = img.header['pixdim']\n xyzt_units = img.header['xyzt_units']\n #pixdim[1],pixdim[2],pixdim[3] stores width, depth and height\n volume_per_pix = pixdim[1]*pixdim[2]*pixdim[3]\n\n volumes = {}\n volumes['total vasogenic edema volume'] = round(sum(data[data ==2 ])*volume_per_pix/1000, 3)\n volumes['enhancing portion'] = round(sum(data[data == 4]) * volume_per_pix/1000, 3)\n volumes['non enhancing portion'] = round(sum(data[data == 1]) * volume_per_pix/1000, 3)\n volumes['total tumor volume'] = round(volumes['enhancing portion'] + volumes['non enhancing portion'], 3)\n if xyzt_units == 1:\n volumes['unit'] = 'L'\n if xyzt_units == 2:\n volumes['unit'] = 'ML'\n if xyzt_units == 3:\n volumes['unit'] = 'UL'\n\n return volumes", "def statistics(img1_blobs, img2_blobs, matches):\n\tstatistics = {}\n\n\tstatistics['#Interest Points in img1'] = len(img1_blobs)\n\tstatistics['#Interest Points in img2'] = len(img2_blobs)\n\tstatistics['Accepted Matches'] = len(matches)\n\tdissimilarity = [match[2] for match in matches]\n\tstatistics['Mean of accepted matches'] = sum(dissimilarity)/len(dissimilarity)\n\tstatistics['SD of accepted matches'] = np.std(dissimilarity)\n\treturn statistics", "def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n citation=_CITATION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=_CLASS_NAMES),\n }),\n homepage=_HOMEPAGE,\n supervised_keys=('image', 'label'),\n )", "def describe(image):\n needle = cv2.imread(image, 0)\n orb = cv2.ORB()\n keypoints, description = orb.detectAndCompute(needle, None)\n print(keypoints)\n print(description)\n return keypoints, description", "def test_rt_metadata(self):\n\n img = hopper()\n\n # Behaviour change: re #1416\n # Pre ifd rewrite, ImageJMetaData was being written as a string(2),\n # Post ifd rewrite, it's defined as arbitrary bytes(7). 
It should\n # roundtrip with the actual bytes, rather than stripped text\n # of the premerge tests.\n #\n # For text items, we still have to decode('ascii','replace') because\n # the tiff file format can't take 8 bit bytes in that field.\n\n basetextdata = \"This is some arbitrary metadata for a text field\"\n bindata = basetextdata.encode('ascii') + b\" \\xff\"\n textdata = basetextdata + \" \" + chr(255)\n reloaded_textdata = basetextdata + \" ?\"\n floatdata = 12.345\n doubledata = 67.89\n info = TiffImagePlugin.ImageFileDirectory()\n\n ImageJMetaData = tag_ids['ImageJMetaData']\n ImageJMetaDataByteCounts = tag_ids['ImageJMetaDataByteCounts']\n ImageDescription = tag_ids['ImageDescription']\n\n info[ImageJMetaDataByteCounts] = len(bindata)\n info[ImageJMetaData] = bindata\n info[tag_ids['RollAngle']] = floatdata\n info.tagtype[tag_ids['RollAngle']] = 11\n info[tag_ids['YawAngle']] = doubledata\n info.tagtype[tag_ids['YawAngle']] = 12\n\n info[ImageDescription] = textdata\n\n f = self.tempfile(\"temp.tif\")\n\n img.save(f, tiffinfo=info)\n\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts], (len(bindata),))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (len(bindata),))\n\n self.assertEqual(loaded.tag[ImageJMetaData], bindata)\n self.assertEqual(loaded.tag_v2[ImageJMetaData], bindata)\n\n self.assertEqual(loaded.tag[ImageDescription], (reloaded_textdata,))\n self.assertEqual(loaded.tag_v2[ImageDescription], reloaded_textdata)\n\n loaded_float = loaded.tag[tag_ids['RollAngle']][0]\n self.assertAlmostEqual(loaded_float, floatdata, places=5)\n loaded_double = loaded.tag[tag_ids['YawAngle']][0]\n self.assertAlmostEqual(loaded_double, doubledata)\n\n # check with 2 element ImageJMetaDataByteCounts, issue #2006\n\n info[ImageJMetaDataByteCounts] = (8, len(bindata) - 8)\n img.save(f, tiffinfo=info)\n loaded = Image.open(f)\n\n self.assertEqual(loaded.tag[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))\n self.assertEqual(loaded.tag_v2[ImageJMetaDataByteCounts],\n (8, len(bindata) - 8))", "def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, file_format", "def getTasseledCap(img):", "def info(self):\n info = []\n # meta data\n meta = self.meta\n for key in meta:\n info.append((key, self.meta[key]))\n # background correction\n info += self._fl.info\n return info", "def get_img_info(self, idx):\n\n image = self.get_img(idx)\n img_height = image.size[0]\n img_width = image.size[1]\n\n return {\"height\": img_height, \"width\": img_width}", "def get_itk_data(path_or_image, verbose=False):\n\n if isinstance(path_or_image, str):\n image = get_itk_image(path_or_image)\n else:\n image = path_or_image\n\n arr = itk.GetArrayFromImage(image)\n shape = arr.shape\n spacing = image.GetSpacing()[::-1]\n data_type = arr.dtype\n\n if verbose:\n print '\\t image shape: ' + str(shape)\n print '\\t image spacing: ' + str(spacing)\n print '\\t image data type: ' + str(data_type)\n\n return arr, shape, spacing", "def test_attributes_weigthed(self):\n fields = Mineral.attributes_weighted()\n self.assertListEqual(fields[:-2], [\n 'group',\n 'formula',\n 'category',\n 'strunz_classification',\n 'crystal_system',\n 'mohs_scale_hardness',\n 'luster',\n 'color',\n 'specific_gravity',\n 'cleavage',\n 'diaphaneity',\n 'crystal_habit',\n 'streak',\n 'optical_properties',\n 'refractive_index', ])\n 
self.assertSetEqual(set(fields[-2:]), {\n 'unit_cell',\n 'crystal_symmetry',\n })", "def get_params2D(ima, xform = \"xform.align2d\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"2D\")\n\treturn d[\"alpha\"],d[\"tx\"],d[\"ty\"],d[\"mirror\"],d[\"scale\"]", "def info(self):\n\n print(\"pixellisation:\", self.pixel)\n print(\"number of components:\", self.ncomp)\n print(\"number of pixels:\", self.data.shape[:] if self.ncomp == 1 else self.data.shape[1:])\n print(\"nside:\", self.nside)\n print(\"geometry:\", self.geometry)\n print(\"coordinates:\", self.coordinate)", "def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def _extract_weight_tuples(model):\n mlist = get_modules(model)\n return tuple([(m,'weight') for m in mlist])", "def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. 
Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]", "def extract_img_feat(self, img, img_metas):\n if self.with_img_backbone and img is not None:\n input_shape = img.shape[-2:]\n # update real input shape of each single img\n for img_meta in img_metas:\n img_meta.update(input_shape=input_shape)\n\n if img.dim() == 5 and img.size(0) == 1:\n img.squeeze_()\n elif img.dim() == 5 and img.size(0) > 1:\n B, N, C, H, W = img.size()\n img = img.view(B * N, C, H, W)\n img_feats = self.img_backbone(img)\n else:\n return None\n if self.with_img_neck:\n img_feats = self.img_neck(img_feats)\n return img_feats", "def get_weights(self):\n \n w = torch.exp(self._weight) * self.mask_d + self._weight * self.mask_o\n\n w_squared_norm = (w ** 2).sum(-1, keepdim=True)\n \n w = self._diag_weight.exp() * w / w_squared_norm.sqrt()\n \n wpl = self._diag_weight + self._weight - 0.5 * torch.log(w_squared_norm) \n\n return w.t(), wpl.t()[self.mask_d.bool().t()].view(\n self.dim, self.in_features // self.dim, self.out_features // self.dim)", "def __getitem__(self, idx):\n\n image = self.preprocessor.resize_image(cv.imread(self.samples[idx][0], cv.IMREAD_GRAYSCALE), self.image_size)\n gt_text = self.samples[idx][1]\n return image, gt_text", "def __GetGradientInfo(self, image):\n # Create the output variable.\n gradient = [np.zeros(image.shape[:2])] * 2\n orientation = np.zeros(image.shape[:2])\n magnitude = np.zeros(image.shape[:2])\n\n # Grayscale image.\n grayscale = image.copy()\n if len(grayscale.shape) == 3:\n grayscale = cv2.cvtColor(grayscale, cv2.COLOR_BGR2GRAY)\n\n #<!--------------------------------------------------------------------------->\n #<!-- YOUR CODE HERE -->\n #<!--------------------------------------------------------------------------->\n gradient = np.gradient(grayscale)\n \n #orientation = np.arctan(gradient[0], gradient[1]) * (180. 
/ np.pi)\n #magnitude = np.sqrt(np.power(gradient[0], 2) + np.power(gradient[1], 2))\n \n magnitude, orientation = cv2.cartToPolar(gradient[0], gradient[1], angleInDegrees=True)\n \n #<!--------------------------------------------------------------------------->\n #<!-- -->\n #<!--------------------------------------------------------------------------->\n \n # Return the final result.\n return gradient, orientation, magnitude", "def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti", "def _extract_images(source_path, target_path, merge_labels):\n\n images_path = os.path.join(source_path, 'imagesTr')\n labels_path = os.path.join(source_path, 'labelsTr')\n\n # Filenames have the form 'hippocampus_XX.nii.gz'\n filenames = [x for x in os.listdir(images_path) if x[:5] == 'hippo']\n\n # Create directories\n if not os.path.isdir(target_path):\n os.makedirs(target_path)\n\n for filename in filenames:\n\n # Extract only T2-weighted\n x = sitk.ReadImage(os.path.join(images_path, filename))\n x = sitk.GetArrayFromImage(x)\n y = sitk.ReadImage(os.path.join(labels_path, filename))\n y = sitk.GetArrayFromImage(y)\n\n # Shape expected: (35, 51, 35)\n # Average label shape: (24.5, 37.8, 21.0)\n assert x.shape == y.shape\n\n # No longer distinguish between hippocampus proper and subiculum\n if merge_labels:\n y[y == 2] = 1\n\n # Save new images so they can be loaded directly\n study_name = filename.replace('_', '').split('.nii')[0]\n sitk.WriteImage(sitk.GetImageFromArray(x), join_path([target_path, study_name + \".nii.gz\"]))\n sitk.WriteImage(sitk.GetImageFromArray(y), join_path([target_path, study_name + \"_gt.nii.gz\"]))", "def get_info_in_tuple(self):\r\n return self.key, self.value, self.get_color(), self.size_tree", "def compute_psnr_and_ssim(image1, image2, border_size=0):\r\n if len(image1.shape) == 2:\r\n image1 = image1.reshape(image1.shape[0], image1.shape[1], 1)\r\n if len(image2.shape) == 2:\r\n image2 = image2.reshape(image2.shape[0], image2.shape[1], 1)\r\n\r\n if image1.shape[0] != image2.shape[0] or image1.shape[1] != image2.shape[1] or image1.shape[2] != image2.shape[2]:\r\n return None\r\n\r\n image1 = trim_image_as_file(image1)\r\n image2 = trim_image_as_file(image2)\r\n\r\n if border_size > 0:\r\n image1 = image1[border_size:-border_size, border_size:-border_size, :]\r\n image2 = image2[border_size:-border_size, border_size:-border_size, :]\r\n\r\n psnr = peak_signal_noise_ratio(image1, image2, data_range=255)\r\n ssim = structural_similarity(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,\r\n sigma=1.5, data_range=255)\r\n return psnr, ssim", "def _get_wimage(self, arr_np):\n #return result\n raise NotImplementedError", "def get_image_info_dtype(path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n 
extra_dtype=None):\n\n path_fmt = 'U%d' % path_len\n\n if image_id_len is None:\n image_id_descr = 'i8'\n else:\n image_id_descr = 'U%d' % image_id_len\n\n if ext_len is not None:\n ext_descr = 'U%d' % ext_len\n else:\n ext_descr = 'i2'\n dt=[]\n for ctype in IMAGE_INFO_TYPES:\n path_name = '%s_path' % ctype\n ext_name = '%s_ext' % ctype\n\n dt += [\n (path_name, path_fmt),\n (ext_name,ext_descr),\n ]\n\n dt += [\n ('image_id', image_id_descr),\n ('image_flags', 'i8'),\n ('magzp', 'f4'),\n ('scale', 'f4'),\n ('position_offset','f8'),\n ]\n if wcs_len is not None:\n wcs_fmt = 'U%d' % wcs_len\n dt += [\n ('wcs',wcs_fmt),\n ]\n\n if extra_dtype is not None:\n dt += extra_dtype\n\n return dt", "def __getitem__(self, i: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n instance = self.data_manager[i]\n x = self.im_to_x(instance.im)\n loss_weights, c_star, b_star = self._encoder(instance.labels)\n\n return loss_weights, x, c_star, b_star", "def get_waveform_info():\n dpo.write('acquire:stopafter sequence')\n dpo.write('acquire:state on')\n dpo.query('*OPC?')\n binaryFormat = dpo.query('wfmoutpre:bn_fmt?').rstrip()\n print('Binary format: ', binaryFormat)\n numBytes = dpo.query('wfmoutpre:byt_nr?').rstrip()\n print('Number of Bytes: ', numBytes)\n byteOrder = dpo.query('wfmoutpre:byt_or?').rstrip()\n print('Byte order: ', byteOrder)\n encoding = dpo.query('data:encdg?').rstrip()\n print('Encoding: ', encoding)\n if 'RIB' in encoding or 'FAS' in encoding:\n dType = 'b'\n bigEndian = True\n elif encoding.startswith('RPB'):\n dType = 'B'\n bigEndian = True\n elif encoding.startswith('SRI'):\n dType = 'b'\n bigEndian = False\n elif encoding.startswith('SRP'):\n dType = 'B'\n bigEndian = False\n elif encoding.startswith('FP'):\n dType = 'f'\n bigEndian = True\n elif encoding.startswith('SFP'):\n dType = 'f'\n bigEndian = False\n elif encoding.startswith('ASCI'):\n raise visa.InvalidBinaryFormat('ASCII Formatting.')\n else:\n raise visa.InvalidBinaryFormat\n return dType, bigEndian", "def pick19_18_16(ds1, ds2):\n wrf_number = int(re.findall(r'\\d+', ds2.name)[0])\n wrf_weight = (wrf_number==19) * 4 + (wrf_number==18) *2 + (wrf_number==16) *1 \n result = [(v.dataTime, wrf_weight) for v in ds2]\n return result, 'pick19_18_16'", "def get_maps_and_info(instrument,target,real=True):\n\n fitsfile, wtfile, wtext, wtisrms, tab = get_fits_files(instrument,target,real=True)\n data_map, header = fits.getdata(fitsfile, header=True)\n wt_map = fits.getdata(wtfile,wtext)\n if wtisrms == True:\n wt_map = 1.0/wt_map**2\n\n image_data, ras, decs, hdr, pixs = get_astro(fitsfile)\n w = wcs.WCS(fitsfile)\n \n return data_map, wt_map, header, ras, decs, pixs, w, tab", "def extract_mol_info(molecule_etree):\n smiles = extract_smiles(molecule_etree)\n alpha = extract_and_check_alpha(molecule_etree)\n beta = extract_and_check_beta(molecule_etree)\n return smiles, alpha, beta", "def getInternalMetadata(self, **kwargs):\n results = {}\n for idx, dir in enumerate(self._tiffDirectories[::-1]):\n if dir:\n if hasattr(dir, '_description_record'):\n results['xml' + (\n '' if not results.get('xml') else '_' + str(idx))] = self._xmlToMetadata(\n dir._description_record)\n for k, v in dir._tiffInfo.items():\n if k == 'imagedescription' and hasattr(dir, '_description_record'):\n continue\n if isinstance(v, (str, bytes)) and k:\n if isinstance(v, bytes):\n try:\n v = v.decode()\n except UnicodeDecodeError:\n continue\n results.setdefault('tiff', {})\n if not idx and k not in results['tiff']:\n results['tiff'][k] = v\n elif k not in 
results['tiff'] or v != results['tiff'][k]:\n results['tiff'][k + ':%d' % idx] = v\n return results", "def get_spec_weight(self, i, j):\n return self.weights[i][j]", "def get_sift_features(image):\n frames, descriptors = sift(image, compute_descriptor=True, float_descriptors=True, verbose=False)\n return frames, descriptors", "def get_sift_features(image):\n frames, descriptors = sift(image, compute_descriptor=True, float_descriptors=True, verbose=False)\n return frames, descriptors", "def get_sample_weights(self):\n target_to_weight = {}\n for target, count in self.class_count.items():\n target_to_weight[target] = self.total / count\n\n sample_weights = []\n for _, target in self.imgs:\n sample_weights.append(target_to_weight[target])\n\n return sample_weights", "def image_info(image, task_state, video_state):\n image_info = 'Frame {}/{} ({})'.format(video_state.image_idx + 1, video_state.num_frames, video_state.get_image_name())\n cv2.putText(image, image_info, (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, video_state.color, 1)\n\n label_info = []\n if len(video_state.labels) > 0:\n label_info = ['{}'.format(a) for (f, a) in video_state.labels.items() if video_state.get_image_name().split('.')[0] == f]\n if len(label_info) == 0:\n label_info = ['None']\n for i, row in enumerate(label_info):\n cv2.putText(image, row, (5, 35 + i * 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, video_state.color, 1)\n cv2.imshow('Video', image)\n if video_state.look_ahead == 0: # no lookahead\n cv2.destroyWindow('Foresight')\n cv2.destroyWindow('Hindsight')\n elif video_state.look_ahead == 1: # only foresight\n foresight(video_state)\n elif video_state.look_ahead == 2: # foresight and hindsight\n foresight(video_state)\n hindsight(video_state)", "def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'", "def __getitem__(self, idx):\n\t\tsample = self.samples[idx]\n\t\tfrom PIL import Image\n\t\timage = Image.open(self.DatasetWrapper.features(sample))\n\t\t\n\t\tlabel = self.DatasetWrapper.label(sample)\n\t\timage = self.transformer(image)\n\t\treturn image, label", "def weight_images(im_dir, wt_dir, weight_dir, im_weight_dir, wt_weight_dir, imtype='intbgsub', wttype='rrhr'):\n im_suff, wt_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)\n imfiles = sorted(glob.glob(os.path.join(im_dir, im_suff)))\n wtfiles = sorted(glob.glob(os.path.join(wt_dir, wt_suff))) \n\n # weight each image\n for i in range(len(imfiles)):\n # read in the data\n imfile = imfiles[i]\n wtfile = os.path.join(os.path.dirname(wtfiles[i]), os.path.basename(imfile).replace(imtype, wttype))\n im, hdr = astropy.io.fits.getdata(imfile, header=True)\n rrhr, rrhrhdr = astropy.io.fits.getdata(wtfile, header=True)\n\n # weight the data by the exposure time\n wt = rrhr\n newim = im * wt\n\n # write data to new files and copy the *_area.fits files created by Montage to have the same naming convention\n newfile = os.path.join(im_weight_dir, os.path.basename(imfile))\n astropy.io.fits.writeto(newfile, newim, hdr)\n old_area_file = imfile.replace('.fits', '_area.fits')\n if os.path.exists(old_area_file):\n new_area_file = newfile.replace('.fits', '_area.fits')\n shutil.copy(old_area_file, new_area_file)\n\n weightfile = os.path.join(wt_weight_dir, os.path.basename(wtfile))\n astropy.io.fits.writeto(weightfile, wt, rrhrhdr)\n old_area_file = 
wtfile.replace('.fits', '_area.fits')\n if os.path.exists(old_area_file):\n new_area_file = weightfile.replace('.fits', '_area.fits')\n shutil.copy(old_area_file, new_area_file)", "def get_info_inst(self):\n return self.get_info(\"INST\")", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def __getitem__(self, item):\n\n image = Image.open(self.imgs_path[item]).convert(\"RGB\")\n\n # Applying the transformations\n image = self.transform(image)\n\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n # print(self.labels[item])\n # print(self.extra_info[item])\n\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n\n return image, labels, extra_info, img_name", "def detectAndDescribe(image, method=None):\n assert method is not None, \"You need to define a feature detection method. Values are: 'sift', 'surf'\"\n # detect and extract features from the image\n if method == 'sift':\n descriptor = cv2.xfeatures2d.SIFT_create()\n elif method == 'surf':\n descriptor = cv2.xfeatures2d.SURF_create()\n elif method == 'brisk':\n descriptor = cv2.BRISK_create()\n elif method == 'orb':\n descriptor = cv2.ORB_create()\n \n # get keypoints and descriptors\n (kps, features) = descriptor.detectAndCompute(image, None)\n \n return (kps, features)", "def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids", "def detectAndDescribe(image, method=None):\n \n assert method is not None, \"You need to define a feature detection method. Values are: 'sift', 'surf'\"\n \n # detect and extract features from the image\n if method == 'sift':\n descriptor = cv2.xfeatures2d.SIFT_create()\n elif method == 'surf':\n descriptor = cv2.xfeatures2d.SURF_create()\n elif method == 'brisk':\n descriptor = cv2.BRISK_create()\n elif method == 'orb':\n descriptor = cv2.ORB_create()\n \n # get keypoints and descriptors\n (kps, features) = descriptor.detectAndCompute(image, None)\n \n return (kps, features)" ]
[ "0.56604415", "0.56434953", "0.5608303", "0.55392164", "0.55089027", "0.5473448", "0.5417464", "0.5324782", "0.5314561", "0.5309392", "0.5288557", "0.5276307", "0.524422", "0.52250624", "0.52200425", "0.52154565", "0.5210282", "0.5189751", "0.51866674", "0.515786", "0.5154455", "0.5144844", "0.5139518", "0.5130497", "0.5125902", "0.5121374", "0.5097309", "0.50726503", "0.50677985", "0.50657916", "0.5058501", "0.5054201", "0.50508016", "0.5032657", "0.5031851", "0.5025383", "0.5007715", "0.49992195", "0.49978915", "0.4994852", "0.49902457", "0.49890143", "0.49875546", "0.49800912", "0.496437", "0.49636486", "0.496354", "0.49627703", "0.49627703", "0.49627426", "0.4957436", "0.49496806", "0.49495298", "0.49489966", "0.4946127", "0.49420825", "0.4940696", "0.49401", "0.49293792", "0.4924934", "0.4920904", "0.49100578", "0.4906822", "0.49034002", "0.49016145", "0.48901537", "0.4851263", "0.4851233", "0.48450196", "0.4843851", "0.48419645", "0.48390928", "0.48374602", "0.483524", "0.48330328", "0.48307478", "0.48289698", "0.48272556", "0.48261738", "0.48223585", "0.4811237", "0.47983098", "0.47906077", "0.4790394", "0.47873232", "0.4786132", "0.47829872", "0.47816285", "0.47791272", "0.47791272", "0.47749275", "0.47687232", "0.4767447", "0.4765069", "0.47618756", "0.47568375", "0.47562954", "0.47561622", "0.4754274", "0.47521427", "0.4751035" ]
0.0
-1
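Each record in this dump closes with a list of candidate negative documents, a parallel list of scores, an overall document score (0.0 above), and a document rank (-1 above). The short Python sketch below shows one way such a record could be consumed once it has been parsed into a dict; the key names (negatives, negative_scores), the assumption that the two lists are index-aligned, and the function name hardest_negatives are illustrative assumptions, not part of the dataset itself.

# Minimal sketch (assumption-based, not part of the dataset): rank the negative
# documents of one parsed record by their score and keep the top few.
def hardest_negatives(record, top_k=5):
    scores = [float(s) for s in record["negative_scores"]]  # scores appear to be stored as strings
    pairs = sorted(zip(record["negatives"], scores), key=lambda p: p[1], reverse=True)
    return pairs[:top_k]  # higher score = closer to the query, i.e. a harder negative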
Create a list of epis in pfile format (epi_series) and of epis in dicom format (epirt_paths)
def _EpiInfo(self, info, path): epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \ 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']} for key in self.epi_keys.keys(): if self.epi_keys[key] != str(epi_vals[key]): # Return None, which will cause these data to be ignored. return None # Early versions of the EPIC software saved p-files for the setup epis. # Don't process these (or any epi with fewer than eight useable frames). if self.hdr['tdim'] < (8 + self.skip): return None info['slice_order'] = self.shdr.get('SliceOrder', 'altplus') if self.shdr['EffEchoSpacing'] is not None: info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000. else: info['echo_spacing'] = 0. if info['data_filetype'] == 'dicom': # Entry is name of dirctory for dicom images. if not os.path.isdir(path): entry = os.path.dirname(path) else: entry = path else: # Otherwise it is the name of a directory containing p-files. entry = path if info['data_filetype'] == 'ge_data' and info['type'] is not None: # Found a pfile. Add it to the list. if entry not in self.pfiles and info['tdim'] > 2: self.pfiles.append(entry) self.entry_map['epi'].append(entry) if info['series'] not in self.epi_series: self.epi_series.append(info['series']) elif info['data_filetype'] == 'dicom' and \ info['psdname'] == 'epibold': # This is the initial EPI done during setup. info['outdir'] = self.episetup_dir info['type'] = 'first_epi' self.entry_map['first_epi'].append(entry) info['imgfile'] = '%s/first_epi_%d' % \ (self.episetup_dir, len(self.entry_map['first_epi'])) elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \ info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2: # This is an epi reconstructed on the scanner. self.epi_series.append(info['series']) self.entry_map['epi'].append(entry) if not os.path.isdir(path): tmp_path = os.path.dirname(path) else: tmp_path = path self.epirt_paths.append(tmp_path) if self.fsl_flip: info['filetype'] = 'brik' else: info['filetype'] = self.tmplt['epi_file_format'] info['TR'] = self.hdr['tsize'] if self.tmplt['acq_tr'] is None: info['acq_tr'] = float(info['TR']) else: info['acq_tr'] = float(self.tmplt['acq_tr']) return OK
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % 
self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread(dcm_file)\n except:\n pass\n else:\n # If data is EPI then get start time, etc\n if 'EPI' in dcm_data.ImageType:\n 
dcm_dict[d_cnt] = {}\n dcm_dict[d_cnt]['dcm_file'] = dcm_file\n dcm_dict[d_cnt]['task_name'] = dcm_data.SeriesDescription\n dcm_dict[d_cnt]['task_name'] = dcm_dict[d_cnt]['task_name'].replace('_','-')\n date = dcm_data.SeriesDate\n start = dcm_data.SeriesTime\n start_time = '%s-%s-%s %s:%s:%s'%(date[0:4],date[4:6],date[6:],start[0:2],start[2:4],start[4:])\n dcm_dict[d_cnt]['start_time'] = datetime.fromisoformat(start_time)\n dcm_dict[d_cnt]['run_length'] = dcm_data[0x0019,0x105a].value/1000\n dcm_dict[d_cnt]['end_time'] = dcm_dict[d_cnt]['start_time'] + timedelta(milliseconds=dcm_dict[d_cnt]['run_length'])\n d_cnt = d_cnt+1", "def get_endpoints(self, epg_dn):\n result = []\n for item in filter(lambda x: type(x).__name__ == 'CEp', self.query_child_objects(epg_dn)):\n # Creates a dynamic object type.\n endpoint = type('endpoint', (object,), {})\n\n # Filter the endpoint in memory looking for the object that contains the interface where the endpoint is\n # attached\n endpoint_connection_mo = filter(lambda x: type(x).__name__ == 'RsCEpToPathEp',\n self.query_child_objects(item.dn))[0]\n\n # Format the string to be human readable\n endpoint_connection_interface = str(endpoint_connection_mo.tDn).replace('topology/pod-1/paths','node').\\\n replace('pathep-[', '').replace(']','')\n\n # Add attributes to the object\n endpoint.ip = item.ip\n endpoint.mac = item.mac\n endpoint.name = item.name\n endpoint.interface = endpoint_connection_interface\n\n # Append it to the list\n result.append(endpoint)\n return result", "def list_all_ephemerides_files(self) -> Dict:\n ephs = self.list_result_ephemerides_files()\n while 'nextPageToken' in ephs:\n next_page_token = ephs['nextPageToken']\n _, e = self.list_result_ephemerides_files(page_token=next_page_token)\n ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])\n return ephs", "def readEpi_fromSequence(fpath, position=0, direction='h'):\n assert isinstance(fpath, str)\n\n fnames = []\n for f in glob(fpath + \"*.png\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.jpg\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.bmp\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.ppm\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.tif\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.bmp\"):\n fnames.append(f)\n fnames.sort()\n\n im = misc.imread(fnames[0])\n channels = 1\n if len(im.shape) == 3:\n channels = 3\n\n if direction == 'h':\n epi = np.zeros((len(fnames), im.shape[1], channels))\n if direction == 'v':\n epi = np.zeros((len(fnames), im.shape[0], channels))\n\n for n,f in enumerate(fnames):\n im = misc.imread(fnames[n])\n if direction == 'h':\n if len(im.shape) == 3:\n epi[n, :, 0:3] = im[position, :, 0:3]\n else:\n epi[n, :, 0] = im[position, :]\n if direction == 'v':\n if len(im.shape) == 3:\n epi[n, :, 0:3] = im[ :, position, 0:3]\n else:\n epi[n, :, 0] = im[:, position]\n\n return epi[:, :, 0:channels]", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = 
np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def _get_ais_paths(self) -> list:\n ais_files = []\n year = self.year\n end_year = self.year\n for month in range(1, 13):\n end_month = month + 1\n if month == 12:\n end_year += 1\n end_month = 1\n\n for vessel_type in self.vessel_types:\n path_template = f\"{vessel_type}_{year}{month:02}01-{end_year}{end_month:02}01_total.tif\"\n fname = self.dir / path_template\n ais_files.append(fname)\n\n return ais_files", "def get_filepaths_and_exts(self):\n filepaths = [prod.filepath for prod in self.products]\n exts = [prod.ext for prod in self.products]\n\n return filepaths, exts", "def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes", "def _get_pex_paths(self) -> list:\r\n pex_paths: list = []\r\n\r\n for object_name, 
script_path in self.psc_paths.items():\r\n pex_path = os.path.join(self.options.output_path, object_name.replace('.psc', '.pex'))\r\n\r\n # do not check if file exists, we do that in _find_missing_script_paths for a different reason\r\n if pex_path not in pex_paths:\r\n pex_paths.append(pex_path)\r\n\r\n return pex_paths", "def get_2Dtodo(loc=BASE):\n toproc = []\n for ff in Path(loc).glob(\"**/*.d/proces*.mscf\"):\n if (ff.parent/'ser').exists():\n toproc.append(ff)\n if DEBUG:\n print('get_2Dtodo:')\n pprint([str(i.parent.name) for i in toproc])\n return toproc", "def extract_mediapackage_endpoints(mp_client, mp_channel_id_list):\n emp_endpoint_list = {}\n for channel in mp_channel_id_list:\n emp_endpoint_list[str(channel)] = []\n response = mp_client.list_origin_endpoints()\n for endpoint in response['OriginEndpoints']:\n if str(endpoint[\"ChannelId\"]) in mp_channel_id_list:\n emp_endpoint_list[str(endpoint[\"ChannelId\"])].append(str(endpoint['Id']))\n return emp_endpoint_list", "def list_result_ephemerides_files(\n self, page_size: int = 100, page_token: str = None) -> Dict:\n params = {}\n if page_size < 0 or page_size > 100:\n page_size = 100\n params['pageSize'] = page_size\n if page_token:\n params['pageToken'] = page_token\n ephs = self._rp._rest.get(\n f'/projects/{self._rp._project}/jobs/{self._job_uuid}'\n f'/ephemerides?{urllib.parse.urlencode(params)}')\n return ephs", "def _GetEpiOrder(self):\n self.epi_series.sort()\n for series in self.epi_series:\n self.GetEpiAcqTimes(series)\n self.AssignEpiNames()", "def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)", "def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 
1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. 
Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))", "def _get_hostendpoints(self, host, intf_ep, config):\n\n for uuid in intf_ep.keys():\n\n intf = intf_ep[uuid][0]\n iftype = intf_ep[uuid][1]\n\n host_endpoints = dict()\n hep_name = host.hostname + \"-\" + intf.ifname + \"-if-hep\"\n\n host_endpoints[\"apiVersion\"] = \"crd.projectcalico.org/v1\"\n host_endpoints[\"kind\"] = \"HostEndpoint\"\n host_endpoints.update({\"metadata\": dict()})\n host_endpoints[\"metadata\"].update({\"name\": hep_name})\n host_endpoints[\"metadata\"].update({\"labels\": dict()})\n host_endpoints[\"metadata\"][\"labels\"].update({\"nodetype\": host.personality})\n host_endpoints[\"metadata\"][\"labels\"].update({\"ifname\":\n f\"{host.hostname}.{intf.ifname}\"})\n host_endpoints[\"metadata\"][\"labels\"].update({\"iftype\": iftype})\n\n host_endpoints.update({\"spec\": dict()})\n host_endpoints[\"spec\"].update({\"node\": host.hostname})\n interfaceName = puppet_intf.get_interface_os_ifname(self.context, intf)\n host_endpoints[\"spec\"].update({\"interfaceName\": interfaceName})\n\n # adding only for OAM for compatibility with old implementation\n if constants.NETWORK_TYPE_OAM in iftype:\n hep_name = host.hostname + \"-oam-if-hep\"\n host_endpoints[\"metadata\"][\"name\"] = hep_name\n self._add_hep_expected_ip(host, constants.NETWORK_TYPE_OAM, host_endpoints)\n\n config[hep_name] = copy.copy(host_endpoints)", "def make_photon_arrays(path, numevents):\n xcoords = []\n zcoords = []\n \n nsipmarrays = []\n nabsarrays = []\n \n for filename in os.listdir(path):\n\n photondata = np.loadtxt(path+'/'+filename,delimiter=',',usecols=[1,4])\n\n coords = filename[0:8]\n\n arraylen = len(photondata.flatten('F'))\n \n nsipmphotons = photondata.flatten('F')[numevents:arraylen]\n #print(len(nsipmphotons))\n nabsphotons = photondata.flatten('F')[0:numevents] \n \n nsipmarrays.append(nsipmphotons)\n nabsarrays.append(nabsphotons)\n \n x = re.findall('(-[0-9]+)x',coords) \n \n if bool(x) == False:\n x = re.findall('([0-9]+)x', coords)\n \n z = re.findall('(-[0-9]+)z',coords) \n\n if bool(z) == False:\n z = re.findall('([0-9]+)z',coords)\n\n xcoords.append(x[0])\n zcoords.append(z[0])\n \n xcoords = np.array(xcoords).astype(np.float)\n zcoords = np.array(zcoords).astype(np.float)\n \n return xcoords, zcoords, nsipmarrays, nabsarrays", "def partid2eids(self, partid, etype=...):\n ...", "def partid2eids(self, partid, etype=...):\n ...", "def process_output_files(pst, pst_path=\".\"):\n if not isinstance(pst, pyemu.Pst):\n raise Exception(\n \"process_output_files error: 'pst' arg must be pyemu.Pst instance\"\n )\n series = []\n for ins, out in zip(pst.instruction_files, pst.output_files):\n ins = os.path.join(pst_path, ins)\n out = os.path.join(pst_path, out)\n if not os.path.exists(out):\n warnings.warn(\"out file '{0}' not found\".format(out), PyemuWarning)\n f = os.path.join(pst_path, ins)\n i = InstructionFile(ins, pst=pst)\n try:\n s = 
i.read_output_file(out)\n series.append(s)\n except Exception as e:\n warnings.warn(\"error processing output file '{0}': {1}\".format(out, str(e)))\n if len(series) == 0:\n return None\n series = pd.concat(series)\n # print(series)\n return series", "def partid2eids(self, partid, etype): # -> None:\n ...", "def as_exons(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # parse data in the AbgpGeneLocusDir\n self.parseinputgff()\n self.rungetorf()\n # we need abgp_geneconfirmation.geneconfirmation first!\n geneconfirmation( { self._create_auto_key(): self.input } )\n\n # get only the CDS-type of tracks that define the coding sequence\n genecdstracks = filtergffs4fmethod( self._obtain_gene_gff(), GFF_CDS_FMETHOD ) \n\n if len(genecdstracks) == 1:\n # deal with SingleExonOnOrf -> TSS + donor\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n return [ SingleExonOnOrf(tss,genecdstracks[-1][4],orf,gff={}) ]\n\n elif len(genecdstracks) == 0:\n # no tracks !?\n return []\n elif not self.input['orfid-genestructure']:\n # not mappable on Orfs / or no genestructure provided\n return []\n else:\n # list with exons,introns to return \n exons = []\n introns = []\n exonsandintrons = []\n\n # deal with FirstExonOnOrf -> TSS + donor\n try:\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n except:\n print self.input.keys(), self.input['proteinfref']\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n donor = self._gene_cds_track_2_donor( genecdstracks[0], orf )\n donor.phase = ( genecdstracks[0][4]-genecdstracks[0][3]-1 ) % 3\n exons.append( FirstExonOnOrf(tss,donor,orf,gff={}) )\n exonsandintrons.append( exons[-1] )\n\n # deal with internal ExonOnOrf(s): -> acceptor + donor\n for pos in range(1,len(genecdstracks)-1):\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][pos])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[pos], orf )\n accep.phase = exons[-1].donor.phase\n donor = self._gene_cds_track_2_donor( genecdstracks[pos], orf )\n donor.phase = ( genecdstracks[pos][4]-genecdstracks[pos][3]-1+accep.phase ) % 3\n exons.append( ExonOnOrf(accep,donor,orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf, exons[-2].orf,\n exons[-1].acceptor, exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # deal with FinalExonOnOrf -> acceptor + StopCodon\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][-1])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[-1], orf )\n accep.phase = exons[-1].donor.phase\n exons.append( FinalExonOnOrf(accep,genecdstracks[-1][4],orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf,exons[-2].orf,\n exons[-1].acceptor,exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # return list of exons&introns\n return exonsandintrons", "def plotSpectrums2D(path_fsrcnn, path_liif, path_msrn, path_esrgan, path_bicubic, out_path):\n 
radial_FSRCNN = []\n radial_LIIF = []\n radial_MSRN = []\n radial_ESRGAN = []\n radial_BICUBIC = []\n\n for FSRCNN_path, LIIF_path, MSRN_path, ESRGAN_path, bicubic_path in zip(sorted(glob.glob(path_fsrcnn)), sorted(glob.glob(path_liif)), sorted(glob.glob(path_msrn)), sorted(glob.glob(path_esrgan)), sorted(glob.glob(path_bicubic))):\n print(FSRCNN_path)\n radialProfile_FSRCNN = spectrum2D(FSRCNN_path)\n print(len(radialProfile_FSRCNN))\n radial_FSRCNN.append(radialProfile_FSRCNN)\n print(LIIF_path)\n radialProfile_LIIF = spectrum2D(LIIF_path)\n radial_LIIF.append(radialProfile_LIIF)\n print(MSRN_path)\n radialProfile_msrn = spectrum2D(MSRN_path)\n radial_MSRN.append(radialProfile_msrn)\n print(ESRGAN_path)\n radialProfile_esrgan = spectrum2D(ESRGAN_path)\n radial_ESRGAN.append(radialProfile_esrgan)\n print(bicubic_path)\n radialProfile_bicubic = spectrum2D(bicubic_path)\n radial_BICUBIC.append(radialProfile_bicubic)\n\n image_name = FSRCNN_path.split('/')[-1]\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n\n plt.figure()\n plt.plot(radialProfile_FSRCNN, label='FSRCNN')\n plt.plot(radialProfile_LIIF, label='LIIF')\n plt.plot(radialProfile_msrn, label='MSRN')\n plt.plot(radialProfile_esrgan, label='ESRGAN')\n plt.plot(radialProfile_bicubic, label='BICUBIC')\n plt.legend()\n plt.title(image_name)\n plt.yscale('log')\n plt.savefig(out_path + image_name)\n\n mean_FSRCNN = tolerant_mean(radial_FSRCNN)\n mean_LIIF = tolerant_mean(radial_LIIF)\n mean_MSRN = tolerant_mean(radial_MSRN)\n mean_ESRGAN = tolerant_mean(radial_ESRGAN)\n mean_BICUBIC = tolerant_mean(radial_BICUBIC)\n plt.figure()\n plt.plot(mean_FSRCNN, label='FSRCNN')\n plt.plot(mean_LIIF, label='LIIF')\n plt.plot(mean_MSRN, label='MSRN')\n plt.plot(mean_ESRGAN, label='ESRGAN')\n plt.plot(mean_BICUBIC, label='BICUBIC')\n plt.legend()\n plt.title('MEAN')\n plt.yscale('log')\n plt.savefig(out_path + 'mean')\n #return radialProfile_FSRCNN.shape, radialProfile_LIIF.shape, radialProfile_msrn.shape", "def list_files(tag='', inst_id='', data_path='', format_str=None,\n supported_tags=None, file_cadence=dt.timedelta(days=1),\n two_digit_year_break=None, delimiter=None):\n\n if format_str is None:\n # pyast performs a check against `inst_id` and `tag` before calling\n # `list_files`. However, supported_tags is a non-pysat input.\n try:\n format_str = supported_tags[inst_id][tag]\n except KeyError as kerr:\n raise ValueError(' '.join(('Unknown inst_id or tag:',\n str(kerr))))\n\n # Get the series of files\n out = pysat.Files.from_os(data_path=data_path, format_str=format_str,\n two_digit_year_break=two_digit_year_break,\n delimiter=delimiter)\n\n # If the data is not daily, pad the series. 
Both pds.DateOffset and\n # dt.timedelta contain the 'days' attribute, so evaluate using that\n if not out.empty and not is_daily_file_cadence(file_cadence):\n emonth = out.index[-1]\n out.loc[out.index[-1] + file_cadence\n - dt.timedelta(days=1)] = out.iloc[-1]\n new_out = out.asfreq('D')\n\n for i, out_month in enumerate(out.index):\n if(out_month.month == emonth.month\n and out_month.year == emonth.year):\n out_month = emonth\n\n crange = pds.date_range(start=out_month, periods=2,\n freq=file_cadence)\n irange = pds.date_range(*crange.values, freq=\"D\").values[:-1]\n sel_range = new_out.index.intersection(irange)\n new_out[sel_range] = out.loc[out_month]\n\n # Assign the non-NaN files to out and add days to the filenames\n out = new_out.dropna()\n out = out + '_' + out.index.strftime('%Y-%m-%d')\n\n return out", "def calculate(self):\r\n #process_data = psxview.PsXview(self._config).calculate()\r\n #for offset, eprocess, ps_sources in process_data:\r\n # method = \"Process\"\r\n # pid = eprocess.UniqueProcessId\r\n # name = (eprocess.ImageFileName or '')\r\n # path = ' # check volshell > dt(\"_EPROCESS\") for attrib?\r\n # yield method, pid, name, '-'\r\n\r\n \"\"\" Look at Internet paths \"\"\"\r\n internet_data = iehistory.IEHistory(self._config).calculate()\r\n for process, record in internet_data:\r\n method = \"Internet\"\r\n proc = process.ImageFileName\r\n pid = process.UniqueProcessId\r\n fpath = record.Url\r\n if record.FileOffset > 0:\r\n fpath = fpath +' | '+record.File\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, proc, method):\r\n continue\r\n yield method, pid, proc, fpath\r\n\r\n for task in taskmods.DllList.calculate(self):\r\n pid = task.UniqueProcessId\r\n proc = str(task.ImageFileName)\r\n\r\n \"\"\" Look at the Handle file paths \"\"\"\r\n if task.ObjectTable.HandleTableList:\r\n for handle in task.ObjectTable.handles():\r\n\r\n if not handle.is_valid():\r\n continue\r\n\r\n method = \"Handle\"\r\n object_type = handle.get_object_type()\r\n if object_type == \"File\":\r\n # Only look at \"File\" object_type's\r\n file_obj = handle.dereference_as(\"_FILE_OBJECT\")\r\n fpath = str(file_obj.file_name_with_device())\r\n #fname = str(fpath).rsplit('\\\\',1)[1] # might get IndexError\r\n if fpath:\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, None, method):\r\n continue\r\n if not self._has_extension(fpath):\r\n continue\r\n if self._is_blacklisted(fpath, None, method):\r\n yield method, pid, proc, fpath\r\n\r\n \"\"\" Look at file paths in processes CLI args \"\"\"\r\n cmdline = \"\"\r\n if task.Peb:\r\n method = \"CLI\"\r\n fpath = \"{0}\".format(str(task.Peb.ProcessParameters.CommandLine or '')).strip()\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, proc, method):\r\n continue\r\n if not self._has_extension(fpath):\r\n continue\r\n if self._is_blacklisted(fpath, proc, method):\r\n yield method, pid, proc, fpath\r\n\r\n \"\"\" Look at Service file paths \"\"\"\r\n scanner = svcscan.SvcScan(self._config)\r\n for service in scanner.calculate():\r\n method = \"Service\"\r\n name = str(service.ServiceName.dereference() or '')\r\n if service.Binary:\r\n fpath = service.Binary.strip('\"')\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, name, method):\r\n continue\r\n if self._is_blacklisted(fpath, name, method):\r\n yield method, \"-\", name, fpath\r\n\r\n \"\"\" Look at file paths \"\"\"\r\n scanner = filescan.FileScan(self._config)\r\n for fobj in scanner.calculate():\r\n method = \"File\"\r\n 
fpath = str(fobj.file_name_with_device() or '')\r\n if fpath:\r\n if self._config.whitelist:\r\n if self._is_whitelisted(fpath, None, method):\r\n continue\r\n if not self._has_extension(fpath):\r\n continue\r\n if self._is_blacklisted(fpath, None ,method):\r\n yield method, '-', '-', fpath\r\n\r\n \"\"\" Look at ShimCache file paths \"\"\"\r\n shimcache_data = shimcache.ShimCache(self._config).calculate()\r\n if shimcache_data:\r\n method = \"Shim\"\r\n for path, last_modified, last_updated in shimcache_data:\r\n fpath = str(path).strip()\r\n yield method, '-', '-', fpath\r\n\r\n # takes a long time...\r\n \"\"\" Look at Shellbag file paths \"\"\"\r\n #shellbag_data = shellbags.ShellBags(self._config).calculate()\r\n #if shellbag_data:\r\n # method = \"Shellbag\"\r\n # try:\r\n # for item, shell, path in shellbag_data:\r\n # yield method, '-', '-', path\r\n # except Exception as err:\r\n # print err\r\n # for item, num, shell, path in shellbag_data:\r\n # yield method, '-', '-', path\r\n\r\n \"\"\" Look at SymLink file paths \"\"\"\r\n #scanner = filescan.SymLinkScan(self._config)\r\n #for symlink in scanner.calculate():\r\n # method = \"SymLink\"\r\n # fpath = str(symlink.LinkTarget or '')\r\n # yield method, '-', '-', fpath\r\n\r\n \"\"\" Look at Driver file paths \"\"\"\r\n #scanner = filescan.DriverScan(self._config)\r\n #for driver in scanner.calculate():\r\n # method = \"Driver\"\r\n # fpath = str(driver.DriverName or '')\r\n # yield method, '-', '-', fpath\r", "def partid2eids(self, partid):\n # TODO do we need to cache it?\n start = self._edge_map[partid - 1] if partid > 0 else 0\n end = self._edge_map[partid]\n return F.arange(start, end)", "def getExons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)):\n rtrn.append(Interval(self.chr,self.exonStarts[i],self.exonEnds[i],self.strand,name = self.name+\"_exon_\"+str(i+1)))\n return rtrn", "def _generateEphemeris(self, orbits, observers):\n err = \"This backend does not have ephemeris generation implemented.\"\n raise NotImplementedError(err)", "def _gen_ellipse(twiss, ep=1, num=100):\n a, b, c = twiss\n\n t = np.linspace(0, 2 * np.pi, num)\n t0 = np.arctan(a)\n x = np.sqrt(b * ep) * np.cos(t)\n y = np.sqrt(c * ep) * np.sin(t - t0)\n\n return np.vstack([x, y])", "def get_psf_path(self):\n exp_ids = sorted(list(set(self.info['entry_number'])))\n exp_id_indices = []\n for exp_id in self.info['entry_number']:\n exp_id_indices.append(exp_ids.index(exp_id))\n n_activities = len(exp_ids)\n\n # If no path explicitly provided, use the default path.\n if self.psf_paths is None:\n self.logger.info('No PSF path provided. Using default path as PSF path for all yamls.')\n paths_out = []\n for instrument in self.info['Instrument']:\n default_path = self.config_information['global_psfpath'][instrument.lower()]\n paths_out.append(default_path)\n return paths_out\n\n elif isinstance(self.psf_paths, str):\n self.logger.info('Using provided PSF path.')\n paths_out = [self.psf_paths] * len(self.info['act_id'])\n return paths_out\n\n elif isinstance(self.psf_paths, list) and len(self.psf_paths) != n_activities:\n raise ValueError('Invalid PSF paths parameter provided. 
Please '\n 'provide the psf_paths in the form of a list of '\n 'strings with a length equal to the number of '\n 'activities in the APT program ({}), not equal to {}.'\n .format(n_activities, len(self.psf_paths)))\n\n elif isinstance(self.psf_paths, list):\n self.logger.info('Using provided PSF paths.')\n paths_out = [self.psf_paths[i] for i in exp_id_indices]\n return paths_out\n\n elif not isinstance(self.psf_paths, list) or not isinstance(self.psf_paths, str):\n raise TypeError('Invalid PSF paths parameter provided. Please '\n 'provide the psf_paths in the form of a list or string, not'\n '{}'.format(type(self.psf_paths)))", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def build_filelist(basepath):\n log.info(\"Building list of files containing EDM symbols in %s\", basepath)\n symbol_files = []\n for dir_path, _, filenames in os.walk(basepath):\n for filename in filenames:\n filepath = os.path.join(dir_path, filename)\n if filename.endswith(\".opi\") and utils.grep(filepath, \"EDM Symbol\"):\n symbol_files.append(filepath)\n\n return symbol_files", "def _getfilenames(self):\n\n # Set up the path and file prefix depending on the filetype.\n if self._filetype == 'nightwatch':\n fileprefix = 'qcframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/nightwatch/kpno'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n elif self._filetype == 'redux':\n fileprefix = 'sframe'\n\n if self._location == 'nersc':\n prefix = '/global/project/projectdirs/desi/spectro/redux/daily/exposures'\n elif self._location == 'kpno':\n prefix = '/exposures/desi' # not correct path!\n else:\n raise ValueError('Unknown location {}'.format(self._location))\n else:\n raise ValueError('Unknown file type {}'.format(self._filetype))\n\n # Find the exposures files.\n exfiles = {}\n for ex in self._exposures:\n folder = '{}/{}/{:08d}'.format(prefix, self._date, ex)\n files = sorted(glob('{}/{}*.fits'.format(folder, fileprefix)))\n exfiles[ex] = files\n\n return exfiles", "def eid2partid(self, eids, etype=...):\n ...", "def eid2partid(self, eids, etype=...):\n ...", "def create_sdxmetadata(sdx_dir, output_dir):\n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #segment and store metadata \n #define SDX files to be read\n for root, dirs, files in os.walk(sdx_dir):\n for idx, file in enumerate(files):\n if file.endswith(\".sdx\"):\n \n print(\"Reading File: \" + file)\n \n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #scan for pick info\n with open(root + file,\"r\") as f:\n searchlines = f.readlines()\n for i, line in enumerate(searchlines):\n #strip whitespace/end-of-line characters for exact text matching\n line = line.rstrip()\n #find pick info\n if \"pick\" == line:\n for l in searchlines[i:i+16]: \n #print(l)\n #assign pick info/instrument info to variables and store\n instrument_info = searchlines[i+1]\n pick_info = searchlines[i+2]\n phase_info = searchlines[i+9:i+13]\n instrument.append(instrument_info)\n 
picks.append(pick_info)\n phases.append(phase_info)\n \n #create a .txt file for each seperate event to store pick info\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n f = open(output_dir + os.path.splitext(file)[0] + \".txt\",'w')\n #header information...\n f.write('Data read from correpsonding SDX file:' + '\\n')\n f.write(file + '\\n\\n')\n f.write('Instrument/component' + '\\t\\t\\t' + 'Pick information' '\\t\\t\\t' + 'Phase information\\n')\n \n # print both instrument and pick information to the \n # associated event file\n for item in zip(instrument, picks, phases):\n \n #remove preceding whitespace/formatting characters\n item0 = item[0].rstrip()\n item1 = item[1].rstrip()\n item2 = list(map(str.strip, item[2]))\n \n #remove associated list formatting\n item2 = (\", \".join( str(e) for e in item2))\n\n #print...\n #format | instrument info | pick info | phase info\n f.write(\"%s\\t\\t%s\\t\\t%s\\n\" % (item0,item1,item2))\n \n f.close()", "def GEEphenMODIS(ptsFile,metric,startYear,endYear,buf,poly,username,folderOut, scalePix = 500):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n years = ee.List(list(range(startYear, endYear + 1)))\n \n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define dictionary for raster random names\n phen_d = {}\n phen_d['GreenInc'] = 'Onset_Greenness_Increase1'\n phen_d['GreenMax'] = 'Onset_Greenness_Maximum1'\n phen_d['GreenDec'] = 'Onset_Greenness_Decrease1'\n phen_d['GreenMin'] = 'Onset_Greenness_Minimum1'\n\n for met in metric:\n\n modis1 = ee.ImageCollection('MODIS/MCD12Q2').select(phen_d[met])\n \n def map_m(i):\n i = ee.Number(i)\n filtered = (modis1\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n return filtered\n\n img_col = ee.ImageCollection(years.map(map_m).flatten())\n\n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(phen_d[met])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'p_MCD12Q2_'+str(met)+'_'+str(startYear)+'_'+str(endYear)+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print ('buffered pts by:' + str(buf) + ' for phen for ' + str(met))\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(phen_d[met])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'p_MCD12Q2_'+str(met)+'_'+str(startYear)+'_'+str(endYear)+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print ('spatial mean in poly: no buffer for phen for ' + str(met))\n\n else:\n def table_m(image):\n table = 
(image\n .select(phen_d[met])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'p_MCD12Q2_'+str(met)+'_'+str(startYear)+'_'+str(endYear)+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for phen for ' + str(met))", "def load_dcm_series(files: List[str]):\n volume = []\n files.sort(key=get_slice_location)\n for file in files:\n dcm = pydicom.dcmread(file, force=True)\n if not dcm.file_meta.get('TransferSyntaxUID'):\n dcm.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian\n volume.append(dcm.pixel_array)\n return files, np.stack(volume)", "def get_exons(chromStart, chromEnd, blockSizes, blockStarts):\n blockSizes = [int(i) for i in blockSizes.split(\",\") if not i == \"\" ]\n blockStarts = [int(i) for i in blockStarts.split(\",\") if not i == \"\" ]\n n = len(blockSizes)\n exons = []\n #print(\"block: \" + str(n))\n #print(blockSizes, blockStarts)\n for i in range(n):\n #print(i)\n blockStart = blockStarts[i]\n blockSize = blockSizes[i]\n exonStart = chromStart + blockStart\n exonEnd = exonStart + blockSize\n exons.append([exonStart, exonEnd])\n return(exons)", "def scraping_episodes(self, serie_data, episodes_list):\n episodes_data = []\n for episode in episodes_list:\n # Se arma este dict para localizar los campos\n # en el json y que sea mas facil procesarlos mas adelante\n epi_details = episode[0]['body']['details']\n epi_dict = {\n 'ParentId': serie_data.id,\n 'ParentTitle': serie_data.clean_title,\n 'Id': episode[0]['id'],\n 'Title': epi_details['title'],\n 'Type': 'episode',\n 'JSON': {\n 'Synopsis': epi_details['description'],\n 'Metadata': epi_details['metadata'].replace('\\xa0', ''),\n 'Rating': epi_details['localizedRating']['value'],\n 'Image': epi_details,\n 'Groups': episode[1]['body']['groups'],\n 'SeasonAndNumber': episode[2]['body']['metadata'],\n 'isFree': episode[0]['body']['isFree']\n }\n }\n payload_epi = self.build_payload(epi_dict)\n # Si la serie es original sus episodios también\n payload_epi.is_original = serie_data.is_original\n episodes_data.append(payload_epi)\n payload_epi = payload_epi.payload_episode()\n Datamanager._checkDBandAppend(\n self, payload_epi, self.scraped_epi, self.payloads_epi,\n isEpi=True\n )\n return episodes_data", "def get_ephemeris(rundate, sat_name):\n file_key = \"slr_ephemeris\"\n ephemeris_data = get_satellite_vars(sat_name)\n provider_list = config.tech.prediction_providers.list\n # Find the latest version of the observation file\n versions = config.files.glob_variable(file_key, \"version\", r\"\\d+\", file_vars=ephemeris_data)\n\n try:\n ephemeris_data[\"version\"] = sorted(versions)[-1]\n providers = config.files.glob_variable(file_key, \"provider\", r\"\\w+\", file_vars=ephemeris_data)\n for provider in provider_list:\n if provider in providers:\n ephemeris_data[\"provider\"] = provider\n break\n else:\n log.fatal(f\"No valid provider found: {', '.join(providers)}\")\n except IndexError:\n log.info(\"No ephemeris data found\")\n log.info(f\"Download manually from https://cddis.nasa.gov/archive/slr/cpf_predicts/{rundate.year}/{sat_name}\")\n log.fatal(f\"Please save missing file 
as '{config.files.path(file_key)}' !\")\n eph_parser = parsers.parse_key(file_key, file_vars=ephemeris_data)\n eph = calculate_initial_values(eph_parser.as_dict(), rundate)\n\n return eph", "def createExon(strand_p, five_p_utr, cds_cod, three_p_utr):\n exon_pos = []\n if strand_p == '+': \n utr5_start, utr5_end = 0, 0\n if five_p_utr != []:\n utr5_start, utr5_end = five_p_utr[-1][0], five_p_utr[-1][1] \n cds_5start, cds_5end = cds_cod[0][0], cds_cod[0][1]\n jun_exon = []\n if cds_5start-utr5_end == 0 or cds_5start-utr5_end == 1:\n jun_exon = [utr5_start, cds_5end] \n if len(cds_cod) == 1:\n five_prime_flag = 0\n if jun_exon != []:\n five_p_utr = five_p_utr[:-1]\n five_prime_flag = 1\n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n jun_exon = []\n utr3_start, utr3_end = 0, 0\n if three_p_utr != []: \n utr3_start = three_p_utr[0][0]\n utr3_end = three_p_utr[0][1]\n if utr3_start-cds_5end == 0 or utr3_start-cds_5end == 1:\n jun_exon = [cds_5start, utr3_end]\n three_prime_flag = 0\n if jun_exon != []: \n cds_cod = cds_cod[:-1]\n three_p_utr = three_p_utr[1:]\n three_prime_flag = 1\n if five_prime_flag == 1 and three_prime_flag == 1:\n exon_pos.append([utr5_start, utr3_end])\n if five_prime_flag == 1 and three_prime_flag == 0:\n exon_pos.append([utr5_start, cds_5end])\n cds_cod = cds_cod[:-1]\n if five_prime_flag == 0 and three_prime_flag == 1:\n exon_pos.append([cds_5start, utr3_end])\n for cds in cds_cod:\n exon_pos.append(cds)\n for utr3 in three_p_utr:\n exon_pos.append(utr3)\n else: \n if jun_exon != []:\n five_p_utr = five_p_utr[:-1]\n cds_cod = cds_cod[1:]\n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n exon_pos.append(jun_exon) if jun_exon != [] else ''\n jun_exon = []\n utr3_start, utr3_end = 0, 0\n if three_p_utr != []:\n utr3_start = three_p_utr[0][0]\n utr3_end = three_p_utr[0][1]\n cds_3start = cds_cod[-1][0]\n cds_3end = cds_cod[-1][1]\n if utr3_start-cds_3end == 0 or utr3_start-cds_3end == 1: \n jun_exon = [cds_3start, utr3_end]\n if jun_exon != []:\n cds_cod = cds_cod[:-1]\n three_p_utr = three_p_utr[1:]\n for cds in cds_cod:\n exon_pos.append(cds)\n exon_pos.append(jun_exon) if jun_exon != [] else ''\n for utr3 in three_p_utr:\n exon_pos.append(utr3)\n elif strand_p == '-':\n utr3_start, utr3_end = 0, 0 \n if three_p_utr != []:\n utr3_start = three_p_utr[-1][0]\n utr3_end = three_p_utr[-1][1]\n cds_3start = cds_cod[0][0]\n cds_3end = cds_cod[0][1]\n jun_exon = []\n if cds_3start-utr3_end == 0 or cds_3start-utr3_end == 1:\n jun_exon = [utr3_start, cds_3end] \n if len(cds_cod) == 1: \n three_prime_flag = 0\n if jun_exon != []:\n three_p_utr = three_p_utr[:-1]\n three_prime_flag = 1\n for utr3 in three_p_utr:\n exon_pos.append(utr3)\n jun_exon = []\n (utr5_start, utr5_end) = (0, 0)\n if five_p_utr != []:\n utr5_start = five_p_utr[0][0]\n utr5_end = five_p_utr[0][1]\n if utr5_start-cds_3end == 0 or utr5_start-cds_3end == 1:\n jun_exon = [cds_3start, utr5_end]\n five_prime_flag = 0\n if jun_exon != []:\n cds_cod = cds_cod[:-1]\n five_p_utr = five_p_utr[1:]\n five_prime_flag = 1\n if three_prime_flag == 1 and five_prime_flag == 1:\n exon_pos.append([utr3_start, utr5_end])\n if three_prime_flag == 1 and five_prime_flag == 0:\n exon_pos.append([utr3_start, cds_3end])\n cds_cod = cds_cod[:-1]\n if three_prime_flag == 0 and five_prime_flag == 1:\n exon_pos.append([cds_3start, utr5_end]) \n for cds in cds_cod:\n exon_pos.append(cds)\n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n else:\n if jun_exon != []:\n three_p_utr = three_p_utr[:-1]\n cds_cod = cds_cod[1:]\n for utr3 in 
three_p_utr:\n exon_pos.append(utr3) \n if jun_exon != []:\n exon_pos.append(jun_exon)\n jun_exon = []\n (utr5_start, utr5_end) = (0, 0)\n if five_p_utr != []:\n utr5_start = five_p_utr[0][0]\n utr5_end = five_p_utr[0][1] \n cds_5start = cds_cod[-1][0]\n cds_5end = cds_cod[-1][1]\n if utr5_start-cds_5end == 0 or utr5_start-cds_5end == 1:\n jun_exon = [cds_5start, utr5_end]\n if jun_exon != []:\n cds_cod = cds_cod[:-1]\n five_p_utr = five_p_utr[1:]\n for cds in cds_cod:\n exon_pos.append(cds)\n if jun_exon != []:\n exon_pos.append(jun_exon) \n for utr5 in five_p_utr:\n exon_pos.append(utr5)\n return exon_pos", "def PIL_series(self, map_list, pos_g_val=100, neg_g_val=-100, size=4):\n pil_maps = []\n success_count = 0\n\n for i, sub_map in enumerate(map_list):\n pos_map, neg_map = self.dt.identify_pos_neg_region(sub_map, pos_gauss=pos_g_val, neg_gauss=neg_g_val)\n\n pos_edge = self.dt.edge_detection(pos_map)\n neg_edge = self.dt.edge_detection(neg_map)\n\n pos_dil_edge = self.dt.buff_edge(pos_edge, size=size)\n neg_dil_edge = self.dt.buff_edge(neg_edge, size=size)\n\n pil_maps.append(self.PIL_extraction(pos_dil_edge, neg_dil_edge, sub_map))\n\n success_count += 1\n\n print(\"Number of Detected Candidate RoPIs: \", success_count)\n\n return pil_maps", "def GEEnasaNEXGDDP(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,username,folderOut,models = ['ACCESS1-0', 'bcc-csm1-1', 'BNU-ESM',\n 'CanESM2', 'CCSM4', 'CESM1-BGC', 'CNRM-CM5', 'CSIRO-Mk3-6-0',\n 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'inmcm4', 'IPSL-CM5A-LR',\n 'IPSL-CM5A-MR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR',\n 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M'], scalePix = 25000):\n \n # load required libraries\n import ee\n\n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['day'] = 'projd'\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n NEX = (ee.ImageCollection('NASA/NEX-GDDP')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n monthsEE = ee.List(list(range(0,(12*len(years)))))\n yearsEE = ee.List(years)\n\n######Turned off unit conversion, because it fails when there are too many pts\n## if (met == 'pr'):\n##\n## def Scale1(img):\n## return (img.float()\n## .multiply(86400)\n## .copyProperties(img,['system:time_start','system:time_end']))\n##\n## NEX = NEX0.map(Scale1)\n## \n## elif any([(met == 'tasmin'),(met == 'tasmax')]):\n##\n## def KtoC(img):\n## return (img.float()\n## .subtract(273.15)\n## .copyProperties(img,['system:time_start','system:time_end']))\n##\n## NEX = NEX0.map(KtoC)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'month'),any([(met == 'tasmin'),(met == 'tasmax')])]):\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (NEX\n 
.filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'month'),(met == 'pr')]):\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif timeStep == 'day':\n\n img_col = NEX.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for NEX: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for NEX: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n 
task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for NEX: ' + met + ' ' + scenario + ' ' + model)", "def _get_eps_xml(self):\n format_path = os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def create_pegs():\n peg_rows = [-2, -4, -6, -8]\n peg_cols = [-6, -4, -2, 0, 2, 4, 6]\n\n pegs = []\n pid = -1\n for r in peg_rows:\n for c in peg_cols:\n pid += 1\n pegs.append(geom.Peg(c, r, PEG_RADIUS, pid))\n\n # include offset rows\n for r in peg_rows:\n for c in peg_cols:\n pid += 1\n pegs.append(geom.Peg(c+1, r+1, PEG_RADIUS, pid))\n\n return pegs", "def find_epilines(imgLeft, imgRight, ptsLeft, ptsRight, F):\n color = []\n for i in range(ptsLeft.shape[0]):\n color.append(tuple(np.random.randint(0, 255, 3).tolist()))\n print(color)\n\n # Find epilines corresponding to points in right image (right image)\n linesLeft = cv2.computeCorrespondEpilines(ptsRight.reshape(-1, 1, 2), 2, F)\n linesLeft = linesLeft.reshape(-1, 3)\n # Draw its lines on left image\n img5, img6 = drawlines(imgLeft, imgRight, linesLeft, ptsLeft, ptsRight, color)\n\n # Find epilines corresponding to points in left image (left image)\n linesRight = cv2.computeCorrespondEpilines(ptsLeft.reshape(-1, 1, 2), 1, F)\n linesRight = linesRight.reshape(-1, 3)\n # Draw its lines on right image\n img3, img4 = drawlines(imgRight, imgLeft, linesRight, ptsRight, ptsLeft, color)\n\n plt.subplot(121), plt.imshow(img5)\n plt.subplot(122), plt.imshow(img3)\n plt.show()", "def get_img_annot_pairs_from_paths(images_path , segs_path):\n imagepaths = glob_match_image_files(images_path)\n annotpaths = glob_match_image_files(segs_path)\n annotnames = [remove_extension(os.path.basename(path)) for path in annotpaths]\n\n ret = []\n\n for imgpath in imagepaths:\n\n imgname = remove_extension(os.path.basename(imgpath))\n\n try:\n index = annotnames.index(imgname)\n except:\n print(\"Image: '%s', does not have an annotation!\" % imgpath)\n continue\n\n segpath = annotpaths[index]\n\n ret.append((imgpath , segpath))\n\n return ret", "def ps_pids_e(self, pids):\n cmd = [rcEnv.syspaths.ps, \"-p\", pids, \"e\"]\n out, _, _ = justcall(cmd)\n return out", "def read_ripser_output(output_path,max_dim,output_name=None):\n # \\todo add persistence by density (columns pers by threshold and column pers by dens) ## only needed if input weighted network\n output_file_path =os.path.join(output_path,'output_ripser.txt')\n data = open(output_file_path,'rb').readlines()\n value_range = 
eval(data[1].rstrip().split(' ')[-1])\n holes = dict() ## save holes by dimension (birth, death, persistence)\n for dimH in range(0,max_dim+1):#[0,1,2]:\n print 'dimH ', dimH\n h_start, h_end = ripser_PDs_dim(data,dim=dimH)\n pers = np.array(h_end)-np.array(h_start)\n d = pd.DataFrame()\n d['birth'] = h_start\n d['death'] = h_end\n d['persistence'] = pers\n d['dimH'] = dimH\n holes[dimH] = d \n data_pds = pd.concat(holes.values())\n if(output_name!=None):\n output_file_path = os.path.join(output_path,'%s_PDS.csv'%output_name)\n data_pds.to_csv(output_file_path) ## save pandas file with PDs for dim 0,1,2\n print 'Saved results in %s'%(output_file_path)\n else:\n output_file_path = os.path.join(output_path,'outputs_PDS.csv')\n data_pds.to_csv(output_file_path) ## save pandas file with PDs for dim 0,1,2\n print 'Saved results in %s'%output_file_path\n return()", "def extractSeriesInfo(self, inputdir):\n self.m_status.SetLabelText(\"Detecting DICOM data ... please wait\")\n allfiles = [y for x in walk(inputdir) for y in iglob(join(x[0], '*.IMA'))]\n self.controller.parseDicom(self, allfiles)\n # n = 1\n # for filename in allfiles:\n # try:\n # if not self.db.hasFile(filename):\n # dcm = dicom.read_file(filename)\n # updatemsg = \"Detecting DICOM data ... %d of %d\" % (n, len(allfiles))\n # self.m_status.SetLabelText(updatemsg)\n # n += 1\n #\n # # Check DICOM header info\n # series_num = str(dcm.SeriesInstanceUID)\n # uuid = self.generateuid(series_num)\n # imagetype = str(dcm.ImageType[2])\n # dicomdata = {'uuid': uuid,\n # 'patientid': str(dcm.PatientID),\n # 'patientname': str(dcm.PatientName),\n # 'seriesnum': series_num,\n # 'sequence': str(dcm.SequenceName),\n # 'protocol': str(dcm.ProtocolName),\n # 'imagetype': imagetype\n # }\n #\n # if not self.db.hasUuid(uuid):\n # self.db.addDicomdata(dicomdata)\n # if not self.db.hasFile(filename):\n # self.db.addDicomfile(uuid, filename)\n # except InvalidDicomError:\n # print(\"Not DICOM - skipping: \", filename)\n # continue\n # Load for selection\n # Columns: Toggle Select\n # Text PatientID\n # Text Sequence\n # Text Protocol\n # Text Image Type\n # Text Num Files\n # Text Series ID\n\n # for suid in db.getNewUuids():\n # numfiles = db.getNumberFiles(suid)\n # self.m_dataViewListCtrl1.AppendItem(\n # [True, self.controller.db.getDicomdata(suid, 'patientname'),\n # self.controller.db.getDicomdata(suid, 'sequence'),\n # self.controller.db.getDicomdata(suid, 'protocol'),\n # self.controller.db.getDicomdata(suid, 'imagetype'), str(numfiles),\n # self.controller.db.getDicomdata(suid, 'seriesnum')])\n #\n # msg = \"Total Series loaded: %d\" % self.m_dataViewListCtrl1.GetItemCount()\n # self.m_status.SetLabelText(msg)", "def get_test_files(self):\n train_dir = os.path.join(self.data_dir, \"test_{}_new\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][1]), int(p[0][2]), p[1]) for p in interm]", "def scanDir(dcmdir):\n\n if not enabled():\n raise RuntimeError('dcm2niix is not available or is too old')\n\n dcmdir = op.abspath(dcmdir)\n cmd = f'{dcm2niix()} -b o -ba n -f %s -o . 
\"{dcmdir}\"'\n series = []\n\n with tempdir.tempdir() as td:\n\n with open(os.devnull, 'wb') as devnull:\n sp.call(shlex.split(cmd), stdout=devnull, stderr=devnull)\n\n files = glob.glob(op.join(td, '*.json'))\n\n if len(files) == 0:\n return []\n\n for fn in files:\n with open(fn, 'rt') as f:\n meta = json.load(f)\n meta['DicomDir'] = dcmdir\n # SeriesDescription is not\n # guaranteed to be present\n if 'SeriesDescription' not in meta:\n meta['SeriesDescription'] = meta['SeriesNumber']\n series.append(meta)\n\n # sort by series number\n def key(s):\n return s.get('SeriesNumber', sys.maxsize)\n\n series = list(sorted(series, key=key))\n\n return series", "def get_itds_v2(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_attributes=False):\n ears = ears.astype(np.bool)\n itds_to_return, timestamps_to_return, ears_to_return, types_to_return = [], [], [], []\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n for ts_right, ts_idx_right in zip(timestamps_right, timestamp_indices_right):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n for matched_index in matched_indices:\n matched_itd = ts_right - timestamps_left[matched_index]\n itds_to_return.append(matched_itd)\n timestamps_to_return.append(ts_right)\n ears_to_return.append(False)\n types_to_return.append(type_of_event)\n\n for ts_left, ts_idx_left in zip(timestamps_left, timestamp_indices_left):\n matched_indices = np.where((timestamps_right >= ts_left - max_itd) &\n (timestamps_right < ts_left + max_itd))[0]\n for matched_index in matched_indices:\n matched_itd = timestamps_right[matched_index] - ts_left\n itds_to_return.append(matched_itd)\n timestamps_to_return.append(ts_left)\n ears_to_return.append(True)\n types_to_return.append(type_of_event)\n\n indices = np.argsort(timestamps_to_return)\n timestamps_to_return = np.array(timestamps_to_return, dtype=np.float32)[indices]\n itds_to_return = np.array(itds_to_return, dtype=np.float32)[indices]\n types_to_return = np.array(types_to_return, dtype=np.int16)[indices]\n ears_to_return = np.array(ears_to_return, dtype=np.int8)[indices]\n\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps_to_return, ears=ears_to_return,\n types=types_to_return, itds=itds_to_return)\n\n if return_attributes:\n return itds_to_return, timestamps_to_return, ears_to_return, types_to_return\n\n return itds_to_return", "def read_ephem_file(infile):\n target_id, epoch, period, tdur = [], [], [], []\n with open(infile) as ff:\n data = ff.readlines()\n for row in data:\n s = row.split()\n target_id.append(s[0])\n 
epoch.append(float(s[1]))\n period.append(float(s[2]))\n tdur.append(float(s[3]))\n return target_id, epoch, period, tdur", "def _get_result_paths(self, data):\r\n # access the output dir through self.Parameters so we know it's been cast\r\n # to a FilePath\r\n od = self.Parameters['-o'].Value\r\n\r\n result = {}\r\n # the before/after coords plot\r\n result['plot'] = ResultPath(\r\n Path=join(od,\r\n 'PCoA_vs_projection.pdf'),\r\n IsWritten=True)\r\n # the detrended coords file\r\n result['coords'] = ResultPath(\r\n Path=join(od, 'detrended_pcoa.txt'), IsWritten=True)\r\n # the summary file, only present if metadata was included\r\n summary_fp = join(od, 'summary.txt')\r\n result['summary'] = ResultPath(Path=summary_fp,\r\n IsWritten=self.Parameters['-c'].isOn())\r\n return result", "def list_files(excel_file, data_folder):\n\n wb = xlrd.open_workbook(excel_file)\n sheet = wb.sheet_by_index(0)\n sheet.cell_value(0, 0)\n\n # Extracting number of rows\n nsampes = sheet.nrows\n vol_paths, seg_paths =[],[]\n\n for i in range(1, nsampes):\n row = sheet.row_values(i)\n row = row[0]\n\n filename= row.split('.')[0]\n folder = row.split('C')[0]\n folder = folder[:-1]\n vol_paths.append(os.path.join(data_folder,folder,filename +'.nii.gz'))\n segname= filename+'_seg.nii.gz'\n seg_paths.append(os.path.join(data_folder, folder, segname))\n\n return vol_paths, seg_paths", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def ProcessDTI(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'dti':\n if self.verbose:\n print 'Processing DTI data in %s' % os.path.basename(entry)\n# dtiname = '%s/s%s_dti' % \\\n# (self.info[entry]['outdir'],self.info[entry]['series'])\n cmd = 'convert_file %s %s %s' % (entry, \\\n self.info[entry]['imgfile'], self.info[entry]['filetype'])\n fname = '%s%s' % \\\n (self.info[entry]['imgfile'], self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])", "def read_iers_EOP(input_file):\n #-- read data file splitting at line breaks\n with open(input_file,'r') as f:\n file_contents = f.read().splitlines()\n #-- number of data lines\n n_lines = len(file_contents)\n dinput = {}\n dinput['MJD'] = np.zeros((n_lines))\n dinput['x'] = np.zeros((n_lines))\n dinput['y'] = np.zeros((n_lines))\n #-- for each line in the file\n flag = 'I'\n counter = 0\n while (flag == 'I'):\n line = file_contents[counter]\n i = 2+2+2+1; j = i+8\n 
dinput['MJD'][counter] = np.float(line[i:j])\n i = j+1\n flag = line[i]\n i += 2; j = i+9\n dinput['x'][counter] = np.float(line[i:j])\n i = j+10; j = i+9\n dinput['y'][counter] = np.float(line[i:j])\n counter += 1\n #-- reduce to data values\n dinput['MJD'] = dinput['MJD'][:counter]\n dinput['x'] = dinput['x'][:counter]\n dinput['y'] = dinput['y'][:counter]\n #-- return the date, flag and polar motion values\n return dinput", "def read_data(self):\n self.days = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14]\n path = '../data/'\n data = []\n for day in self.days:\n filename = path + 'spectrum_day{}.txt'.format(day)\n data.append(read_file(filename))\n return data", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def _update_imgs_and_pt_list(self, points, edge_points, segs, index):\n # index specifies whether to use the x or y coordinate in x_pts\n x_pts=[]\n for i in range(0, len(points)):\n pt=points[i]\n #edge_points[pt[0],pt[1]] = 255\n x_pts.append(pt[index])\n #segs[pt[0],pt[1]]=150\n\n return x_pts, segs, edge_points", "def sim_exon_stop_density(simulations, seqs, exon_positions_bed, temp_dir, seeds=None):\n\n temp_files = []\n if len(simulations):\n\n exon_info = get_exon_info(exon_positions_bed)\n\n codon_regex = re.compile(\".{3}\")\n codon_list = {name: re.findall(codon_regex, seqs[name]) for name in seqs}\n # get a list of start and stop codons\n starts = {name: codon_list[name][0] for name in codon_list}\n stops = {name: codon_list[name][-1] for name in codon_list}\n # get a list of internal codons in the list\n codon_list = {name: codon_list[name][1:-1] for name in codon_list}\n\n for sim_no, simulation in enumerate(simulations):\n # set the seed\n set_seed(seeds, simulation)\n # print the simulation number out\n gen.print_simulation(sim_no+1, simulations)\n # get the randomised seqs\n randomised_seqs = sim_cds_seqs(codon_list, starts, stops)\n sim_densities, sim_core_densities = get_exon_stop_densities(randomised_seqs, exon_info)\n temp_file = \"{0}/{1}.{2}.txt\".format(temp_dir, random.random(), simulation+1)\n temp_files.append(temp_file)\n with open(temp_file, \"w\") as outfile:\n for end in sim_densities:\n for region in sim_densities[end]:\n outfile.write(\"{0},{1},{2}\\n\".format(end, region, \",\".join(gen.stringify(sim_densities[end][region]))))\n outfile.write(\"core,{0}\\n\".format(\",\".join(gen.stringify(sim_core_densities))))\n\n return temp_files", "def export_events_ioe(self):\n for event in self.positive_ids:\n pos_trans = ','.join(sorted(self.positive_ids[event]))\n all_trans = ','.join(list(set(sorted(self.positive_ids[event] + self.negative_ids[event]))))\n full_event = '{};{}:{}'.format(self.gene.name, self.etype, event)\n\n yield ('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(self.gene.chr, self.gene.name, full_event,\n pos_trans, all_trans),\n self.etype)", "def episodes(self):\n episodes = []\n for series in self.series:\n episodes.extend(series.episodes)\n return episodes", "def export_2D_edp(self, filename=\"2Dedp.dat\", xmin=-100, xmax=100, \n zmin=-100, zmax=100, N=201):\n rho_xz = []\n xgrid = np.linspace(xmin, xmax, num=N)\n zgrid = np.linspace(zmin, zmax, num=N)\n for x in xgrid:\n for z in zgrid:\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n rho_xz.append([x, z, tmp.sum(axis=0)])\n rho_xz = np.array(rho_xz, float) \n X, Y, Z= 
rho_xz[:,0], rho_xz[:,1], rho_xz[:,2]\n with open(filename, 'w') as f:\n f.write(\"x z ED\\n\")\n for x, y, z in zip(X, Y, Z):\n f.write(\"{0: 3.1f} {1: 3.1f} {2: }\\n\".format(x, y, z))", "def unit_test_cal_hc_e2ds_ea_spirou(p, rname, inputs):\n # define name and arguments\n name = 'cal_HC_E2DS_EA_spirou'\n arg_names = ['night_name', 'files']\n arg_types = [str, list]\n\n # get arguments\n args = get_args(p, name, rname, inputs, arg_names, arg_types)\n # return args\n return args, name", "def GetEpiAcqTimes(self, series):\n# Find minimum and maximum start times for each acquistion in series.\n self.epi_times = {}\n for entry in self.entry_map['epi']:\n# Loop through each file in this series.\n if self.info[entry]['series'] == series and \\\n self.info[entry]['tdim'] > 2:\n# Relate each entry to its time of acquisition.\n self.epi_times[self.info[entry]['acqtime']] = entry", "def compute_products(self):\r\n src_to_classfiles = defaultdict(list)\r\n for pcd_entry in self.pcd_entries:\r\n srcfile = pcd_entry[1]\r\n # In the file classes are represented with slashes, not dots. E.g., com/foo/bar/Baz.\r\n src_to_classfiles[srcfile].append(pcd_entry[0] + '.class')\r\n return src_to_classfiles", "def _generateExtensionConfigFilePointers(self, eggFileName):\n # Always use forward slashes in eggs\n sep = \"/\"\n eggFile = pylabsZipFile(eggFileName)\n for internalFileName in eggFile.namelist():\n parts = internalFileName.split(sep)\n if parts and parts[-1] == self.extensionConfigName:\n # construct egg path i.e.\n # /opt/qbase2/lib/pylabs/extensions/my_extension.egg/my_first_extension/\n # This format is supported by the eggfile module\n path = sep.join([eggFileName] + parts[:-1])\n yield eggFile.open(internalFileName), path", "def get_imlist2(path):\n return [\n os.path.join(path, f) for f in os.listdir(path) if f.endswith('.ppm')\n ]", "def output_files(self):\n return [self.input_files()[0].replace(\".lhe.gz\", \".stdhep\").replace(\".lhe\", \".stdhep\")]", "def mkfilelist(filesN, filesE):\n newlist = []\n for _fN in filesN:\n for _fE in filesE:\n trN = SacIO(_fN, headonly=True)\n stN = trN.kstnm.rstrip()\n dtN = trN.delta\n trE = SacIO(_fE, headonly=True)\n stE = trE.kstnm.rstrip()\n dtE = trE.delta\n if stN == stE:\n if dtE != dtN:\n print \"sampling intervals are not identical for %s and %s\" % (_fN, _fE)\n break\n else:\n newlist.append((_fN, _fE))\n break\n\n return newlist, 1. 
/ dtN", "def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time = os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. 
Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)", "def paths_for_od(self, r, s):\n pass", "def _points_to_paths(self, points):\n prev = points[0]\n result = []\n for point in points[1:]:\n path = specctraobj.Path()\n path.aperture_width = self._from_pixels(1)\n path.vertex.append(prev)\n path.vertex.append(point)\n result.append(path)\n prev = point\n return result", "def getInterpretedSpectraForAllEvents(self, particleName=\"pion\", pTs=np.linspace(0,2.5,10), where=\"\", orderBy=\"event_id\", verbose=False):\n # processing\n dNdyData = self.getSpectraDataForAllEvents(particleName=particleName, where=where, orderBy=orderBy)\n dNdyintepBlock = []\n if verbose: print(\"Looping over {} events... (please be patient)\".format(dNdyData.shape[0]))\n for iev in range(dNdyData.shape[0]):\n dNdyintep = exp(np.interp(pTs, dNdyData[iev,:,0], log(dNdyData[iev,:,1])))\n dNdyintepBlock.append(dNdyintep)\n if verbose: print(\"Done. Thanks for waiting.\")\n return np.asarray(dNdyintepBlock)", "def day2part2_data():\n test_data = []\n test_data.append([5, 9, 2, 8])\n test_data.append([9, 4, 7, 3])\n test_data.append([3, 8, 6, 5])\n return test_data", "def _id_edges(self):\n edge_list = []\n # identify all edges along the hexagons we explicitly index\n for x_hex_idx in range(0, self.n_hex_x_rows, 1):\n for y_hex_idx in range(0, self.n_hex_y_rows, 1):\n for vertex_idx in range(0, 6, 1):\n first_end_pt = self.arch_vertex_positions[x_hex_idx, y_hex_idx, vertex_idx, :]\n if vertex_idx == 5:\n second_end_pt = self.arch_vertex_positions[x_hex_idx, y_hex_idx, 0, :]\n else:\n second_end_pt = self.arch_vertex_positions[x_hex_idx, y_hex_idx, vertex_idx + 1, :]\n edge_list.append(Edge(np.array([first_end_pt, second_end_pt])))\n\n # identify edges that help form the implicit hexagon in between the explicitly indexed hexagons\n for x_hex_idx in range(0, self.n_hex_x_rows - 1, 1):\n for y_hex_idx in range(0, self.n_hex_y_rows, 1):\n first_end_pt = self.arch_vertex_positions[x_hex_idx, y_hex_idx, 0, : ]\n second_end_pt = self.arch_vertex_positions[x_hex_idx + 1, y_hex_idx, 3, :]\n edge_list.append(Edge(np.array([first_end_pt, second_end_pt])))\n return edge_list", "def get_all_segments(edfFiles):\n\n segments = []\n preprocessor = Preprocessor(config_startShift,\n config_endShift,\n config_powerLineFreq,\n config_bandLowCut,\n config_bandHighCut)\n for edf in edfFiles:\n print(\"getting the labeled segments from the recording \", str(edf.filename))\n segments.extend(get_segments_from_edf(edf, preprocessor))\n if edfFiles.index(edf) == 20: break\n return segments", "def files(self):\n if self._files is None:\n if helpers['isoinfo']: # TODO\n # It's safe to specify -R even for non-rockridge ISOs\n args = [\"-i\", self.path, \"-f\", \"-R\"]\n # At this time we don't support Joliet extensions\n output = helpers['isoinfo'].call(args)\n result = []\n for line in output.split(\"\\n\"):\n # discard non-file output lines\n if not line or line[0] != \"/\":\n continue\n # Non-Rock-Ridge 
filenames look like this in isoinfo:\n # /IOSXR_CONFIG.TXT;1\n # but the actual filename thus is:\n # /iosxr_config.txt\n if self.disk_subformat != \"rockridge\" and \";1\" in line:\n line = line.lower()[:-2]\n # Strip the leading '/'\n result.append(line[1:])\n self._files = result\n return self._files", "def _safe_to_treat_as_episodic(self):\n\n # First check: Old AxoGraph file formats do not contain enough metadata\n # to know for certain that the file is episodic.\n if self.info['format_ver'] < 3:\n self.logger.debug('Cannot treat as episodic because old format '\n 'contains insufficient metadata')\n return False\n\n # Second check: If the file is episodic, it should report that it\n # contains more than 1 episode.\n if 'n_episodes' not in self.info:\n self.logger.debug('Cannot treat as episodic because episode '\n 'metadata is missing or could not be parsed')\n return False\n if self.info['n_episodes'] == 1:\n self.logger.debug('Cannot treat as episodic because file reports '\n 'one episode')\n return False\n\n # Third check: If the file is episodic, groups of traces should all\n # contain the same number of traces, one for each episode. This is\n # generally true of \"continuous\" (single-episode) recordings as well,\n # which normally have 1 trace per group.\n if 'group_header_info_list' not in self.info:\n self.logger.debug('Cannot treat as episodic because group '\n 'metadata is missing or could not be parsed')\n return False\n if 'trace_header_info_list' not in self.info:\n self.logger.debug('Cannot treat as episodic because trace '\n 'metadata is missing or could not be parsed')\n return False\n\n group_id_to_col_indexes = {}\n for group_id in self.info['group_header_info_list']:\n col_indexes = []\n for trace_header in self.info['trace_header_info_list'].values():\n if trace_header['group_id_for_this_trace'] == group_id:\n col_indexes.append(trace_header['y_index'])\n group_id_to_col_indexes[group_id] = col_indexes\n n_traces_by_group = {k: len(v) for k, v in\n group_id_to_col_indexes.items()}\n all_groups_have_same_number_of_traces = len(np.unique(list(\n n_traces_by_group.values()))) == 1\n\n if not all_groups_have_same_number_of_traces:\n self.logger.debug('Cannot treat as episodic because groups differ '\n 'in number of traces')\n return False\n\n # Fourth check: The number of traces in each group should equal\n # n_episodes.\n n_traces_per_group = np.unique(list(n_traces_by_group.values()))\n if n_traces_per_group != self.info['n_episodes']:\n self.logger.debug('Cannot treat as episodic because n_episodes '\n 'does not match number of traces per group')\n return False\n\n # Fifth check: If the file is episodic, all traces within a group\n # should have identical signal channel parameters (e.g., name, units)\n # except for their unique ids. 
This too is generally true of\n # \"continuous\" (single-episode) files, which normally have 1 trace per\n # group.\n signal_channels_with_ids_dropped = \\\n self.header['signal_channels'][\n [n for n in self.header['signal_channels'].dtype.names\n if n != 'id']]\n group_has_uniform_signal_parameters = {}\n for group_id, col_indexes in group_id_to_col_indexes.items():\n # subtract 1 from indexes in next statement because time is not\n # included in signal_channels\n signal_params_for_group = np.array(\n signal_channels_with_ids_dropped[np.array(col_indexes) - 1])\n group_has_uniform_signal_parameters[group_id] = \\\n len(np.unique(signal_params_for_group)) == 1\n all_groups_have_uniform_signal_parameters = \\\n np.all(list(group_has_uniform_signal_parameters.values()))\n\n if not all_groups_have_uniform_signal_parameters:\n self.logger.debug('Cannot treat as episodic because some groups '\n 'have heterogeneous signal parameters')\n return False\n\n # all checks passed\n self.logger.debug('Can treat as episodic')\n return True", "def eid2partid(self, eids, etype): # -> None:\n ...", "def loadEpisodes(fichier):\n \n # Load the file\n with open(fichier, 'r') as f:\n episodes = []\n for episode in f.readlines(): # Read lines one by one\n episode = np.array([p.split(':') # Remove the last ';' and split':'\n for p in episode[:-2].split(';')], float)\n episode = np.array(episode, int)\n episode = episode[episode[:,1].argsort()] # Sort the array in order of the \n episodes.append(episode) # infection time \n \n return episodes", "def partid2eids(self, partid):\n return self._partid2eids[partid]", "def read_paths(path):\n images = [[] for _ in range(2)]\n for dirname, dirnames, _ in os.walk(path):\n for subdirname in dirnames:\n filepath = os.path.join(dirname, subdirname)\n for filename in os.listdir(filepath):\n try:\n imgpath = str(os.path.join(filepath, filename))\n images[0].append(imgpath)\n limit = re.findall('[0-9]+', filename)\n images[1].append(limit[0])\n except IOError as err:\n print(\"I/O error\")\n except:\n print(\"I/O error 2\")\n raise\n return images", "def get_paths(self,test=False):\n\n if test:\n\n filenames= self.filenames_test\n class_numbers=self.class_numbers_test\n\n test_dir='test/'\n\n else:\n filenames=self.filenames\n class_numbers=self.class_numbers\n\n test_dir=''\n\n for filename,cls in zip(filenames,class_numbers):\n\n #sto creando un generatore variabile(val1,val2,val3,val4)\n path=os.path.join(self.in_dir,self.class_names[cls],test_dir,filename)\n\n #yield è utilizzato quando una funzione ritorna un generatore\n yield path", "def get_gld_endpoints(self, gld_comp_list, gld_ep_obj_dic, ep_property):\n gld_ep_list = []\n for cur_comp_str in gld_comp_list:\n cur_ep_dic = {}\n\n # --other properties\n cur_ep_dic[\"global\"] = self.param_gld_ep_global\n cur_ep_dic[\"name\"] = cur_comp_str\n cur_ep_dic[\"type\"] = self.param_gld_ep_type\n\n # --'info' property\n if cur_comp_str in gld_ep_obj_dic:\n cur_ep_info_obj = gld_ep_obj_dic[cur_comp_str]\n else:\n cur_ep_info_obj = cur_comp_str\n cur_ep_info_dic = {\"object\": cur_ep_info_obj, \"property\": ep_property}\n cur_ep_dic[\"info\"] = json.dumps(cur_ep_info_dic)\n\n # --assemble\n gld_ep_list.append(cur_ep_dic)\n\n self.gld_ep_all_list += gld_ep_list\n self.gld_all_key_list += gld_comp_list", "def get_emotion(path):\n onlyfiles = [join(path,f) for f in listdir(path) if isfile(join(path, f))]\n png_files = [f for f in onlyfiles if f.endswith(\".png\")]\n\n for f in png_files:\n dir, file = os.path.split(f)\n e = 
emotions.Emotions()\n print e.build_vector(file[0])", "def ils(self):\n cmd = Popen(['ils'], stdout=PIPE, stderr=STDOUT, shell=True)\n stdout = cmd.communicate()[0].decode('ascii')\n if cmd.returncode != 0:\n print('Failed to fetch irods file list: %s' % stdout)\n return []\n return [s.replace('C-', '').strip() for s in\n stdout.split('\\n')[1:] if s != '']\n\n print('Not Implemented')", "def runErdosRenyi(n,p):\n s = z.Optimize()\n g = ig.Graph.Erdos_Renyi(n, p, directed=True, loops=True)\n while g.is_dag():\n g = ig.Graph.Erdos_Renyi(n, p, directed=True, loops=True)\n\n return MFAS_set_cover(s,g), u.get_feedback_arc_set(g)", "def read_dicom_series(directory, filepattern = \"image_*\"):\n \n if not os.path.exists(directory) or not os.path.isdir(directory):\n raise ValueError(\"Given directory does not exist or is a file : \"+str(directory))\n print('\\tRead Dicom',directory)\n lstFilesDCM = natsorted(glob.glob(os.path.join(directory, filepattern)))\n print('\\tLength dicom series',len(lstFilesDCM) )\n # Get ref file\n RefDs = dicom.read_file(lstFilesDCM[0])\n # get the space sampling\n dx = np.float(RefDs.PixelSpacing[0])\n dy = np.float(RefDs.PixelSpacing[1])\n dz = np.float(RefDs.SliceThickness)\n dsampling = np.array([dx,dy,dz])\n # Load dimensions based on the number of rows, columns, and slices (along the Z axis)\n ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))\n # The array is sized based on 'ConstPixelDims'\n ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)\n\n # loop through all the DICOM files\n for filenameDCM in lstFilesDCM:\n # read the file\n ds = dicom.read_file(filenameDCM)\n # transform the raw data to HU using Rescale slope and intercept and store it as array \n ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array\n\n return ArrayDicom, dsampling", "def get_data_iue(obsid, filt):\n\n # This error code will be used unless there's a problem reading any\n # of the FITS files in the list, or the FILTER value is not understood.\n errcode = 0\n\n # This defines a data point for a DataSeries object as a namedtuple.\n data_point = collections.namedtuple('DataPoint', ['x', 'y'])\n\n # For IUE, this defines the x-axis and y-axis units as a string.\n iue_xunit = \"Angstroms (vacuum, heliocentric)\"\n iue_yunit = \"ergs/cm^2/s/Angstrom\"\n\n # Parse the obsID string to determine the paths+files to read. Note:\n # this step will assign some of the error codes returned to the top level.\n if filt == ' ':\n filt = \"UNKNOWN\"\n if filt.upper() in [\"LOW_DISP\", \"HIGH_DISP\"] or filt == \"UNKNOWN\":\n parsed_files_result = parse_obsid_iue(obsid, filt.upper())\n errcode = parsed_files_result.errcode\n else:\n errcode = 4\n\n # In the case of low dispersion spectra, there can be two apertures for\n # a single obsID. In that case, we return a list of TWO DataSeries, one\n # for each aperture. 
In other words, we treat the single obsID as if it\n # were two different obsIDs in the case of a double-aperture.\n all_data_series = []\n\n # For each file, read in the contents and create a return JSON object.\n if errcode == 0:\n for sfile in parsed_files_result.specfiles:\n # Figure out if this is an mxhi or mxlo spectrum.\n if sfile[-7:] == \"mxlo.gz\":\n is_lo = True\n is_hi = False\n else:\n is_lo = False\n is_hi = True\n\n try:\n with fits.open(sfile) as hdulist:\n if is_lo:\n # Get the dispersion type from the primary header.\n dispersion = hdulist[0].header[\"disptype\"]\n # Get the aperture size(s) from the header.\n apertures = hdulist[1].data[\"aperture\"]\n n_apertures = len(apertures)\n # Number of spectral data points for each aperture size.\n n_wls = [int(x) for x in hdulist[1].data[\"npoints\"]]\n # Initial wavelength value(s).\n starting_wl = [float(x) for x in\n hdulist[1].data[\"wavelength\"]]\n # Step size(s) for each subsequent wavelength.\n delta_wl = [float(x) for x in hdulist[1].data[\"deltaw\"]]\n\n # Generate the full array of wavelength values, and get\n # full array of flux values, for each aperture.\n for aper in range(n_apertures):\n wls = [starting_wl[aper] +\n x*delta_wl[aper] for\n x in range(n_wls[aper])]\n fls = [float(x) for\n x in hdulist[1].data[\"flux\"][aper]]\n # Make sure wavelengths and fluxes are sorted\n # from smallest wavelength to largest.\n sort_indexes = [x[0] for x in\n sorted(enumerate(wls),\n key=itemgetter(1))]\n wls = [wls[x] for x in sort_indexes]\n fls = [fls[x] for x in sort_indexes]\n wlfls = [(x, y) for x, y in zip(wls, fls) if\n y != 0.]\n if wlfls != []:\n datapoints = [\n [data_point(x=float(\"{0:.8f}\".format(x)),\n y=float(\"{0:.8e}\".format(y)))\n for x, y in wlfls]]\n # Create the return DataSeries object.\n all_data_series.append(\n DataSeries('iue', obsid,\n datapoints,\n ['IUE_' + obsid + ' DISP:'\n + dispersion + ' APER:' +\n apertures[aper]],\n [iue_xunit], [iue_yunit],\n errcode))\n\n if is_hi:\n # Get the aperture from the primary header.\n aperture = hdulist[0].header[\"aperture\"].strip()\n # Get the dispersion type from the primary header.\n dispersion = hdulist[0].header[\"disptype\"].strip()\n # Get the camera used (SWP, LWP, LWR).\n camera = hdulist[0].header[\"camera\"].strip()\n # Get a list of spectral orders. 
Those that are beyond\n # the range defined in Solano are not considered.\n if camera == \"LWP\":\n max_order = 124\n elif camera == \"LWR\":\n max_order = 119\n else:\n max_order = 120\n orders = [int(x) for x in hdulist[1].data[\"order\"] if x\n <= max_order]\n n_orders = len(orders)\n # This lists will store each orders' spectral info.\n order_spectra = []\n\n # Loop over each order.\n for order in range(n_orders):\n # Number of fluxes for this order.\n n_p = int(hdulist[1].data[\"npoints\"][order])\n # Starting pixel within the array of 768 elements.\n s_pix = int(\n hdulist[1].data[\"startpix\"][order])\n # Wavelength corresponding to this start pixel.\n starting_wl = float(\n hdulist[1].data[\"wavelength\"][order])\n # Step size for each subsequent wavelength.\n delta_wl = float(\n hdulist[1].data[\"deltaw\"][order])\n # Generate the full array of wavelength values.\n wls = [starting_wl + x*delta_wl for x in\n range(n_p)]\n # Extract the fluxes that go along with these wls.\n all_fluxes = hdulist[1].data[\"abs_cal\"][order]\n fls = [float(x) for x in\n all_fluxes[(s_pix-1):(s_pix-1+n_p-1+1)]]\n # Extract the quality flags that go along with\n # these wls.\n all_qfs = hdulist[1].data[\"quality\"][order]\n qfs = [int(x) for x in all_qfs[(s_pix-1):(s_pix-1+\n n_p-1+1)]]\n # Only keep good Quality Flags, if the order is all\n # bad flags, don't add it.\n keep = [i for i, x in enumerate(qfs) if (qfs[i] >\n -16384)]\n if keep != [] and fls != [0.]*len(fls):\n wls = [wls[i] for i in keep]\n fls = [fls[i] for i in keep]\n # Create a dict that will store this order's\n # info.\n order_spec = {'order':orders[order],\n 'wls':numpy.asarray(wls),\n 'fls':numpy.asarray(fls)}\n order_spectra.append(order_spec)\n\n # Order-combine the spectra.\n comb_spec = order_combine(order_spectra, camera, False)\n\n # Resample onto an evenly-spaced wavelength scale.\n comb_spec_reb = resample_spectrum(comb_spec, camera,\n False)\n\n # Create the return DataSeries object.\n datapoints = [\n [data_point(x=float(\"{0:.8f}\".format(x)),\n y=float(\"{0:.8e}\".format(y)))\n for x, y in comb_spec_reb]]\n all_data_series.append(\n DataSeries('iue', obsid,\n datapoints,\n ['IUE_' + obsid + ' DISP:'\n + dispersion + ' APER:' +\n aperture],\n [iue_xunit], [iue_yunit],\n errcode))\n\n except IOError:\n errcode = 3\n all_data_series.append(\n DataSeries('iue', obsid, [], [''], [''], [''], errcode))\n\n else:\n # This is where an error DataSeries object would be returned.\n all_data_series.append(\n DataSeries('iue', obsid, [], [], [],\n [], errcode))\n\n # Return the DataSeries object back to the calling module.\n if len(all_data_series) == 1:\n return all_data_series[0]\n return all_data_series", "def emit_list_episodes_orgmode(self, filename):\n\n fileObj = open(filename, 'w')\n \n for ep in self.episodes:\n fileObj.write('***' + \n ' Season:' + str(ep.season))\n fileObj.write(' Episode:' + str(ep.number))\n fileObj.write('\\tAired:' + ep.aired) \n fileObj.write('\\tRating:' + str(ep.rating))\n fileObj.write('\\n')\n fileObj.write(ep.description)\n fileObj.write('\\n')\n\n fileObj.close()" ]
[ "0.6314016", "0.59568083", "0.5881365", "0.5844795", "0.5802068", "0.5752258", "0.56392115", "0.54253495", "0.5387592", "0.5376296", "0.53625387", "0.5316459", "0.52880996", "0.5277681", "0.52515364", "0.52128714", "0.5204417", "0.5186836", "0.5180629", "0.5144857", "0.51443803", "0.514352", "0.50614315", "0.50614315", "0.50500137", "0.5040265", "0.50369966", "0.50259423", "0.50053895", "0.49987602", "0.49970743", "0.49897066", "0.49833074", "0.4977011", "0.49769014", "0.49632642", "0.49607632", "0.49521124", "0.49468347", "0.49468347", "0.49440336", "0.49405253", "0.4894341", "0.4888534", "0.48855338", "0.48832354", "0.48831347", "0.4862785", "0.48621497", "0.48611227", "0.4858316", "0.48414263", "0.48298427", "0.48294875", "0.482746", "0.48255965", "0.48239732", "0.48170996", "0.48140272", "0.48129573", "0.4803265", "0.4794211", "0.47864982", "0.4783009", "0.47759798", "0.477412", "0.47660235", "0.47544917", "0.47326988", "0.4730431", "0.4730405", "0.47291625", "0.47226313", "0.47154492", "0.47122133", "0.47046044", "0.47024053", "0.47017333", "0.4700328", "0.4700066", "0.4685359", "0.46835053", "0.4677862", "0.46777824", "0.46758336", "0.46607256", "0.46598205", "0.46546212", "0.46508902", "0.4649731", "0.46427962", "0.46420583", "0.46409607", "0.46374702", "0.46359062", "0.46350607", "0.4633358", "0.46333265", "0.46242395", "0.46240193" ]
0.63749075
0
Read the header from the raw data specified by "path" and use this information combined with the template information to generate the "info" dict object. This object defines the options and paths for each operation.
def _GetImageInfo(self,path):
    hd = Header(path, scan=True)
    hdr = hd.hdr
    self.hdr = hdr
    if hdr is None:
        # Either a ref.dat file or it isn't an imaging file.
        if 'ref' in path and 'dat' in path:
            self.refdats[os.path.realpath(path)] = True
            info = {'type':'refdat'}
            return info
        else:
            return None
    elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):
        # Write a yaml file to the raw data directory if possible.
        dirname, outfile = self._yaml_filename(path)
        yaml_name = '%s/%s' % (dirname, outfile)
        if not os.path.exists(yaml_name):
            # Create yaml file using dirname,
            # e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml
            try:
                hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))
            except IOError:
                # This is a nonessential function, so ignore exceptions
                # such as access violations.
                pass
    elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':
        if not os.path.isdir(path):
            path = os.path.dirname(path)
    shdr = hdr['subhdr']
    nhdr = hdr['native_header']
    self.shdr = shdr
    if 'dti' in shdr.get('PulseSequenceName','').lower() \
            or 'dti' in nhdr.get('PulseSequenceFile',''):
        psdname = 'dti'
    else:
        psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())
    info = {'psdname':psdname, \
            'acqtime':shdr['AcqTime'], \
            'series':int(shdr['SeriesNumber']), \
            'plane':hdr['plane'].strip(), \
            'type':self.imgtype.get(psdname,None), \
            'plane':hdr['plane'], \
            'acqtime':shdr['SeriesTime'], \
#           'fmapdir':None, \
            'refdat':None, \
            'imgfile':None, \
            'base':None, \
            'tdim':int(hdr['tdim']), \
            'echo_spacing':None, \
            'filetype':'brik', \
            'suffix':self.suffix.get(hdr['filetype'], 'brik'), \
            'data_filetype':hdr['filetype']}
    if info['type'] == 'localizer':
        # Don't process the localizer.
        return info
    if isinstance(info['acqtime'], int):
        info['acquisition_time'] = time.ctime(info['acqtime'])
    if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':
        # Sometimes screenshots are defined as epis.
        info['type'] = None
    # Call the method appropriate to the type of scan in this series.
    stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \
                  [info, path])
    if stat:
        info = {'type':'break'}
        return info
    info['suffix'] = self.suffix.get(info['filetype'], 'brik')
    return info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))\n\n d['data included'] = bool(d['data included'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['analysis type'] = self._cleanup_string(d['analysis type']).lower()\n date = self._cleanup_string(date)\n time = self._cleanup_string(time)\n d['date'] = self._cleanup_date(date + ' ' + time)\n\n if self.header['file type'] in (27, 29, 39):\n # Called MaskImage/readMaskIm in OpenMIMS\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = {}\n d['HVControl']['hvcontrol enabled'] = False\n\n elif self.header['file type'] in (22, 41):\n # Called MaskSampleStageImage/readMaskIss in OpenMIMS\n d['original filename'], d['analysis duration'], d['scan type'], \\\n d['steps'], d['step size x'], d['step size y'], d['step size?'], \\\n d['step waittime'], d['frames'], d['beam blanking'], \\\n d['presputtering'], d['presputtering duration'] = \\\n unpack(self._bo + '16s 6i d 4i', hdr.read(64))\n\n d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n # Don't know if this unused byte needs to go after HVControl or after SigRef.\n hdr.seek(4, 1)\n\n elif self.header['file type'] in (21, 26):\n # Not in OpenMIMS\n # this bit same as image, 1 extra unused/unknown\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))\n\n # this bit same as stage scan\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n # 24 bytes unknown, not sure if they go here or before AutoCal\n hdr.seek(24, 1)\n\n elif self.header['file type'] == 31:\n # Don't know if this is correct, all 0s anyway\n d['original filename'], d['scan type'], \\\n d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 3i 4x', hdr.read(32))\n\n elif self.header['file type'] == 35:\n d['original filename'], d['scan type'], d['analysis duration'], \\\n d['frames'], d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 5i 40x', hdr.read(76))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n else:\n raise TypeError('What type of image are you? 
{}'.format(self.header['file type']))\n\n # Continue main header for all types\n d['SigRef'] = self._sigref(hdr)\n d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]\n\n # scan type is set for stage scan analysis, set others\n if isinstance(d['scan type'], int):\n if d['scan type'] == 0:\n d['scan type'] = ''\n else:\n d['scan type'] = str(d['scan type'])\n\n d['beam blanking'] = bool(d['beam blanking'])\n d['presputtering'] = bool(d['presputtering'])\n d['original filename'] = self._cleanup_string(d['original filename'])\n\n if self.header['file type'] in (21, 26, 27, 29, 35, 39):\n if self.header['file version'] >= 4108:\n n = 60\n else:\n n = 10\n elif self.header['file type'] in (22, 31, 40, 41):\n n = 20\n else:\n n = 0\n\n # Not sure what this is, memory pointers? Not needed.\n # d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))\n hdr.seek(n*4, 1)\n\n if self.header['file type'] in (21, 22, 26, 40, 41, 35):\n hdr.seek(4, 1) # 4 bytes unused\n\n # Mass table, dict by species label.\n d['MassTable'] = collections.OrderedDict()\n for m in range(d['masses']):\n mi = {}\n mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \\\n mi['detector'], mi['wait time'], mi['frame count time'] = \\\n unpack(self._bo + '2i d 2i 2d', hdr.read(40))\n\n if self.header['file type'] == 31:\n if d['analysis type'].endswith('trolley step scan'):\n # start and end are in mm, step is in μm; convert to mm\n mi['radius start'], mi['radius end'], \\\n mi['radius step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n mi['radius step'] /= 1000\n else:\n mi['voltage start'], mi['voltage end'], \\\n mi['voltage step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n else:\n mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))\n\n mi.update(self._species(hdr))\n\n if self.header['file type'] == 31:\n hdr.seek(4, 1)\n\n # Add correction controls, my own addition.\n mi['background corrected'] = False\n mi['deadtime corrected'] = False\n mi['yield corrected'] = False\n\n label = mi.pop('label')\n # This is true for NS50L and file version 4108.\n # Anywhere else different?\n # Maybe confirm this with the Trolleys dict,\n # there is an Esi trolley.\n if mi['trolley index'] == 8:\n label = 'SE'\n\n d['MassTable'][label] = mi\n\n # Create a few convenient lists\n d['label list'] = tuple(d['MassTable'].keys())\n d['label list fmt'] = tuple(format_species(m) for m in d['label list'])\n d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])\n\n return d", "def __head_or_get(self, path):\n try:\n info = self.get_cont_stat(path)\n if not isinstance(info, types.DictType):\n raise info()\n headers = HeaderKeyDict({\n 'X-Container-Object-Count': info['object_count'],\n 'X-Container-Bytes-Used': info['bytes_used'],\n 'X-Timestamp': info['created_at'],\n 'X-PUT-Timestamp': info['put_timestamp'],\n })\n metadata = info['metadata']\n for key, value in metadata.iteritems():\n if key == 'r-':\n headers.update({'x-container-read' : value})\n elif key == 'w-':\n headers.update({'x-container-write' : value})\n else:\n ser_key = key.split('-')[0]\n if ser_key == 'm':\n #Supported a single word key till first '-' \n #in the entire metadata header as X-Container-Meta-A\n #key = 'x-container-meta-' + key.split('-')[1]\n \n #SANCHIT: This supports multi-part key for metadata \n #such as X-Container-Meta-A-B-C\n key = 'x-container-meta-' + key.split('-', 1)[1]\n else:\n #key = 'x-container-sysmeta-' + key.split('-')[1]\n key = 
'x-container-sysmeta-' + key.split('-', 1)[1]\n headers.update({key : value})\n return headers\n except HTTPException as error:\n self.logger.exception(error)\n return error.status_int\n except Exception as err:\n self.logger.exception(err)\n return HTTP_INTERNAL_SERVER_ERROR", "def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}", "def stat_file(self, path, info):\n return {}", "def _read_new_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.version = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.revision = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 28\n self.date = struct.unpack('<28s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_format = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.original_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.reference_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_a = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_b = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_c = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 84\n self.annotate = struct.unpack('<84s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_model = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_serial_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.software_version_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.crystal_material = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_wavelength_microns = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_null_doubling = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.padding = struct.unpack('<l',\n 
raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xc = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xm = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xb = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.num_chan = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.interferogram_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.scan_direction = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.acquire_mode = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.emissivity = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.apodization = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.zero_fill = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.run_time_math = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.fft_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.number_of_coadds = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.single_sided = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.chan_display = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.amb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.inst_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.wbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.cbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.temperature_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.emissivity_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 40\n self.spare_i = struct.unpack('<llllllllll',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 80\n self.spare_f = struct.unpack('<dddddddddd',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 68\n self.spare_na = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nb = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nc = struct.unpack('<68s',\n 
raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nd = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_ne = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size", "def info(self, *path):\n target = self.localpath(*path)\n return _open_file_info(target + '.info')", "def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h", "def load_from_file(self, path):\n structure = None\n if re.search(\".pdb\", path):\n parser = PDBParser()\n else:\n parser = MMCIFParser()\n\n path = path.strip()\n model_id = os.path.basename(path)\n #if os.path.basename(path).split('.')[-1] == 'gz':\n # GZ = gzip.open(path, 'rb')\n # GZ.close()\n 
#else :\n\n structure = parser.get_structure(model_id, open_file( path ))\n header = parser.get_header()\n\n return structure, header", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, 
count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = 
np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')", "def _read_header(\n self, header, filename, run_check_acceptability=True, background_lsts=True\n ):\n # get telescope information\n latitude = header[\"latitude\"][()]\n longitude = header[\"longitude\"][()]\n altitude = header[\"altitude\"][()]\n self.telescope_location_lat_lon_alt_degrees = (latitude, longitude, altitude)\n self.instrument = header[\"instrument\"][()].tobytes().decode(\"utf8\")\n self.telescope_name = header[\"telescope_name\"][()].tobytes().decode(\"utf8\")\n\n # get source information\n self.object_name = header[\"object_name\"][()].tobytes().decode(\"utf8\")\n\n # set history appropriately\n self.history = header[\"history\"][()].tobytes().decode(\"utf8\")\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n # check for vis_units\n if \"vis_units\" in header:\n self.vis_units = header[\"vis_units\"][()].tobytes().decode(\"utf8\")\n else:\n # default to uncalibrated data\n self.vis_units = \"UNCALIB\"\n\n # check for optional values\n if \"dut1\" in header:\n self.dut1 = float(header[\"dut1\"][()])\n if \"earth_omega\" in header:\n self.earth_omega = float(header[\"earth_omega\"][()])\n if \"gst0\" in header:\n self.gst0 = float(header[\"gst0\"][()])\n if \"rdate\" in header:\n self.rdate = header[\"rdate\"][()].tobytes().decode(\"utf8\")\n if \"timesys\" in header:\n self.timesys = header[\"timesys\"][()].tobytes().decode(\"utf8\")\n if \"x_orientation\" in header:\n self.x_orientation = header[\"x_orientation\"][()].tobytes().decode(\"utf8\")\n if \"blt_order\" in header:\n blt_order_str = header[\"blt_order\"][()].tobytes().decode(\"utf8\")\n 
self.blt_order = tuple(blt_order_str.split(\", \"))\n if self.blt_order == (\"bda\",):\n self._blt_order.form = (1,)\n\n if \"antenna_diameters\" in header:\n self.antenna_diameters = header[\"antenna_diameters\"][()]\n if \"uvplane_reference_time\" in header:\n self.uvplane_reference_time = int(header[\"uvplane_reference_time\"][()])\n if \"eq_coeffs\" in header:\n self.eq_coeffs = header[\"eq_coeffs\"][()]\n if \"eq_coeffs_convention\" in header:\n self.eq_coeffs_convention = (\n header[\"eq_coeffs_convention\"][()].tobytes().decode(\"utf8\")\n )\n\n # check for phasing information\n self.phase_type = header[\"phase_type\"][()].tobytes().decode(\"utf8\")\n if self.phase_type == \"phased\":\n self._set_phased()\n self.phase_center_ra = float(header[\"phase_center_ra\"][()])\n self.phase_center_dec = float(header[\"phase_center_dec\"][()])\n self.phase_center_epoch = float(header[\"phase_center_epoch\"][()])\n if \"phase_center_frame\" in header:\n self.phase_center_frame = (\n header[\"phase_center_frame\"][()].tobytes().decode(\"utf8\")\n )\n elif self.phase_type == \"drift\":\n self._set_drift()\n else:\n self._set_unknown_phase_type()\n\n # get antenna arrays\n # cast to native python int type\n self.Nants_data = int(header[\"Nants_data\"][()])\n self.Nants_telescope = int(header[\"Nants_telescope\"][()])\n self.ant_1_array = header[\"ant_1_array\"][:]\n self.ant_2_array = header[\"ant_2_array\"][:]\n self.antenna_names = [\n n.tobytes().decode(\"utf8\") for n in header[\"antenna_names\"][:]\n ]\n self.antenna_numbers = header[\"antenna_numbers\"][:]\n self.antenna_positions = header[\"antenna_positions\"][:]\n\n # set telescope params\n try:\n self.set_telescope_params()\n except ValueError as ve:\n warnings.warn(str(ve))\n\n # get baseline array\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n self.Nbls = len(np.unique(self.baseline_array))\n\n # get uvw array\n self.uvw_array = header[\"uvw_array\"][:, :]\n\n # get time information\n self.time_array = header[\"time_array\"][:]\n integration_time = header[\"integration_time\"]\n self.integration_time = integration_time[:]\n proc = None\n if \"lst_array\" in header:\n self.lst_array = header[\"lst_array\"][:]\n # check that lst_array in file is self-consistent\n if run_check_acceptability:\n (\n latitude,\n longitude,\n altitude,\n ) = self.telescope_location_lat_lon_alt_degrees\n lst_array = uvutils.get_lst_for_time(\n self.time_array, latitude, longitude, altitude\n )\n if not np.all(\n np.isclose(\n self.lst_array,\n lst_array,\n rtol=self._lst_array.tols[0],\n atol=self._lst_array.tols[1],\n )\n ):\n warnings.warn(\n \"LST values stored in {file} are not self-consistent \"\n \"with time_array and telescope location. 
Consider \"\n \"recomputing with utils.get_lst_for_time.\".format(file=filename)\n )\n else:\n # compute lst_array from time_array and telescope location\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n # get frequency information\n self.freq_array = header[\"freq_array\"][:, :]\n self.channel_width = float(header[\"channel_width\"][()])\n self.spw_array = header[\"spw_array\"][:]\n\n # get polarization information\n self.polarization_array = header[\"polarization_array\"][:]\n\n # get data shapes\n self.Nfreqs = int(header[\"Nfreqs\"][()])\n self.Npols = int(header[\"Npols\"][()])\n self.Ntimes = int(header[\"Ntimes\"][()])\n self.Nblts = int(header[\"Nblts\"][()])\n self.Nspws = int(header[\"Nspws\"][()])\n\n # get extra_keywords\n if \"extra_keywords\" in header:\n self.extra_keywords = {}\n for key in header[\"extra_keywords\"].keys():\n if header[\"extra_keywords\"][key].dtype.type in (np.string_, np.object_):\n self.extra_keywords[key] = (\n header[\"extra_keywords\"][key][()].tobytes().decode(\"utf8\")\n )\n else:\n self.extra_keywords[key] = header[\"extra_keywords\"][key][()]\n\n if proc is not None:\n # if lsts are in the background wait for them to return\n proc.join()\n\n return", "def processFileLocInfo(self):\n\t\t# If bit 1 of the flags field is set\n\t\tif int(self.header['flags']) & 2 > 0:\n\n\t\t\t# Read size of file location info\n\t\t\ttxt = self.fpLnk.read(4)\n\t\t\tself.file_loc = {}\n\t\t\tself.file_loc['size'] = struct.unpack(\"<I\", txt)[0]\n\t\t\t\t\n\t\t\t# Read size of file location info and prepend the previous read value.\n\t\t\t# Txt was prepended to remove a special condition case need to skip\n\t\t\t# the re-reading of the size field.\n\t\t\tfile_loc_raw = txt + self.fpLnk.read(self.file_loc['size'] - 4)\n\n\t\t\t# Loop throuh predefine file format, extracting field into a new data\n\t\t\t# file location header dictionary.\n\t\t\t# XXX: do we really want to clobber the dictionary we just created\n\t\t\t# and not self.file_loc.update(parseStructuredData())?\n\t\t\tself.file_loc = parseStructuredData(file_loc_raw, FILE_LOC_HEADER)\n\t\t\n\t\t\t# Process local volume info if flag is set\n\t\t\tif (self.file_loc['flags'] & 1) > 0:\n\t\t\t\tlocalVolTbl = processVolTbl(file_loc_raw, \n\t\t\t\t\tself.file_loc['local_vol_info_offset'], LOCAL_VOL_TBL)\n\t\t\t\tself.file_loc['localVolTbl'] = localVolTbl\n\t\t\t\toffset = self.file_loc['local_base_path_offset']\n\t\t\t\tbasePathname = file_loc_raw[offset:].split('\\x00')[0]\n\t\t\t\tself.file_loc['basePathname'] = basePathname\n\t\t\telse:\n\t\t\t\tself.file_loc['localVolTbl'] = None\n\n\t\t\t# Process net volume info if flag is set\n\t\t\tif (self.file_loc['flags'] & 2) > 0:\n\t\t\t\tnetVolTbl = processVolTbl(file_loc_raw, \n\t\t\t\t\tself.file_loc['net_vol_info_offset'], NET_VOL_TBL)\n\t\t\t\tself.file_loc['netVolTbl'] = netVolTbl\n\t\t\telse:\n\t\t\t\tself.file_loc['netVolTbl'] = None\n\n\t\t\t# Process remaining portion of pathname\n\t\t\toffset = self.file_loc['remain_pathname_offset']\n\t\t\tremainPathname = file_loc_raw[offset:].split('\\x00')[0]\n\t\t\tself.file_loc['remainPathname'] = remainPathname", "def fill_header_section():\n section = _SectionData(\"Header\")\n section.props.append((\"FormatVersion\", 1))\n section.props.append((\"Source\", get_combined_ver_str()))\n section.props.append((\"Type\", \"Configuration\"))\n section.props.append((\"Note\", \"User settings of SCS Blender Tools\"))\n author = bpy.context.user_preferences.system.author\n if author:\n 
section.props.append((\"Author\", str(author)))\n section.props.append((\"ConfigStoragePlace\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.config_storage_place)))\n section.props.append((\"DumpLevel\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.dump_level)))\n return section", "def load_header(base_path, subvolume):\n with h5py.File(file_path(base_path, subvolume, 'subvolume'), 'r') as f:\n header = dict(f['Header'].attrs.items())\n header.update({key: f['Header'][key][:] for key in f['Header'].keys()})\n \n return header", "def generate_api_header(stmt, struct, operation, path, is_collection=False):\n childPath = False\n parentContainer = [to_upper_camelcase(element) for i,element in enumerate(str(path).split('/')[1:-1]) if str(element)[0] =='{' and str(element)[-1] == '}' ]\n\n\n if len(str(path).split('/'))>3:\n childPath = True\n parentContainer = ''.join([to_upper_camelcase(element) for i,element in enumerate(str(path).split('/')[1:-1])\n if not str(element)[0] =='{' and not str(element)[-1] == '}' ])\n\n struct['summary'] = '%s %s%s' % (\n str(operation), str(stmt.arg),\n ('' if is_collection else ' by ID'))\n struct['description'] = str(operation) + ' operation of resource: ' \\\n + str(stmt.arg)\n struct['operationId'] = '%s%s%s%s' % (str(operation).lower(),\n (parentContainer if childPath else ''),\n to_upper_camelcase(stmt.arg),\n ('' if is_collection else 'ById'))\n struct['produces'] = ['application/json']\n struct['consumes'] = ['application/json']", "def _read_old_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.version = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.revision = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 26\n self.date = struct.unpack('<26s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.file_format = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.original_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.reference_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_a = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_b = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_c = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_d = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 82\n self.annotate = struct.unpack('<82s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_model = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_serial_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += 
data_size\n\n data_size = 33\n self.software_version_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.crystal_material = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_wavelength_microns = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.laser_null_doubling = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.optical_ratio = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xc = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xm = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xb = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_size = struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.acquire_mode = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.emissivity = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.apodization = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.zero_fill = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.run_time_math = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.fft_size = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_coadds = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_igrams = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.amb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.inst_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.wbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.cbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 20\n self.spare_i = struct.unpack('<hhhhhhhhhh',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_f = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_l = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 65\n self.spare_na = struct.unpack('<65s',\n 
raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nb = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nc = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nd = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_ne = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size", "def handle(self, data: Dict[str, Any], **kwargs) -> None:\n self._handle_path(data['headers']['path'])(data, **kwargs)", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def _read_data(self, path: str) -> T:\n raise NotImplementedError", "def read_ldat_header(cls, headerpath):\n # TODO extract CalTable info.\n if os.path.isdir(headerpath):\n files = os.listdir(headerpath)\n headerfiles = [f for f in files if f.endswith('.h')]\n headerfile = os.path.join(headerpath, headerfiles.pop())\n else:\n headerfile = headerpath\n stnid = None\n 
starttime = None\n headerversion = 0\n with open(headerfile, 'r') as hf:\n for hline in hf:\n if \"Header version\" in hline:\n headerversion = hline.split()[-1]\n beamctl_line = \"\"\n contents = {}\n datatype = None\n with open(headerfile, 'r') as hf:\n if headerversion == '1':\n rspctl_lines = []\n for line in hf:\n if \"Observer\" in line:\n _label, _observer = line.split('=')\n if \"Project\" in line:\n _label, _project = line.split('=')\n if \"DataType\" in line:\n _label, datatype = line.split('=')\n if \"StationID\" in line:\n _label, stnid = line.split('=')\n stnid = stnid.strip()\n if \"StartTime\" in line:\n _label, starttime = line.split('=')\n starttime = starttime.strip()\n if \"beamctl\" in line:\n # HACK\n beamctl_line = line\n if \"rspctl\" in line:\n rspctl_lines.append(line)\n elif headerversion == '2':\n contents = yaml.safe_load(hf)\n _observer = contents['Observer']\n _project = contents['Project']\n datatype = contents['DataType']\n stnid = contents['StationID']\n starttime = contents['StartTime']\n beamctl_line = contents['BeamctlCmds']\n rspctl_lines = contents['RspctlCmds'].split('\\n')\n else:\n # headerversion == '4':\n contents = yaml.safe_load(hf)\n datatype = contents['ldat_type']\n filenametime = contents['filenametime']\n stnid = contents['station_id']\n rcusetup_cmds = contents['rcusetup_cmds']\n beamctl_cmds = contents['beamctl_cmds']\n rspctl_cmds = contents['rspctl_cmds']\n if 'caltabinfos' in contents:\n caltabinfos = contents['caltabinfos']\n else:\n caltabinfos = []\n if 'septonconf' in contents:\n septonconf = contents['septonconf']\n else:\n septonconf = None\n obsinfo = cls(datatype, stnid, rcusetup_cmds, beamctl_cmds, rspctl_cmds,\n caltabinfos=caltabinfos, septonconf=septonconf)\n obsinfo.filenametime = filenametime\n return obsinfo", "def _parse_header(path):\n with open(path) as f:\n text = f.read().splitlines()\n raw_segs = [line.split() for line in text if ':' in line]\n\n # convert the content into a giant dict of all key, values\n return dict((i[0][:-1], i[1:]) for i in raw_segs)", "def _ReadFileHeader(self, file_object):\n data_type_map = self._GetDataTypeMap('recycle_bin_metadata_file_header')\n\n file_header, _ = self._ReadStructureFromFileObject(\n file_object, 0, data_type_map, 'file header')\n\n if self._debug:\n debug_info = self._DEBUG_INFORMATION.get(\n 'recycle_bin_metadata_file_header', None)\n self._DebugPrintStructureObject(file_header, debug_info)\n\n if file_header.format_version not in self._SUPPORTED_FORMAT_VERSION:\n raise errors.ParseError(\n f'Unsupported format version: {file_header.format_version:d}')\n\n return file_header", "def _parseFileHeader(self):\n self.fileheader = FileHeader()\n self.fileheader.parse(self.f)\n #print('Parsed fileheader')", "def dataOrHeader(self, name, doH):\r\n f = open(self.location + \"/\" + name)\r\n r = f.read()\r\n f.close()\r\n index = r.find(self.dividerString_)\r\n dataOrHeader = r[index+1:len(r)] if doH else r[0:index]\r\n #hacky fix for random \\r\r\n dataOrHeader = dataOrHeader.replace(\"\\r\", \"\") \r\n return dataOrHeader", "def construct_dicts(self, path):\n module_dicts = self.read_dict(path, use_superpkg=True)\n\n id_dict = dict()\n name_dict = dict()\n\n for cmd_dict in module_dicts:\n # Create a cmd template object\n cmd_temp = cmd_template.CmdTemplate(\n cmd_dict[self.OP_CODE_FIELD],\n cmd_dict[self.MNEMONIC_FIELD],\n cmd_dict[self.COMPONENT_FIELD],\n cmd_dict[self.ARGS_FIELD],\n cmd_dict[self.DESC_FIELD],\n )\n\n id_dict[cmd_dict[self.OP_CODE_FIELD]] = cmd_temp\n 
name_dict[cmd_dict[self.MNEMONIC_FIELD]] = cmd_temp\n\n return (id_dict, name_dict)", "def read_parse_raw_data(path):\n file_list = TopologyHelper.get_file_list(path)\n print(\"Reading \" + str(len(file_list)) + \" files from \" + path)\n topology_info = []\n file_name = []\n for file in file_list:\n try:\n r = TopologyHelper.parse_file(file)\n tmp = (r[0])['Topology']\n topology_info.append(tmp)\n t = r[1]\n file_name.append(t)\n except:\n continue\n print(\"Parsing completed\")\n return file_name, topology_info", "def parse_header(self):", "def create_header_file(self, path: pathlib.Path) -> str:\n code_reader = CodeReader()\n parser = Parser.find(self.language)()\n ast_generalizer = AstGeneralizer.find(self.language)({'path': path})\n unparser = Unparser.find(self.language)(headers=True)\n code = code_reader.read_file(path)\n cpp_tree = parser.parse(code, path)\n tree = ast_generalizer.generalize(cpp_tree)\n header_code = unparser.unparse(tree)\n _LOG.debug('unparsed raw header file: \"\"\"%s\"\"\"', header_code)\n return header_code", "def get_data_for_header(req):\n try:\n user_id = req.user\n except KeyError as e:\n msg = req.get_error_msg(e)\n return send_error_response(msg)\n try:\n header_data = dict()\n lang = rt.get_state(user_id).language\n #TODO change on database access\n header_data['languages'] = common_getter.get_languages_list(pt, lang)\n header_data['user'] = common_getter.get_user_info(pt, user_id, lang)\n header_data['client'] = common_getter.get_client_info(pt, user_id, lang)\n return send_success_response(header_data)\n except Exception as e:\n msg = req.get_error_msg(e, lang=lang)\n return send_error_response(msg)", "def _load_template(self):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, '__init__.ini')\n cf = ApplicationConf.get_instance()\n with comp_open(filename, mode='r') as fp:\n content = fp.read()\n content = content.format(**cf)\n conf = CompConfigParser(allow_no_value=True)\n conf.read_string(content, '__init__.ini')\n ini = {'dirs': [], 'files': [], 'binaries': []}\n if conf.has_section('dirs'):\n for key in conf.options('dirs'):\n ini['dirs'].append(key)\n if conf.has_section('files'):\n for key in conf.options('files'):\n ini['files'].append(self.__remap(key))\n if conf.has_section('binaries'):\n for key in conf.options('binaries'):\n ini['binaries'].append(self.__remap(key))\n if isinstance(self._ini, dict):\n self._ini.update(ini)\n else:\n self._ini = ini", "def info(self, *path):\n self._download_server_info()\n if self._info:\n return self._info.get(path, {})\n path = list(path)\n path[-1] += \".info\"\n t = self._open(*path)\n if t.status_code == 200:\n return json.loads(t.text)\n else:\n return {}", "def peek(self):\n self.fh.seek(0)\n snip = self.fh.read(12)\n if unpack('<i', snip[4:8])[0] <= max(_supported_file_types):\n self.header['byte order'] = '<'\n self._bo = '<'\n elif unpack('>i', snip[4:8])[0] <= max(_supported_file_types):\n self.header['byte order'] = '>'\n self._bo = '>'\n else:\n raise TypeError(\"Cannot determine file endianess.\")\n\n self.header['file version'], self.header['file type'], \\\n self.header['header size'] = \\\n unpack(self._bo + '3i', snip)\n\n if self.header['file type'] not in _supported_file_types:\n msg = \"File of type {} is not supported at the moment.\"\n msg = msg.format(self.header['file type'])\n raise NotImplementedError(msg)", "def set_content(self, offset: int, content_dict: dict):\n super().set_content(offset, content_dict)\n\n # Get the list of files from the 
properties and determine basenames from the path, which\n # will be used as name in the readfs\n for file in content_dict.get('properties').get('files'):\n self.file_paths.append([os.path.basename(file), file])\n\n # First declare the sub-sections so that the right offsets are computed\n\n # Top structure which will gather all sub-sections\n self.top_struct = CStructParent('readfs', parent=self)\n\n # Main header for readfs size and number of files\n self.header = ReadfsHeader('header', parent=self.top_struct)\n\n # One header per file containig file size, name and flash offset\n for i, path in enumerate(self.file_paths):\n filename, filepath = path\n self.file_headers.append(ReadfsFileHeader(f'file{i} header', len(filename)+1,\n parent=self.top_struct))\n\n # File contents\n for i, path in enumerate(self.file_paths):\n filename, filepath = path\n self.files.append(ReadfsFile(f'file{i}', os.path.getsize(filepath),\n parent=self.top_struct))\n\n\n\n # Now that the offsets have been computed, we can fill-in the various fields\n\n # Main header\n header_size = self.header.get_size()\n for file_header in self.file_headers:\n header_size += file_header.get_size()\n\n self.header.set_field('fs_size', header_size)\n self.header.set_field('nb_files', len(self.files))\n\n for i, path in enumerate(self.file_paths):\n filename, filepath = self.file_paths[i]\n file_header = self.file_headers[i]\n file = self.files[i]\n\n # Per-file header\n file_header.set_field('offset', file.get_offset() - self.get_offset())\n file_header.set_field('file_size', os.path.getsize(filepath))\n file_header.set_field('name_len', len(filename)+1)\n file_header.set_field('name', filename.encode('utf-8') + bytes([0]))\n\n # Per-file content\n with open(filepath, 'rb') as file_desc:\n file.set_field('data', file_desc.read())", "def get_metadata_from_path(path):\n try:\n import yaml\n # assumes index card is in the top-level of path\n index_card = os.path.join(path, \"M_index.yaml\")\n with open(index_card, \"r\") as stream:\n file_info = yaml.safe_load(stream)\n\n metadata_dict = {}\n metadata_dict[\"book_id\"] = file_info[\"book_id\"]\n metadata_dict[\"timestamp_start\"] = file_info[\"start_time\"]\n metadata_dict[\"type\"] = file_info[\"type\"]\n metadata_dict[\"obsid\"] = _convert_book_id_to_obsid(file_info[\"book_id\"])\n # get optional bits\n if \"stop_time\" in file_info:\n metadata_dict[\"timestamp_end\"] = file_info[\"stop_time\"]\n if \"observatory\" in file_info:\n metadata_dict[\"observatory\"] = file_info[\"observatory\"]\n if \"telescope\" in file_info:\n metadata_dict[\"telescope\"] = file_info[\"telescope\"]\n if \"stream_ids\" in file_info:\n metadata_dict[\"stream_ids\"] = file_info[\"stream_ids\"]\n if \"subtype\" in file_info:\n metadata_dict[\"subtype\"] = file_info[\"subtype\"]\n if \"tags\" in file_info:\n metadata_dict[\"tags\"] = file_info[\"tags\"]\n if \"scanification\" in file_info:\n metadata_dict[\"scanification\"] = file_info[\"scanification\"]\n if \"hwp_rate_hz\" in file_info:\n metadata_dict[\"hwp_rate_hz\"] = file_info[\"hwp_rate_hz\"]\n if \"sequencer_ref\" in file_info:\n metadata_dict[\"sequencer_ref\"] = file_info[\"sequencer_ref\"]\n return metadata_dict\n except (ImportError, FileNotFoundError, KeyError):\n pass\n\n return None", "def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n 
self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data", "def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n 
str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? 
Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict", "def parse_header(self, header):\n # \n \n # this is what the line'll look like:\n # e.g.: /logs/1\n # e.g.: /detailed/host/timestamp\n\n # get index of first slash\n first_slash = header.index('/')\n \n \n # splice the string now and remove any spaces\n requested_folder = header.strip('/')\n \n # check if it's just a slash\n if not requested_folder:\n # return a 0 meaning we want the latest log file\n return (0, 0)\n else:\n # check that it's a valid request\n detailed_request = requested_folder.split('/')\n # detailed_request should be of form /log/* where * is a number\n # two types of requests:\n # type 1: /log/* where * is a number\n # type 2: /detailed/node_name/timestamp\n # node_name: node name\n # timetamp is the timestamp of the run\n \n \n if len(detailed_request) == 2:\n # type 1 request\n # first entry is '' since there's a leading '/'\n if detailed_request[0] == 'log':\n # now get a valid number for a folder request\n try:\n log_number = int(detailed_request[1])\n except Exception, e:\n print \"Error obtaining log (request: \"+requested_folder+\")\"\n return (-1, str(e))\n else:\n return (0, log_number)\n else:\n return (-1, 'Invalid request (len 2)')\n elif len(detailed_request) == 3:\n # type 2 request\n if detailed_request[0] == 'detailed':\n nodename = detailed_request[1]\n timestamp = detailed_request[2]\n # verify that timestamp is a valid #\n try:\n timestamp_int = int(timestamp)\n except ValueError, ve:\n print 'Invalid timestamp requested, '+timestamp\n print ve\n return (-1, 'Invalid timestamp')\n else:\n # return the filepath as our response\n return (1, './detailed_logs/'+nodename+'/'+timestamp)\n \n \n else:\n return (-1, 'Invalid request (len 3)')\n \n else:\n # invalid!\n return (-1, 'Invalid detailed log request ('+str(detailed_request)+')')", "def read_data(self):\n if not self.header['data included']:\n pass\n elif self.header['file type'] in (21, 26):\n self._isotope_data()\n if os.path.exists(self.filename + '_txt'):\n self._isotope_txt_data()\n elif self.header['file type'] == 22:\n # line scan types, no ImageHeader\n warnings.warn('No data read for line scan, fix')\n pass\n elif self.header['file type'] in (31, 35):\n self._beamstability_data()\n else:\n self._image_data()", "def debug_info_header(header):\n print(colored(\"Header:\", 'cyan'), colored(\"Valid FDT magic value found\", \"green\", attrs=['bold']))\n print(colored(\"Header\", 'cyan'), \"-> Total Size of file: \",\n colored('{0:>8d} {0:>#8x}'.format(header.totalsize), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Struct Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_struct), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_struct), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to String Block: \",\n 
colored('{0:>8d} {0:>#8x}'.format(header.off_dt_strings), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_strings), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Memory Reser: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_mem_rsvmap), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Version of DTB: \",\n colored('{0:>8d} {0:>#8x}'.format(header.version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Previous Version of DTB:\",\n colored('{0:>8d} {0:>#8x}'.format(header.last_comp_version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Boot CPU Number: \",\n colored('{0:>8d} {0:>#8x}'.format(header.boot_cpuid_phys), 'yellow'))\n print()", "def get_file_infos(path_spec):\n\n file_entry = dfvfs_utils.pathspec_to_fileentry(path_spec)\n stat = file_entry.GetStat()\n if not stat:\n LOGGER.warning(\"Could not get stat object for %s\", file_entry.name)\n\n entry = {\n \"size\": getattr(stat, 'size', 0),\n \"name\": file_entry.name,\n \"type\": file_entry.entry_type,\n }\n for time in [('atime', 'accessed'), ('mtime', 'modified'), ('crtime', 'created')]:\n secs = getattr(stat, time[0], 0)\n nanos = getattr(stat, time[0] + '_nano', 0)\n if secs and secs != 0:\n datetime_entry = datetime.utcfromtimestamp(secs)\n datetime_entry = datetime_entry.replace(microsecond=int(nanos / 10))\n entry[time[1]] = datetime_entry.isoformat(timespec='milliseconds') + 'Z'\n\n # the path is not part of STIX 2.0 for file objects, but is very useful to have,\n # so we make it a custom attribute\n entry[\"path\"] = path_spec.location\n\n return entry", "def parse_metadata(path, site):\n headers = ['name', 'lat', 'lon', 'altitude', 'depth', 'prefecture', 'otherlat', 'otherlon', 'instrument']\n site_info = pd.read_csv(path, index_col=0, header=None).loc[site].values[0:9]\n return {header: site_info[i] for i, header in enumerate(headers)}", "def read_header(self):\n # Read entire header into memory in one read to minimize Disk I/O.\n self.fh.seek(0)\n hdr = self.fh.read(self.header['header size'])\n\n # Find several markers in the byte-string\n # Each of these may occur more than once, find last.\n polylist_pos = hdr.rfind(b'Poly_list\\x00')\n champslist_pos = hdr.rfind(b'Champs_list\\x00')\n offsetlist_pos = hdr.rfind(b'Offset_list\\x00')\n\n # Find first occurance for these.\n # analparam_pos = hdr.find(b'Anal_param\\x00')\n analparamnano_pos = hdr.find(b'Anal_param_nano\\x00')\n analparamnanobis_pos = hdr.find(b'Anal_param_nano_bis\\x00')\n\n # Turn byte-string into BytesIO file-like object; reading and\n # keeping track of where we are is easier that way than trying to\n # slice byte-string as an array and keeping track of indices.\n hdr = io.BytesIO(hdr)\n\n # Main header\n hdr.seek(12)\n self.header.update(self._main_header(hdr))\n\n # NanoSIMS header, starts with PolyList/ChampsList/OffsetList\n # The following configurations have been found in the wild, so far:\n # 1. NS header\n # 2. PL, NS header\n # 3. PL, CL, OL, NS header\n # 4. 
PL, CL, OL, partial NS header, PL, NS header, PL, CL, OL,\n # partial NS header, PL, NS header\n # Note: I have not seen any *lists with contents (only length 0).\n # From OpenMIMS documentation I know that PolyList is as list of\n # Species dicts, but don't know how to read ChampsList or OffsetList.\n if polylist_pos < 0:\n # Case 1: No PL marker, so far only found for Real Time Images,\n # beam stability, or secondary ion beam centering files.\n if (self.header['analysis type'].endswith('rti') or\n self.header['file type'] == 35):\n hdr.seek(216, 1)\n elif self.header['file type'] == 31:\n if (self.header['analysis type'].endswith('hmr') or\n self.header['analysis type'].endswith('trolley step scan')):\n hdr.seek(120, 1)\n else:\n # secondary ion beam\n hdr.seek(600, 1)\n else:\n raise NotImplementedError('No PolyList marker found in header '\n 'and not and RTI image. Don\\'t know '\n 'how to continue.')\n elif (champslist_pos < 0 and offsetlist_pos < 0):\n # Case 2: PL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n elif (polylist_pos < champslist_pos < offsetlist_pos):\n # Case 3: PL, CL, OL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n elif (champslist_pos < offsetlist_pos < polylist_pos):\n # Case 4: PL, CL, OL, partial NS header, PL, NS header\n # with possible repeat\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n else:\n raise NotImplementedError(\n 'An unknown order of the Poly/Champs/Offset Lists occured.\\n'\n 'Positions: PL = {}, CL = {}, OL = {}'\n ''.format(polylist_pos, champslist_pos, offsetlist_pos))\n\n self.header['NanoSIMSHeader'] = self._nanosims_header(hdr)\n\n # How much to skip? Chomping does not work; what if first value is 0?\n # This is correct so far, for nsheader v8 and 9\n hdr.seek(948, 1)\n self.header['BFields'] = []\n for b in range(self.header['NanoSIMSHeader']['b fields']):\n bf = self._bfield(hdr)\n bf['counting frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['counting frame height'] * \\\n self.header['NanoSIMSHeader']['counting frame width']\n bf['scanning frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['scanning frame height'] * \\\n self.header['NanoSIMSHeader']['scanning frame width']\n bf['working frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['working frame height'] * \\\n self.header['NanoSIMSHeader']['working frame width']\n self.header['BFields'].append(bf)\n # End nanosims_header/bfield based on Poly_list position\n\n # Analytical parameters\n\n # anal_param is not in OpenMIMS at all, represents file\n # Cameca NanoSIMS Data/raw_spec/cur_anal_par\n # However, only few useful things in this section, all of\n # which are also in other sections. 
Skip.\n # if analparam_pos < 0:\n # msg = 'Anal_param not found in header, skipping.'\n # warnings.warn(msg)\n # else:\n # hdr.seek(analparam_pos + 24)\n # print(analparam_pos)\n # d = {}\n # d['primary ion'], d['primary current begin'], \\\n # d['primary current end'], d['raster'], \\\n # d['X 00 always 1.0'], \\\n # d['X 01 always 1'], d['X 02 always 0'], \\\n # d['X 03 always 1'], d['X 04 always 0'], \\\n # d['X 05 always 0'], d['X 06 (not0 always 0'], \\\n # d['X 07 (not) always 0'], d['X 08 always 0'], \\\n # d['pressure 1'], d['e0w'], d['X 09 always 35 or #'], \\\n # d['X 10 junk'], \\\n # d['X 11 always 1'], d['X 12 always 0'], \\\n # d['X 13 always 1'], d['X 14 always 0'], \\\n # d['X 15 always 0'], d['X 16 always 0'], \\\n # d['X 17 always 0'], d['X 18 always 0'], \\\n # d['X 19 always 0'], d['X 20 always 300'], \\\n # d['X 21'], d['X 22'], d['X 23'], d['X 24'], \\\n # d['pressure 2'], d['X 25 junk'] = \\\n # unpack(self._bo + '24s 4d 8i 48s d i 28s 14i 8s 176s', hdr.read(416))\n #\n # d['pressure 1'] = self._cleanup_string(d['pressure 1'])\n # d['pressure 2'] = self._cleanup_string(d['pressure 2'])\n # d['primary ion'] = self._cleanup_string(d['primary ion'])\n #\n # self.header['AnalParam'] = d\n\n # Called AnalyticalParamNano AND AnalysisParamNano in OpenMIMS.\n # Here, split out Primary and Secondary beam.\n # Represents the file Cameca NanoSIMS Data/raw_spec/cur_anal_par_nano\n if analparamnano_pos < 0:\n msg = 'Anal_param_nano not found in header, '\n msg += 'don\\'t know where PrimaryBeam section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnano_pos + 16)\n self.header['analysis version'], self.header['n50large'], \\\n self.header['comment'] = \\\n unpack(self._bo + '2i 8x 256s', hdr.read(272))\n\n self.header['n50large'] = bool(self.header['n50large'])\n self.header['comment'] = self._cleanup_string(self.header['comment'])\n\n self.header['PrimaryBeam'] = self._primary_beam(hdr)\n self.header['SecondaryBeam'] = self._secondary_beam(hdr)\n self.header['Detectors'] = self._detectors1(hdr)\n\n self.header['SecondaryBeam']['E0S'] = self.header['Detectors'].pop('E0S')\n self.header['SecondaryBeam']['pressure multicollection chamber'] = \\\n self.header['Detectors'].pop('pressure multicollection chamber')\n\n # Add overall mode of machine, based on E0W\n if self.header['SecondaryBeam']['E0W'] < 0:\n self.header['polarity'] = '+'\n else:\n self.header['polarity'] = '-'\n\n # Combine pixel size from NanoSIMSHeader and raster from PrimaryBeam\n # Prevent ZeroDivisionError if undefined\n wfw = self.header['NanoSIMSHeader']['working frame width']\n if not wfw:\n wfw = 1\n self.header['NanoSIMSHeader']['working frame raster'] = \\\n self.header['PrimaryBeam']['raster']\n self.header['NanoSIMSHeader']['scanning frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['scanning frame width'] / wfw\n self.header['NanoSIMSHeader']['counting frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['counting frame width'] / wfw\n\n # Header for non-nano SIMS\n magic = unpack(self._bo + 'i', hdr.read(4))[0]\n if magic != 2306:\n msg = 'SIMSHeader magic number not found here at byte {}.'\n msg = msg.format(hdr.tell()-4)\n raise ValueError(msg)\n self.header['SIMSHeader'] = self._sims_header(hdr)\n\n if self.header['analysis version'] >= 5:\n if analparamnanobis_pos < 0:\n msg = 'Anal_param_nano_bis not found in header, '\n msg += 'don\\'t know where second Detectors 
section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnanobis_pos + 24)\n self.header['Detectors'].update(self._detectors2(hdr))\n xl = self.header['Detectors'].pop('exit slit xl')\n for n in range(7):\n det = self.header['Detectors']['Detector {}'.format(n+1)]\n w = list(det['exit slit widths'])\n w[2] = xl[5*n:5*(n+1)]\n det['exit slit widths'] = tuple(w)\n h = list(det['exit slit heights'])\n h[2] = xl[5*(n+1):5*(n+2)]\n det['exit slit heights'] = tuple(h)\n\n # Presets\n self.header['Presets'] = self._presets(hdr)\n\n # End Detectors pt 2 based on anal_param_nano_bis position\n\n # Last part of detectors\n if self.header['analysis version'] >= 6:\n d3 = self._detectors3(hdr)\n self.header['Detectors']['TIC'] = d3.pop('TIC')\n for k, v in d3.items():\n self.header['Detectors'][k].update(v)\n # End PrimaryBeam/SecondaryBeam/Presets/Detectors based on anal_param_nano position\n\n # Image header, at end of overall header\n if self.header['file type'] == 26:\n hdr.seek(-176, 2)\n self.header['Isotopes'] = self._isotopes_hdr(hdr)\n elif self.header['file type'] in (21, 22, 31, 35):\n # no image header for line scan or beam stability\n pass\n else:\n hdr.seek(-84, 2)\n self.header['Image'] = self._image_hdr(hdr)\n\n # Done reading header. Check for and read external files for extra info.\n if os.path.exists(os.path.splitext(self.filename)[0] + '.chk_is'):\n self._read_chk_is()", "def readHead(self):\n filesize = self.rhd.tell()\n \n #the order in which all of this is called is critcal\n self.header_identifier = hex(np.uint32(struct.unpack('<I', self.rhd.read(4))))\n v = np.int8(struct.unpack('BBBB', self.rhd.read(4)))\n\n #read each property of the header\n self.version = str(v[0]) + '.' + str(v[2])\n self.sample_rate = np.float32(struct.unpack('f', self.rhd.read(4)))[0] \n self.dsp_enabled = np.int8(struct.unpack('BB', self.rhd.read(2)))[0]\n self.actual_dsp_cutoff_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_lower_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_upper_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_dsp_cutoff_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_lower_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_upper_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.notch_cutoff_mode = np.int8(struct.unpack('BB', self.rhd.read(2)))[0]\n self.desired_impedance_test_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_impedance_test_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n #list of 3 notes\n self.note = [_qstring(self.rhd),_qstring(self.rhd),_qstring(self.rhd)]\n self.number_of_temperature_sensors = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n self._TEMP_SENSORS = self.number_of_temperature_sensors\n self.board_mode = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n self.number_of_signal_groups = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n\n #dict of signal groups\n self.signal_groups = {} \n for i in range(self.number_of_signal_groups):\n sg = Signal_Group(self)\n self.signal_groups[sg.signal_group_name] = sg\n \n #dict of channels\n self.channels = {}\n for key, group in self.signal_groups.iteritems():\n self.channels.update(group.channels)", "def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = 
unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr", "def read_header(fobj, endian=''): \n\n # read the header\n lstr = fobj.read(4)\n if lstr == '':\n raise EOFError('read_header: EOF encountered at start of header read')\n (lmap,) = struct.unpack(endian + 'i', lstr)\n \n head = subs.Odict()\n for i in xrange(lmap):\n name = read_string(fobj, endian)\n (itype,) = struct.unpack(endian + 'i', fobj.read(4))\n comment = read_string(fobj, endian)\n \n if itype == 0: # double\n (value,) = struct.unpack(endian + 'd', fobj.read(8))\n elif itype == 1: # char\n raise CppError('read_header: char not enabled')\n elif itype == 2: # int\n (value,) = struct.unpack(endian + 'i', fobj.read(4))\n elif itype == 3: # uint\n raise CppError('read_header: uint not enabled')\n elif itype == 4: # lint\n raise CppError('read_header: linit not enabled')\n elif itype == 5: # ulint\n raise CppError('read_header: ulint not enabled')\n elif itype == 6: # float\n (value,) = struct.unpack(endian + 'f', fobj.read(4))\n elif itype == 7: # string\n value = read_string(fobj, endian)\n elif itype == 8: # bool\n (value,) = struct.unpack(endian + 'B', fobj.read(1))\n elif itype == 9: # directory\n value = subs.Odict()\n elif itype == 10: # date\n raise CppError('read_header: date not enabled')\n elif itype == 11: # time\n (mjd,) = struct.unpack(endian + 'i', fobj.read(4))\n (hour,) = struct.unpack(endian + 'd', fobj.read(8))\n value = (mjd, hour)\n elif itype == 12: # position\n value = subs.Odict()\n (value['RA'],) = struct.unpack(endian + 'd', fobj.read(8))\n (value['Dec'],) = struct.unpack(endian + 'd', fobj.read(8))\n value['System'] = 'ICRS'\n (value['Epoch'],) = struct.unpack(endian + 'd', fobj.read(8))\n (value['PmRA'],) = struct.unpack(endian + 'f', fobj.read(4))\n (value['PmDec'],) = 
struct.unpack(endian + 'f', fobj.read(4))\n (value['Parallax'],) = struct.unpack(endian + 'f', fobj.read(4))\n (value['RV'],) = struct.unpack(endian + 'f', fobj.read(4))\n elif itype == 13: # dvector\n raise CppError('read_header: dvector not enabled')\n elif itype == 14: # uchar\n (value,) = struct.unpack(endian + 'c', fobj.read(1))\n elif itype == 15: # telescope\n tname = read_string(fobj, endian)\n sname = read_string(fobj, endian)\n (longitude,) = struct.unpack(endian + 'd', fobj.read(8))\n (latitude,) = struct.unpack(endian + 'd', fobj.read(8))\n (height,) = struct.unpack(endian + 'f', fobj.read(4))\n value = subs.Odict()\n value['Name'] = tname\n value['Observatory'] = sname\n value['Longitude'] = longitude\n value['Latitude'] = latitude\n value['Height'] = height\n else:\n raise CppError('read_header: itype = ' + str(itype) + ' not recognised.')\n\n clist = name.split('.')\n head_set(head, clist, value)\n \n return head", "def GetIndexFileHeaderText(headerinfo):#{{{\n (dbname, version, ext, prefix) = headerinfo\n indexFileHeaderText = []\n indexFileHeaderText.append(\"DEF_VERSION %s\"%(version))\n indexFileHeaderText.append(\"DEF_DBNAME %s\"%(dbname))\n indexFileHeaderText.append(\"DEF_EXTENSION %s\"%(ext))\n indexFileHeaderText.append(\"DEF_PREFIX %s\"%(prefix))\n return indexFileHeaderText", "def getHeaderDict(self):\r\n #put the headers into a dict\r\n \r\n print(\"opening \",self.filename)\r\n with open(self.filename, 'r') as readfile:\r\n headers = readfile.readline()\r\n firstrow = readfile.readline()\r\n if not firstrow:\r\n print(\"first line after headers is blank\")\r\n self.loadDictRow(keystring=headers)\r\n else: #assume first row after headers is test router\r\n print(\"load test router row\") \r\n self.loadDictRow(keystring = headers, valuestring = firstrow) \r\n \r\n # check for headers\r\n miscount=0\r\n for key in self.dataheader:\r\n if not key in self.objdict:\r\n print(\"missing key !\", key)\r\n miscount += 1\r\n\r\n if miscount == 0:\r\n print(\"all Columns found. Thank you.\")\r\n # elif (miscount == 11) and (\"IPADDRESS\" in ):\r\n # print(\"Found IP Address column. 
program will add additional columns\")\r\n elif miscount > 11:\r\n print(\"Could not locate Header Row\")\r\n elif miscount > 0:\r\n print(\"some columns missing, will add additional columns\")\r\n \r\n \r\n #end file check on filename \r", "def readParams(path):\n tiles = open(path, \"r\")\n #--- Starting date\n tiles.readline()\n index = tiles.readline()[:-1]\n \n #--- Starting date\n tiles.readline()\n B_date = tiles.readline()[:-1]\n \n #--- Stopping date\n tiles.readline()\n E_date = tiles.readline()[:-1]\n \n #--- DATA \n tiles.readline()\n DATA_path = tiles.readline()[:-1]\n \n #--- Csv \n tiles.readline()\n out = tiles.readline()[:-1]\n \n #--- Shapefile\n tiles.readline()\n shp = tiles.readline()[:-1]\n \n #--- Water mask\n water = DATA_path + '/waterMask'\n \n return index, B_date, E_date, DATA_path, out, shp, water", "def getData(self, **kwargs):\n\n for k,v in kwargs.items():\n if k== 'path': key= v\n\n try:\n swiftHandler = SwiftHandler()\n self.swiftConnection = swiftHandler._initiateSwiftConnection()\n dataObject = self._getObject(key, False)\n except Exception as err:\n print(err)\n \n objectInformation= dataObject[0]\n objectValue= dataObject[1]\n fileContent= objectValue\n\n fileBytes = BytesIO(fileContent)\n\n zipfileObj = zipfile.ZipFile(fileBytes, 'r', compression = zipfile.ZIP_DEFLATED)\n # We are extracting to the file to /share/incoming in container as plugin container is hardcoded to read from\n # /share/incoming.\n # TODO: @ravig. Remove this hardcoding. Need to have named arguments in all plugins.\n zipfileObj.extractall('/share/incoming')\n # Create /share/outgoing directory\n if not os.path.exists('/share/outgoing'):\n os.makedirs('/share/outgoing')", "def read(path):", "def __gather_file_info_win(self, file):\n if self.debug:\n Console.auto_line(f\"[*] Gathering binary information: '{file}'\")\n self.target_info = {}\n with open(file, 'rb') as binary:\n binary.seek(int('3C', 16))\n self.target_info['buffer'] = 0\n self.target_info['JMPtoCodeAddress'] = 0\n self.target_info['dis_frm_pehdrs_sectble'] = 248\n self.target_info['pe_header_location'] = struct.unpack('<i', binary.read(4))[0]\n # Start of COFF\n self.target_info['COFF_Start'] = self.target_info['pe_header_location'] + 4\n binary.seek(self.target_info['COFF_Start'])\n self.target_info['MachineType'] = struct.unpack('<H', binary.read(2))[0]\n binary.seek(self.target_info['COFF_Start'] + 2, 0)\n self.target_info['NumberOfSections'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['TimeDateStamp'] = struct.unpack('<I', binary.read(4))[0]\n binary.seek(self.target_info['COFF_Start'] + 16, 0)\n self.target_info['SizeOfOptionalHeader'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['Characteristics'] = struct.unpack('<H', binary.read(2))[0]\n # End of COFF\n self.target_info['OptionalHeader_start'] = self.target_info['COFF_Start'] + 20\n\n # if self.target_info['SizeOfOptionalHeader']:\n # Begin Standard Fields section of Optional Header\n binary.seek(self.target_info['OptionalHeader_start'])\n self.target_info['Magic'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MajorLinkerVersion'] = struct.unpack(\"!B\", binary.read(1))[0]\n self.target_info['MinorLinkerVersion'] = struct.unpack(\"!B\", binary.read(1))[0]\n self.target_info['SizeOfCode'] = struct.unpack(\"<I\", binary.read(4))[0]\n self.target_info['SizeOfInitializedData'] = struct.unpack(\"<I\", binary.read(4))[0]\n self.target_info['SizeOfUninitializedData'] = struct.unpack(\"<I\",\n binary.read(4))[0]\n 
self.target_info['AddressOfEntryPoint'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['PatchLocation'] = self.target_info['AddressOfEntryPoint']\n self.target_info['BaseOfCode'] = struct.unpack('<I', binary.read(4))[0]\n if self.target_info['Magic'] != 0x20B:\n self.target_info['BaseOfData'] = struct.unpack('<I', binary.read(4))[0]\n # End Standard Fields section of Optional Header\n # Begin Windows-Specific Fields of Optional Header\n if self.target_info['Magic'] == 0x20B:\n self.target_info['ImageBase'] = struct.unpack('<Q', binary.read(8))[0]\n else:\n self.target_info['ImageBase'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SectionAlignment'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['FileAlignment'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['MajorOperatingSystemVersion'] = struct.unpack('<H',\n binary.read(2))[0]\n self.target_info['MinorOperatingSystemVersion'] = struct.unpack('<H',\n binary.read(2))[0]\n self.target_info['MajorImageVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MinorImageVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MajorSubsystemVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['MinorSubsystemVersion'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['Win32VersionValue'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfImageLoc'] = binary.tell()\n self.target_info['SizeOfImage'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfHeaders'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['CheckSum'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['Subsystem'] = struct.unpack('<H', binary.read(2))[0]\n self.target_info['DllCharacteristics'] = struct.unpack('<H', binary.read(2))[0]\n if self.target_info['Magic'] == 0x20B:\n self.target_info['SizeOfStackReserve'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['SizeOfStackCommit'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['SizeOfHeapReserve'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['SizeOfHeapCommit'] = struct.unpack('<Q', binary.read(8))[0]\n\n else:\n self.target_info['SizeOfStackReserve'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfStackCommit'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfHeapReserve'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['SizeOfHeapCommit'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['LoaderFlags'] = struct.unpack('<I', binary.read(4))[0] # zero\n self.target_info['NumberofRvaAndSizes'] = struct.unpack('<I', binary.read(4))[0]\n # End Windows-Specific Fields of Optional Header\n # Begin Data Directories of Optional Header\n self.target_info['ExportTableRVA'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ExportTableSize'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ImportTableLOCInPEOptHdrs'] = binary.tell()\n # ImportTable SIZE|LOC\n self.target_info['ImportTableRVA'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ImportTableSize'] = struct.unpack('<I', binary.read(4))[0]\n self.target_info['ResourceTable'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['ExceptionTable'] = struct.unpack('<Q', binary.read(8))[0]\n self.target_info['CertTableLOC'] = binary.tell()\n self.target_info['CertLOC'] = struct.unpack(\"<I\", binary.read(4))[0]\n self.target_info['CertSize'] = struct.unpack(\"<I\", binary.read(4))[0]\n 
if self.debug:\n Console.auto_line(f\"[+] Information successfully recovered\")", "def _readheader(lines):\n hdrdict = {}\n #input list of 26 lines of header\n #station and channel\n line = lines[5]\n parts = line.strip().split()\n fname = parts[1]\n fparts = fname.split('_')\n hdrdict['station'] = fparts[-2]+'_'+fparts[-1]\n\n #the \"Component\" lines look like either: Component S00W, Component S90E, Component Up\n compstr = lines[12].strip().split()[1]\n hdrdict['channel'] = get_comp_name(compstr)\n\n #instrument\n hdrdict['instrument'] = lines[3].split()[1].strip()\n \n #location string\n line = lines[6]\n hdrdict['location'] = line.strip()\n #event origin, buffer start year/month\n line = lines[16]\n parts = line.strip().split()\n bufyear = int(parts[8])\n bufmonth = int(parts[9])\n #epicentral location, buffer start day/hour\n line = lines[17]\n parts = line.strip().split()\n bufday = int(parts[8])\n bufhour = int(parts[9])\n #numpoints, buffer start min/sec\n line = lines[19]\n parts = line.strip().split()\n hdrdict['npts'] = int(parts[0])\n bufmin = int(parts[8])\n millisec = int(parts[9])\n bufsec = int(millisec/1000)\n bufmicrosec = int(np.round(millisec/1000.0 - bufsec))\n hdrdict['starttime'] = UTCDateTime(datetime(bufyear,bufmonth,bufday,bufhour,bufmin,bufsec,bufmicrosec))\n #part C\n #frequency, calibration value and some other stuff we don't care about\n line = lines[20]\n parts = line.strip().split()\n hdrdict['sampling_rate'] = float(parts[0])\n hdrdict['delta'] = 1.0/hdrdict['sampling_rate']\n hdrdict['calib'] = float(parts[7])\n #site location info, this time in dd\n line = lines[21]\n parts = line.strip().split()\n hdrdict['lat'] = float(parts[0]) * -1\n hdrdict['lon'] = float(parts[1])\n hdrdict['height'] = 0.0\n #duration\n line = lines[22]\n parts = line.strip().split()\n hdrdict['duration'] = float(parts[0])\n hdrdict['endtime'] = hdrdict['starttime'] + hdrdict['duration']\n #max acceleration - good for sanity check\n line = lines[23]\n parts = line.strip().split()\n hdrdict['maxacc'] = float(parts[0])\n hdrdict['network'] = 'NZ'\n hdrdict['units'] = 'acc'\n return hdrdict", "def common_template_data(request, revision=None, mime_type=None):\n\n cfg = request.cfg\n\n # Initialize data dictionary members (sorted alphanumerically)\n data = TemplateData(\n {\n \"annotate_href\": None,\n \"cfg\": cfg,\n \"docroot\": (\n cfg.options.docroot is None\n and request.script_name + \"/\" + docroot_magic_path\n or cfg.options.docroot\n ),\n \"download_href\": None,\n \"download_text_href\": None,\n \"graph_href\": None,\n \"home_href\": request.script_name or \"/\",\n \"kv\": request.kv,\n \"lockinfo\": None,\n \"log_href\": None,\n \"nav_path\": nav_path(request),\n \"pathtype\": None,\n \"prefer_markup\": ezt.boolean(0),\n \"queryform_href\": None,\n \"rev\": None,\n \"revision_href\": None,\n \"rootname\": (request.rootname and request.server.escape(request.rootname) or None),\n \"rootpath\": request.rootpath,\n \"roots_href\": None,\n \"roottype\": request.roottype,\n \"rss_href\": None,\n \"tarball_href\": None,\n \"up_href\": None,\n \"username\": request.username,\n \"view\": _view_codes[request.view_func],\n \"view_href\": None,\n \"vsn\": __version__,\n \"where\": request.server.escape(request.where),\n }\n )\n\n rev = revision\n if not rev:\n rev = request.query_dict.get(\"annotate\")\n if not rev:\n rev = request.query_dict.get(\"revision\")\n if not rev and request.roottype == \"svn\":\n rev = request.query_dict.get(\"pathrev\")\n try:\n data[\"rev\"] = 
hasattr(request.repos, \"_getrev\") and request.repos._getrev(rev) or rev\n except vclib.InvalidRevision:\n raise ViewVCException(\"Invalid revision\", \"404 Not Found\")\n\n if request.pathtype == vclib.DIR:\n data[\"pathtype\"] = \"dir\"\n elif request.pathtype == vclib.FILE:\n data[\"pathtype\"] = \"file\"\n\n if request.path_parts:\n dir = _path_join(request.path_parts[:-1])\n data[\"up_href\"] = request.get_url(\n view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1\n )\n\n if \"roots\" in cfg.options.allowed_views:\n data[\"roots_href\"] = request.get_url(view_func=view_roots, escape=1, params={})\n\n if request.pathtype == vclib.FILE:\n fvi = get_file_view_info(request, request.where, data[\"rev\"], mime_type)\n data[\"view_href\"] = fvi.view_href\n data[\"download_href\"] = fvi.download_href\n data[\"download_text_href\"] = fvi.download_text_href\n data[\"annotate_href\"] = fvi.annotate_href\n data[\"revision_href\"] = fvi.revision_href\n data[\"prefer_markup\"] = fvi.prefer_markup\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n if request.roottype == \"cvs\" and cfg.options.use_cvsgraph:\n data[\"graph_href\"] = request.get_url(view_func=view_cvsgraph, params={}, escape=1)\n file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {})\n entries = [item for item in file_data if item.name == request.path_parts[-1]]\n if len(entries) == 1:\n request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {})\n data[\"lockinfo\"] = entries[0].lockinfo\n elif request.pathtype == vclib.DIR:\n data[\"view_href\"] = request.get_url(view_func=view_directory, params={}, escape=1)\n if \"tar\" in cfg.options.allowed_views:\n data[\"tarball_href\"] = request.get_url(view_func=download_tarball, params={}, escape=1)\n if request.roottype == \"svn\":\n data[\"revision_href\"] = request.get_url(\n view_func=view_revision, params={\"revision\": data[\"rev\"]}, escape=1\n )\n\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n\n if is_querydb_nonempty_for_root(request):\n if request.pathtype == vclib.DIR:\n params = {}\n if request.roottype == \"cvs\" and request.pathrev:\n params[\"branch\"] = request.pathrev\n data[\"queryform_href\"] = request.get_url(\n view_func=view_queryform, params=params, escape=1\n )\n data[\"rss_href\"] = request.get_url(\n view_func=view_query, params={\"date\": \"month\", \"format\": \"rss\"}, escape=1\n )\n elif request.pathtype == vclib.FILE:\n parts = _path_parts(request.where)\n where = _path_join(parts[:-1])\n data[\"rss_href\"] = request.get_url(\n view_func=view_query,\n where=where,\n pathtype=request.pathtype,\n params={\"date\": \"month\", \"format\": \"rss\", \"file\": parts[-1], \"file_match\": \"exact\"},\n escape=1,\n )\n return data", "def parseheader(self):\n for line in self.rawheader.split(\"\\n\"):\n pat = \"QUITTING\"\n if pat in line:\n self.prefix = line\n continue\n\n pat = \"VERSION NUMBER\"\n if pat in line:\n self.softvers = line[28:].strip()\n continue\n\n pat = \"DATE/TIME IS\"\n if pat in line:\n meta = line[22:].strip()\n matchobj = dtpat.match(meta)\n if matchobj:\n try:\n self.dumpdt = datetime.strptime(meta, moddtfmt)\n except:\n self.nodump = True\n self.comment += (\n \" *** Cannot read module date/time: {}\\n\".format(meta)\n )\n continue\n\n pat = \"NUMBER RECORDS IS\"\n if pat in line:\n self.ndumprec = line[22:].strip()\n continue\n\n pat = \"MODULE TYPE IS\"\n if pat in line:\n self.modtype = line[22:].strip()\n 
continue\n\n pat = \"SERIAL NUMBER IS\"\n if pat in line:\n self.modserial = line[22:].strip()\n continue\n\n pat = \"COND S/N IS\"\n if pat in line:\n meta = line[22:].strip()\n serials = meta.split(\"/\")\n self.cellserial = serials[1]\n self.ioserial = serials[0]\n continue\n\n pat = \"SAMPLING INTERVAL IS\"\n if pat in line:\n meta = line[22:].strip()\n self.sampintv = meta\n if meta == \"00:01:00\":\n self.nodump = False\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n elif meta != \"00:02:00\":\n self.nodump = True\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n continue\n\n pat = \"AVERAGE INTERVAL IS\"\n if pat in line:\n self.avgintv = line[22:].strip()\n if int(self.avgintv) != 24:\n self.nodump = True\n self.comment += \" *** Average interval is {}\\n\".format(meta)\n continue\n\n pat = \"BATTERY VOLTAGE IS\"\n if pat in line:\n self.voltage = line[22:].strip()\n continue\n\n return self.modserial", "def request_file(self, path: str, token: str) -> Tuple[IO[bytes], dict]:\n response = self.request('get', path, token, stream=True)\n stream = ReadWrapper(response.iter_content,\n int(response.headers['Content-Length']))\n return stream, response.headers", "def read_hdr_file(self, rawfilename):\n\n # Get the filename without path or extension\n filename = os.path.basename(rawfilename)\n filesplit = os.path.splitext(filename)\n filebase = filesplit[0]\n dirname = os.path.dirname(rawfilename)\n\n # See if we can find the header file to use\n if os.path.isfile(os.path.join(dirname, filebase + '.hdr')):\n hdrfilename = os.path.join(dirname, filebase + '.hdr')\n elif os.path.isfile(os.path.join(dirname, filename + '.hdr')):\n hdrfilename = os.path.join(dirname, filename + '.hdr')\n else:\n raise IOError('Could not find coresponding header file')\n\n hdrfile = open(hdrfilename, 'r')\n output = collections.OrderedDict()\n inblock = False\n\n # Read line, split it on equals, strip whitespace from resulting strings\n # and add key/value pair to output\n for currentline in hdrfile:\n # ENVI headers accept blocks bracketed by curly braces - check for these\n if not inblock:\n # Split line on first equals sign\n if re.search('=', currentline) is not None:\n linesplit = re.split('=', currentline, 1)\n # Convert all values to lower case\n key = linesplit[0].strip().lower()\n value = linesplit[1].strip()\n\n # If value starts with an open brace, it's the start of a block\n # - strip the brace off and read the rest of the block\n if re.match('{', value) is not None:\n inblock = True\n value = re.sub('^{', '', value, 1)\n\n # If value ends with a close brace it's the end\n # of the block as well - strip the brace off\n if re.search('}$', value):\n inblock = False\n value = re.sub('}$', '', value, 1)\n value = value.strip()\n output[key] = value\n else:\n # If we're in a block, just read the line, strip whitespace\n # (and any closing brace ending the block) and add the whole thing\n value = currentline.strip()\n if re.search('}$', value):\n inblock = False\n value = re.sub('}$', '', value, 1)\n value = value.strip()\n output[key] = output[key] + value\n\n hdrfile.close()\n\n return output", "def process_request_header(file_directory, file_path, login_request, user_id, write = False, file_data =''):\n\n # Fetch the details of the user from DB\n user_details = distributed_file_system_db.dfs_users.find_one({'user_id': user_id})\n # Get the public key of the user\n user_public_key = user_details['public_key']\n hashed_public_key = AES.new(user_public_key, 
AES.MODE_ECB)\n # Get the log in ticket\n encoded_user_ticket = json.loads(login_request)[\"user_ticket\"]\n decoded_user_ticket = hashed_public_key.decrypt(base64.b64decode(encoded_user_ticket))\n data = json.loads(decoded_user_ticket.strip())\n\n user_session_id = data[\"session_id\"]\n server_host = data[\"server_host\"]\n server_port = data[\"server_port\"]\n access_key = data[\"access_key\"]\n # Encrypt the session ID and hash it\n hashed_session_id = AES.new(user_session_id, AES.MODE_ECB)\n\n # Encrypted file directory\n encrypted_dir = base64.b64encode(\n hashed_session_id.encrypt(file_directory + b\" \" * (AES.block_size - len(file_directory) % AES.block_size)))\n # Encrypted file file name\n encrypted_file_name = base64.b64encode(\n hashed_session_id.encrypt(file_path + b\" \" * (AES.block_size - len(file_path) % AES.block_size)))\n\n data = open(file_path, 'rb').read()\n # if write is true change the headers\n if write:\n headers = {'access_key': access_key\n , 'file_directory': encrypted_dir\n , 'file_name': encrypted_file_name\n , 'file_data': file_data}\n else:\n headers = {'access_key': access_key\n , 'file_directory': encrypted_dir\n , 'file_name': encrypted_file_name}\n\n return data, headers, server_host, server_port", "def get_chip_info(chip_path):\r\n with rasterio.open(chip_path) as ds:\r\n chip_crs = ds.crs\r\n chip_shape = ds.shape\r\n chip_transform = ds.transform\r\n chip_bounds = ds.bounds\r\n\r\n # Use the first part of the chip filename as a prefix\r\n prefix = os.path.basename(chip_path).split(\"_\")[0]\r\n\r\n return ChipInfo(\r\n path=chip_path,\r\n prefix=prefix,\r\n crs=chip_crs,\r\n shape=chip_shape,\r\n transform=chip_transform,\r\n bounds=chip_bounds,\r\n footprint=get_footprint(chip_bounds, chip_crs),\r\n )", "def _get_file_data_object(self, file_path, spec_header):\n logger.DLOG(\"Creating FVaRFileData object from file '%s'\", file_path)\n return acm.Risk().CreateScenarioFileData(file_path,\n spec_header.DelimiterChar(), spec_header.CommentChar())", "def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n super().__init__(header, raw_data)\n self.status, self.cmd_tag = unpack_from('<2I', raw_data)", "def parse_header(header_lines):\n info = {}\n for line in header_lines:\n if line.startswith('Citation'):\n info['Citation'] = line.split()[-1].strip()\n elif ':' in line:\n try:\n field, value = map(strip,line.split(':',1))\n info[field] = value\n except ValueError:\n #no interesting header line\n continue\n else:\n continue\n return Info(info)", "def _describe(self) -> Dict[str, Any]:\n return dict(filepath=self._filepath, protocol=self._protocol)", "def _get_file_meta(cls, file=None, file_path=None):\n if not file and file_path:\n file = open(file_path, 'r')\n file_body = file.read()\n meta = {\n 'title': file.name, # read title from html\n 'subtitle': 'dupa', # read from html\n 'slug': os.path.splitext(os.path.basename(file.name))[0],\n 'abstract_html': 'Abstract', \n 'body_html': file_body,\n 'tags': [db.Category('one tag'), db.Category('second tag')],\n }\n if file_path:\n meta.update({\n 'date_created': datetime.datetime.fromtimestamp(os.path.getctime(file_path)),\n 'date_updated': datetime.datetime.fromtimestamp(os.path.getmtime(file_path)),\n })\n return meta", "def get_header(header_row):\n header = {}\n header['station'], c1, c2, c3, date, time, tz = header_row.split()\n header['short_model'] = c1\n header['model'] = f'{c1} {c2} {c3}' \n header['runtime'] = dateutil.parser.parse(f'{date} {time} {tz}')\n return header", "def 
generate_volume_info(self, NAME, path):\n info = {'tags': [], 'name': NAME, 'path': path, 'AttachedToVm': [],\n 'State': 'available', 'machine_path': None,\n 'time': datetime.datetime.now()}\n return info", "def create_path_file_obj(path):\n extension = os.path.splitext(path)[1]\n\n return {\n u'path': path, u'content': None,\n u'extension': extension}", "def tar_file_header(self, name, file_size, mtime=None):\n info = tarfile.TarInfo(name=name)\n info.type = tarfile.REGTYPE\n info.size = file_size\n\n if mtime is not None:\n info.mtime = mtime\n return info.tobuf()", "def _read_info(self):\n my_filelines = self.file_lines\n info = dict()\n\n for i, line in enumerate(my_filelines):\n if line.startswith(\"VEHICLE\"):\n vehicle_pro_start = i + 2\n elif line.startswith(\"CUSTOMER\"):\n customer_pro_start = i + 3\n\n elif line.startswith(\"NUMBER\"):\n splited = line.split(' ')\n info[splited[0]] = 0\n info[splited[-1]] = 0\n return info, (vehicle_pro_start, customer_pro_start)", "def _readHeader(self):\n self.ControllerVersion = self._readInt(0)\n self.LogicOutput = self._readInt(2)\n self.AppHiCapLowNoise = self._readInt(4)\n self.TimingMode = self._readInt(8)\n self.Exposure = self._readFloat(10)\n self.DetTemperature = self._readFloat(36)\n self.DetectorType = self._readInt(40)\n self.TriggerDiode = self._readInt(44)\n self.DelayTime = self._readFloat(46)\n self.ShutterControl = self._readInt(50)\n self.AbsorbLive = self._readInt(52)\n self.AbsorbMode = self._readInt(54)\n self.CanDoVirtualChip = self._readInt(56)\n self.ThresholdMinLive = self._readInt(58)\n self.ThresholdMin = self._readFloat(60)\n self.ThresholdMaxLive = self._readInt(64)\n self.ThresholdMax = self._readFloat(66)\n self.ADCOffset = self._readInt(188)\n self.ADCRate = self._readInt(190)\n self.ADCType = self._readInt(192)\n self.ADCRes = self._readInt(194)\n self.ADCBitAdj = self._readInt(196)\n self.Gain = self._readInt(198)\n self.GeometricOps = self._readInt(600)", "def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n super().__init__(header, raw_data)\n self.status, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)", "def parse_template(self):\n for line in self.raw_template.split(\"\\n\"):\n line = line.strip()\n if line.startswith('#m3'):\n key, val = line[3:].strip().split('=', 1)\n key = key.strip()\n val = val.strip()\n self.variables[key] = val\n\n for fitem in self.finditem.finditer(self.raw_template):\n fgrp = fitem.groups()\n categ = fgrp[0]\n name = fgrp[1]\n rest_str = fgrp[2]\n rest = {} # type: dict\n for item in rest_str.split('|'):\n item = item.strip()\n if item:\n key, val = item.split('=')\n rest[key] = val\n\n self.data[name] = (categ, rest)", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def parse_headers(self):\n\n logger.debug(f\"parse headers of {self.path}\")\n with open(self.path, 'rb') as f:\n parser = BinaryParser(f)\n magic, version_major, version_minor = parser.unpack(\"<2sBB\")\n if magic != b'RW':\n raise ValueError(\"invalid magic 
code\")\n self.version = (version_major, version_minor)\n\n if version_major == 1:\n parser.seek(8)\n elif version_major == 2:\n parser.seek(100)\n elif version_major == 3:\n parser.seek(268)\n else:\n raise ValueError(f\"unsupported WAD version: {version_major}.{version_minor}\")\n\n entry_count, = parser.unpack(\"<I\")\n\n if version_major == 1:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIII\")) for _ in range(entry_count)]\n else:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIIBBBBQ\")) for _ in range(entry_count)]", "def blosxom_process_path_info(args):\n request = args['request']\n config = request.getConfiguration()\n data = request.getData()\n pyhttp = request.getHttp()\n\n form = pyhttp[\"form\"]\n data['flavour'] = (form.has_key('flav') and form['flav'].value or \n config.get('defaultFlavour', 'html'))\n\n path_info = []\n data['pi_yr'] = ''\n data['pi_mo'] = ''\n data['pi_da'] = ''\n \n if pyhttp.get('PATH_INFO', ''):\n path_info = pyhttp['PATH_INFO'].split('/')\n path_info = [x for x in path_info if x != '']\n\n data['path_info'] = list(path_info)\n data['root_datadir'] = config['datadir']\n\n got_date = 0\n for path_data in path_info:\n if not path_data:\n continue\n elif len(path_data) == 4 and path_data.isdigit():\n # We got a hot date here guys :)\n got_date = 1\n break\n else:\n data['pi_bl'] = os.path.join(data['pi_bl'], path_data)\n\n if got_date:\n # Get messy with dates\n while not (len(path_info[0]) == 4 and path_info[0].isdigit()):\n path_info.pop(0)\n # Year\n data['pi_yr'] = path_info.pop(0)\n # Month\n if path_info and path_info[0] in tools.MONTHS:\n data['pi_mo'] = path_info.pop(0)\n # Day\n if path_info and re.match(\"^([0-2][0-9]|3[0-1])\", path_info[0]):\n # Potential day here\n data['pi_da'] = path_info.pop(0)\n\n if path_info and path_info[0]:\n # Potential flavour after date\n filename, ext = os.path.splitext(path_info[0])\n if filename == 'index':\n data['flavour'] = ext[1:]\n\n blog_result = os.path.join(config['datadir'], data['pi_bl'])\n \n data['bl_type'] = ''\n\n # If all we've got is a directory, things are simple\n if os.path.isdir(blog_result):\n if data['pi_bl'] != '':\n config['blog_title'] += ' : %s' % data['pi_bl']\n data['root_datadir'] = blog_result\n data['bl_type'] = 'dir'\n\n # Else we may have a file\n if not data['bl_type']:\n # Try for file\n\n ext = tools.what_ext(data[\"extensions\"].keys(), blog_result)\n if ext:\n config['blog_title'] += ' : %s' % data['pi_bl']\n data['bl_type'] = 'file'\n data['root_datadir'] = blog_result + '.' + ext\n\n else:\n # We may have flavour embedded here\n filename, ext = os.path.splitext(blog_result)\n fileext = tools.what_ext(data[\"extensions\"].keys(), filename)\n dirname = os.path.dirname(filename)\n\n if fileext:\n data['flavour'] = ext[1:]\n data['root_datadir'] = filename + '.' 
+ fileext\n config['blog_title'] += ' : %s' % data['pi_bl']\n data['bl_type'] = 'file'\n\n elif (os.path.basename(filename) == 'index' and os.path.isdir(dirname)):\n # blanket flavours?\n data['flavour'] = ext[1:]\n if data['pi_bl'] != '':\n config['blog_title'] += ' : %s' % os.path.dirname(data['pi_bl'])\n data['root_datadir'] = dirname\n data['bl_type'] = 'dir'\n \n # Construct our final URL\n data['url'] = '%s/%s' % (config['base_url'], data['pi_bl'])", "def read_header(self):\n\n def read_mpq_header(offset=None):\n if offset:\n self.file.seek(offset)\n data = self.file.read(32)\n header = MPQFileHeader._make(\n struct.unpack(MPQFileHeader.struct_format, data))\n header = header._asdict()\n if header['format_version'] == 1:\n data = self.file.read(12)\n extended_header = MPQFileHeaderExt._make(\n struct.unpack(MPQFileHeaderExt.struct_format, data))\n header.update(extended_header._asdict())\n return header\n\n def read_mpq_user_data_header():\n data = self.file.read(16)\n header = MPQUserDataHeader._make(\n struct.unpack(MPQUserDataHeader.struct_format, data))\n header = header._asdict()\n header['content'] = self.file.read(header['user_data_header_size'])\n return header\n\n magic = self.file.read(4)\n self.file.seek(0)\n\n if magic == b'MPQ\\x1a':\n header = read_mpq_header()\n header['offset'] = 0\n elif magic == b'MPQ\\x1b':\n user_data_header = read_mpq_user_data_header()\n header = read_mpq_header(user_data_header['mpq_header_offset'])\n header['offset'] = user_data_header['mpq_header_offset']\n header['user_data_header'] = user_data_header\n else:\n raise ValueError(\"Invalid file header.\")\n\n return header", "def image_header(self):\n\n if not self._image_header:\n path_image_header = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.header\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_header):\n self.logger.warning(\n \"ImageHeader path doesn't exist: %s\", path_image_header\n )\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_header)\n self._image_header = {}\n with open(path_image_header) as f:\n for line in f:\n parts = line.split(\" = \")\n\n if len(parts) < 2:\n parts = line.split(\" : \")\n\n if len(parts) > 1:\n self._image_header[parts[0].strip()] = (\n parts[1].replace(\";\", \"\").replace(\"\\n\", \"\")\n )\n\n return self._image_header", "def get_file_and_metadata(self, from_path, rev=None):\n file_res = self.get_file(from_path, rev)\n metadata = DropboxClient.__parse_metadata_as_dict(file_res)\n\n return file_res, metadata", "def read_prism_hdr(hdr_path): \n with open(hdr_path, 'r') as input_f:\n header_list = input_f.readlines()\n \n return dict(item.strip().split() for item in header_list)", "def parse(self, dict, header=TRUE):\n if type(dict) != types.DictType:\n raise TypeError, \"Second argument must be a dictionary\"\n if not self.template:\n raise OpagMissingPrecondition, \"template path is not set\"\n # Open the file if its not already open. If it is, seek to the\n # beginning of the file.\n if not self.template_file:\n self.template_file = open(self.template, \"r\")\n else:\n self.template_file.seek(0)\n # Instantiate a new bound method to do the replacement.\n replacer = Replacer(dict).replace\n # Read in the entire template into memory. 
I guess we'd better keep\n # the templates a reasonable size if we're going to keep doing this.\n buffer = self.template_file.read()\n replaced = \"\"\n if header:\n replaced = \"Content-Type: text/html\\n\\n\"\n replaced = replaced + re.sub(\"%%(\\w+)%%\", replacer, buffer)\n return replaced", "def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()", "def allinfo(self, *path):\n files = self.listfiles(*path)\n dic = {}\n for filename in files:\n dic[filename] = self.info(*filename)\n return dic", "def render_header(header_dict={}):\n header = globals().get(\"%s_header\" % SUFFIX).rstrip()\n debug(\"rendering header.\")\n debug(\"header: %s\" % header)\n debug(\"header_dict: %s\" % header_dict)\n data_dict = defaultdict(str)\n data_dict.update({\n 'encoding': ENCODING,\n 'filename': FILENAME,\n 'date': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'cdate': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'modified_by': AUTHOR,\n 'author': AUTHOR,\n })\n debug(\"updated dict(stage 1): %s\" % data_dict)\n keywords = copy(KEYWORDS)\n more_keywords = vim.eval(\"g:BHKeywords\")\n keywords.update(more_keywords)\n\n # update data_dict with user provided key-value pairs.\n for key in header_dict:\n if not key in keywords:\n continue\n data_dict[keywords[key]] = header_dict[key]\n debug(\"updated dict(stage 2): %s.\" % data_dict)\n\n # if we are editing an shell script, and there are line seps in the comment, add a # if we have to.\n if SUFFIX == \"sh\":\n fix_sh_header_with_sharp(data_dict)\n\n rendered_header = header % data_dict\n debug(\"rendered header: %s\" % rendered_header)\n return rendered_header", "def format(data, request, response, toke_info):\n response.headers[\"Content-Type\"] = 'text/html'\n if response.status == HTTPStatus.UNAUTHORIZED.value:\n if response.headers.get(\"WWW-Authenticate\") and request.config.get(\"auth.allow_basic_authentication\"):\n response.headers[\"WWW-Authenticate\"] = \"Basic\" + response.headers[\"WWW-Authenticate\"][6:]\n return\n else:\n return html_auth2.format(error=data)\n if request.path_info in (\"/version\", \"/system\"):\n return \"<pre>\" + yaml.safe_dump(data, explicit_start=False, indent=4, 
default_flow_style=False) + \"</pre>\"\n body = html_body.format(item=request.path_info)\n if response.status and response.status > 202:\n body += html_body_error.format(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False))\n elif isinstance(data, (list, tuple)):\n # if request.path_info == \"/ns/v1/deploy\":\n # body += html_upload_body.format(request.path_info + \"_content\", \"VNFD\")\n # elif request.path_info == \"/nsd/v1/ns_descriptors\":\n # body += html_upload_body.format(request.path_info + \"_content\", \"NSD\")\n # elif request.path_info == \"/nst/v1/nst_templates\":\n # body += html_upload_body.format(request.path_info + \"_content\", \"NSTD\")\n for k in data:\n if isinstance(k, dict):\n data_id = k.pop(\"_id\", None)\n elif isinstance(k, str):\n data_id = k\n if request.path_info == \"/ns/v1/deploy\":\n body += '<p> <a href=\"/ro/{url}/{id}?METHOD=DELETE\"> <img src=\"/ro/static/delete.png\" height=\"25\"' \\\n ' width=\"25\"> </a><a href=\"/ro/{url}/{id}\">{id}</a>: {t} </p>' \\\n .format(url=request.path_info, id=data_id, t=html_escape(str(k)))\n else:\n body += '<p> <a href=\"/ro/{url}/{id}\">{id}</a>: {t} </p>'.format(url=request.path_info, id=data_id,\n t=html_escape(str(k)))\n elif isinstance(data, dict):\n if \"Location\" in response.headers:\n body += '<a href=\"{}\"> show </a>'.format(response.headers[\"Location\"])\n else:\n body += '<a href=\"/ro/{}?METHOD=DELETE\"> <img src=\"/ro/static/delete.png\" height=\"25\" width=\"25\"> </a>'\\\n .format(request.path_info[:request.path_info.rfind(\"/\")])\n if request.path_info.startswith(\"/nslcm/v1/ns_instances_content/\") or \\\n request.path_info.startswith(\"/nslcm/v1/ns_instances/\"):\n _id = request.path_info[request.path_info.rfind(\"/\")+1:]\n body += html_nslcmop_body.format(id=_id)\n elif request.path_info.startswith(\"/nsilcm/v1/netslice_instances_content/\") or \\\n request.path_info.startswith(\"/nsilcm/v1/netslice_instances/\"):\n _id = request.path_info[request.path_info.rfind(\"/\")+1:]\n body += html_nsilcmop_body.format(id=_id)\n body += \"<pre>\" + html_escape(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)) + \\\n \"</pre>\"\n elif data is None:\n if request.method == \"DELETE\" or \"METHOD=DELETE\" in request.query_string:\n body += \"<pre> deleted </pre>\"\n else:\n body = html_escape(str(data))\n user_text = \" \"\n if toke_info:\n if toke_info.get(\"username\"):\n user_text += \"user: {}\".format(toke_info.get(\"username\"))\n if toke_info.get(\"project_id\"):\n user_text += \", project: {}\".format(toke_info.get(\"project_name\"))\n return html_start.format(user_text) + body + html_end\n # yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)\n # tags=False,\n # encoding='utf-8', allow_unicode=True)", "def parse_tarinfo(\n tarinfo: tarfile.TarInfo, tar_file: tarfile.TarFile\n) -> Tuple[Optional[bytes], Path]:\n path = Path(tarinfo.path)\n if path.suffix == \".nodata\" or path.suffix == \".nometa\":\n return None, path\n data = tar_file.extractfile(tarinfo).read()\n return data, path", "def get_header(self, root):\n header = etree.SubElement(root, \"FileHeader\")\n header.set(\"revMajor\", \"1\")\n header.set(\"revMinor\", \"0\")\n header.set(\"date\", datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\"))\n header.set(\"description\", \"Generated OpenSCENARIO File\")\n header.set(\"author\", \"QGIS OSCGenerator Plugin\")", "def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n super().__init__(header, raw_data)\n 
self.status, self.length, *self.values = unpack_from(f'<{self.header.params_count}I', raw_data)\n self.data = raw_data[8:8 + self.length] if self.length > 0 else b''", "def __init__(self, header: CmdHeader, raw_data: bytes) -> None:\n assert isinstance(header, CmdHeader)\n assert isinstance(raw_data, (bytes, bytearray))\n self.header = header\n self.raw_data = raw_data\n self.status, = unpack_from(\"<L\", raw_data)", "def get_path_data(self, path):\n url = self.api_server + path\n return self.get_url_data(url)", "def build_header_variables(\n self,\n simulation_data,\n block_header_structure,\n block_path,\n data,\n dimensions,\n ):\n self.data_items = []\n var_path = block_path + (block_header_structure[0].name,)\n\n # fix up data\n fixed_data = []\n if (\n block_header_structure[0].data_item_structures[0].type\n == DatumType.keyword\n ):\n data_item = block_header_structure[0].data_item_structures[0]\n fixed_data.append(data_item.name)\n if isinstance(data, tuple):\n data = list(data)\n if isinstance(data, list):\n fixed_data = fixed_data + data\n else:\n fixed_data.append(data)\n if len(fixed_data) > 0:\n fixed_data = [tuple(fixed_data)]\n # create data object\n new_data = self.block.data_factory(\n simulation_data,\n None,\n block_header_structure[0],\n True,\n var_path,\n dimensions,\n fixed_data,\n )\n\n self.add_data_item(new_data, data)", "def read_header(options, infile):\n\n contigs = dict()\n line = ''\n if options.is_bam:\n #chrm = infile.getrname(line.tid).replace('chr', '')\n for i in range(len(infile.references)):\n if infile.references[i] == 'chrM_rCRS':\n chr_key = 'chrM'\n else:\n chr_key = infile.references[i]\n\n if contigs.has_key(chr_key):\n if not contigs[chr_key] == infile.lengths[i]:\n print >> sys.stderr, \"Headers in BAM files have inconsistent contig lengths. Stopping ...\"\n sys.exit(1)\n else:\n contigs[chr_key] = infile.lengths[i]\n else:\n for line in infile:\n if not line[0] == '@':\n if len(contigs) == 0:\n print >> sys.stderr, \"No header found in %s. Stopping.\" % file\n sys.exit(1)\n else:\n break\n\n sl = line.strip().split('\\t')\n\n if not sl[0] == '@SQ':\n continue\n\n if sl[1][3:] == 'chrM_rCRS':\n chr_key = 'chrM'\n else:\n chr_key = sl[1][3:]\n if contigs.has_key(chr_key):\n if not contigs[chr_key] == int(sl[2][3:]):\n print >> sys.stderr, \"Headers in BAM files have inconsistent contig lengths. 
Stopping ...\"\n sys.exit(1)\n else:\n contigs[chr_key] = int(sl[2][3:])\n \n return (contigs, line)", "def __init__(self, template, file_path):\n self.navigator = Navigator()\n self.template = template\n self.file_path = file_path\n self.template_data = read_xml_get_json(template, ordered_dict=True)\n self.data = read_xml_get_json(file_path, ordered_dict=True)\n self.output = {\"status\": True, \"message\": \"\"}\n self.root = \"Testcase\"\n self.major = (\"Details\", \"Requirements\", \"Steps\")", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def tabular_parser(path: str, header: bool = True) -> TabularData:\n with open(path, \"r\") as read_obj:\n csv_reader = reader(read_obj)\n list_of_rows = list(csv_reader)\n rows = np.array(list_of_rows)\n\n if header:\n return TabularData(column_names=rows[0, :], data=rows[1:, :])\n else:\n return TabularData(column_names=None, data=rows[1:, :])", "def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile", "def get_info(self):\n self.id = basename(self.path)\n\n # Check all files\n for filename in listdir(self.path):\n if isfile(join_paths(self.path, filename)):\n self.files.append(filename)\n\n # Check handlers\n for match in self._file_handlers.keys():\n if match in filename:\n handler = getattr(self, self._file_handlers[match])\n handler(filename)", "def head(self, tenant: str, filename: str) -> None:\n try:\n if not self.allow_info:\n raise ClientMethodNotAllowed\n if not filename:\n raise ClientError(\"No resource specified\")\n self.path = self.export_dir\n self.filepath = f\"{self.path}/{url_unescape(self.resource)}\"\n # ensure there are no symlinks in filepath\n any_path_islink(self.filepath, opts=options)\n if not os.path.lexists(self.filepath):\n raise ClientResourceNotFoundError(f\"{self.filepath} not found\")\n size, mime_type, mtime = self.get_file_metadata(self.filepath)\n logging.info(\n f\"user: {self.requestor}, checked file: {self.filepath} , MIME type: {mime_type}\"\n )\n self.set_header(\"Content-Length\", size)\n self.set_header(\"Accept-Ranges\", \"bytes\")\n self.set_header(\"Content-Type\", mime_type)\n self.set_header(\"Modified-Time\", str(mtime))\n self.set_status(HTTPStatus.OK.value)\n except Exception as e:\n error = error_for_exception(e, details=self.additional_log_details())\n logging.error(error.message)\n for name, value in error.headers.items():\n self.set_header(name, value)\n self.set_status(error.status, reason=error.reason)\n self.finish()", "def generateHeader(param_dict, filename_out, test_mode=False, template=\"uvfits_headers/header.tpl\"):\n findAndReplace(param_dict, template,filename_out, test_mode)", "def ReadInData(self):\n fileio.GetOptData(self)" ]
[ "0.58803326", "0.57795227", "0.5498735", "0.5484519", "0.5375694", "0.5355326", "0.5330148", "0.5242671", "0.5242115", "0.52344966", "0.52192587", "0.52183825", "0.51776946", "0.51531774", "0.51515484", "0.5147676", "0.5141665", "0.50517124", "0.5044997", "0.5039923", "0.50276595", "0.50230604", "0.5006144", "0.5000837", "0.49910143", "0.49731618", "0.49702293", "0.49603927", "0.49587288", "0.49569497", "0.4955487", "0.49423298", "0.4935902", "0.49328116", "0.49316975", "0.49136558", "0.49079803", "0.49029586", "0.48989633", "0.48924407", "0.48757306", "0.48538324", "0.48497593", "0.48493594", "0.48231074", "0.48150307", "0.48124674", "0.48068407", "0.47896355", "0.47672942", "0.47653672", "0.47543728", "0.47500128", "0.4747789", "0.47347036", "0.47196332", "0.47196135", "0.47184348", "0.47179642", "0.471793", "0.47146592", "0.47072378", "0.4702024", "0.4689889", "0.46876523", "0.4687637", "0.46834925", "0.46800923", "0.46778622", "0.46749294", "0.46714965", "0.46692055", "0.46681502", "0.46660674", "0.4665669", "0.4663849", "0.46606606", "0.4653683", "0.46464166", "0.46451336", "0.46439198", "0.4642361", "0.46411794", "0.4634886", "0.46320057", "0.46265766", "0.4624998", "0.462427", "0.4617401", "0.46163774", "0.46150324", "0.46146694", "0.46133944", "0.46097064", "0.46086082", "0.460731", "0.46070647", "0.45981443", "0.459313", "0.4589641" ]
0.56649214
2
Pair up each epi with a fieldmap.
def _SetFmapInfo(self):
    for epi in self.pfiles + self.epirt_paths:
        self.info[epi]['fmapname'] = None
        self.info[epi]['fmap_entry'] = None
        for entry in self.entry_map['fmap']:
            fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']
            if self.info[entry]['plane'] == self.info[epi]['plane']:
                # Use the fieldmap acquired at the same plane.
                self.info[epi]['fmapname'] = fmap_name
                self.info[epi]['fmap_entry'] = entry
                break
        else:
            # for fmap in self.fmaps.keys():
            for entry in self.entry_map['fmap']:
                # No fmap at same orientation, look for fmaps in other planes.
                # There won't be more than one, so it isn't much of a choice.
                fmap_name = self.info[entry]['imgfile'] + \
                            self.info[entry]['suffix']
                if self.info[entry]['plane'] == 'sagittal':
                    self.info[epi]['fmapname'] = fmap_name
                    self.info[epi]['fmap_entry'] = entry
                    break
                elif self.info[entry]['plane'] == 'axial':
                    self.info[epi]['fmapname'] = fmap_name
                    self.info[epi]['fmap_entry'] = entry
                    break
                elif self.info[entry]['plane'] == 'coronal':
                    self.info[epi]['fmapname'] = fmap_name
                    self.info[epi]['fmap_entry'] = entry
                    break
                elif self.info[entry]['plane'] == 'oblique':
                    self.info[epi]['fmapname'] = fmap_name
                    self.info[epi]['fmap_entry'] = entry
                    self.info[epi]['plane'] = 'oblique'
                    break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# Make a magnitude image for use in checking registration.\n cmd = 'convert_file -f0 -m0 %s %s nii' % \\\n (entry, self.info[entry]['magfile'])\n self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])\n\n# Make fieldmap. Use separate loop in case make_fmap aborts.\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n fmapname = self.info[entry]['imgfile']\n if not os.path.exists('%s.nii' % fmapname) or self.redo:\n# Couldn't find or existing fmap, compute a new one.\n if self.verbose:\n extra_args = '-v'\n else:\n extra_args = ''\n if self.info[entry]['correct_fmap_phase'] == 'force':\n extra_args += ' --force-slicecorr'\n elif self.info[entry]['correct_fmap_phase'] == 'omit':\n extra_args += ' --omit-slicecorr'\n cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)\n# error = self.ExecCmd(cmd, halt_on_error=False)\n if self.no_fmapcorr:\n halt_on_error = False\n else:\n halt_on_error = True\n error = self.CheckExec(cmd, ['%s.nii' % fmapname], \\\n halt_on_error=halt_on_error)\n if error:\n self.info[entry]['valid'] = False\n del self.fmaps[entry]", "def _map_field_names(self, members):\n result = []\n for member in members:\n mapped_info = {}\n for entry_key, entry_value in member.iteritems():\n if not entry_key in self.DATA_MAPPING: # skip the entry if there is no mapping\n continue\n mapped_info[self.DATA_MAPPING[entry_key]] = entry_value\n result.append(mapped_info)\n return result", "def _make_field_map(fields):\n field_map = {}\n for field in fields:\n if field.name in field_map:\n raise SchemaParseException(\n 'Duplicate record field name %r.' 
% field.name)\n field_map[field.name] = field\n return field_map", "def map():", "def field_mapping(self):\n fields = self.fields\n if self.target_field is not None:\n del fields[self.target_field.get('name')]\n field_labels = list(self.fields.keys())\n\n field_mapping = {\n name: (\n field_labels.index(name),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DataField'\n }\n\n field_mapping.update({\n name: (\n field_labels.index(self.find(e, 'FieldRef').get('field')),\n lambda value, e=e: self.parse_type(value, e)\n )\n for name, e in fields.items()\n if e.tag == f'{{{self.namespace}}}DerivedField'\n })\n\n return field_mapping", "def _extract_field(in_file, epi_meta):\n from nipype.utils.filemanip import fname_presuffix\n import numpy as np\n import nibabel as nb\n from sdcflows.utils.epimanip import get_trt\n\n fieldnii = nb.load(in_file[0])\n trt = get_trt(epi_meta[1], in_file=epi_meta[0])\n data = (\n np.squeeze(fieldnii.get_fdata(dtype=\"float32\"))[\n ..., \"ijk\".index(epi_meta[1][\"PhaseEncodingDirection\"][0])\n ]\n / trt\n * (-1.0 if epi_meta[1][\"PhaseEncodingDirection\"].endswith(\"-\") else 1.0)\n )\n out_file = fname_presuffix(in_file[0], suffix=\"_fieldmap\")\n nii = nb.Nifti1Image(data, fieldnii.affine, None)\n nii.header.set_xyzt_units(fieldnii.header.get_xyzt_units()[0])\n nii.to_filename(out_file)\n return out_file", "def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)", "def edge_mapping(self):\n ...", "def mapper(self, _, doc):\n ret = doc.split('\\t')\n key = ret[2]\n values = {}\n try:\n values[\"ts_ini\"] = datetime.utcfromtimestamp(float(ret[0]))\n except:\n values[\"ts_ini\"] = None\n try:\n values[\"ts_end\"] = datetime.utcfromtimestamp(float(ret[1]))\n except:\n values[\"ts_end\"] = None\n try:\n values[\"value\"] = ret[3]\n except:\n values[\"value\"] = None\n try:\n values[\"energytype\"] = ret[4]\n except:\n values[\"energytype\"] = None\n try:\n values[\"source\"] = ret[5]\n except:\n values[\"source\"] = None\n\n yield key, values", "def _do_mapping(self):\n pass", "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n 
fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def mapper(record):\n personA = record[0]\n personB = record[1]\n mr.emit_intermediate(personA, personB)", "def pair_items_mapper(self, user_id, values):\r\n\t pass #your code here\r", "def compose_fieldmap(rf1, rf2):\n offset1, size1, step1 = rf1\n offset2, size2, step2 = rf2\n\n size = tuple((size2c - 1) * step1c + size1c\n for size1c, step1c, size2c in zip(size1, step1, size2))\n offset = tuple(offset2c * step1c + offset1c\n for offset2c, step1c, offset1c in zip(offset2, step1, offset1))\n step = tuple(step2c * step1c\n for step1c, step2c in zip(step1, step2))\n return (offset, size, step)", "def _build_participant_pairing_map(self, files: List[ConsentFile]) -> Dict[int, ParticipantPairingInfo]:\n participant_ids = {file.participant_id for file in files}\n participant_pairing_data = self.participant_dao.get_pairing_data_for_ids(participant_ids)\n return {\n participant_id: ParticipantPairingInfo(hpo_name=hpo_name, org_name=org_name, site_name=site_name)\n for participant_id, hpo_name, org_name, site_name in participant_pairing_data\n }", "def traj_fields_map(self, func, fields, args,\n map_func=map, idxs=False, traj_sel=None):\n\n # check the args and kwargs to see if they need expanded for\n # mapping inputs\n #first go through each run and get the number of cycles\n n_cycles = 0\n for run_idx in self.run_idxs:\n n_cycles += self.num_run_cycles(run_idx)\n\n mapped_args = []\n for arg in args:\n # make a generator out of it to map as inputs\n mapped_arg = (arg for i in range(n_cycles))\n mapped_args.append(mapped_arg)\n\n # make a generator for the arguments to pass to the function\n # from the mapper, for the extra arguments we just have an\n # endless generator\n map_args = (self.iter_trajs_fields(fields, traj_sel=traj_sel, idxs=False),\n *(it.repeat(arg) for arg in args))\n\n results = map_func(func, *map_args)\n\n if idxs:\n if traj_sel is None:\n traj_sel = self.run_traj_idx_tuples()\n return zip(traj_sel, results)\n else:\n return results", "def result_field_map():\n return {\n \"[run number]\": \"run_number\",\n \"map-file\": \"map_file\",\n \"People\": \"people\",\n \"person_path_weight\": \"person_path_weight\",\n \"Slow\": \"slow\",\n \"Medium\": \"medium\",\n \"Fast\": \"fast\",\n \"display-path-cost?\": \"display_path_cost_p\",\n \"add-person-spacing?\": \"add_person_spacing_p\",\n \"people-wait?\": \"people_wait_p\",\n \"equal-diagonal-weight?\": \"equal_diagonal_weight_p\",\n \"Slow-Speed\": \"slow_speed\",\n \"Medium-Speed\": \"medium_speed\",\n \"Fast-Speed\": \"fast_speed\",\n \"set-fire?\": \"set_fire_p\",\n \"Fire_Speed\": \"fire_speed\" ,\n \"mean-escape-time\": \"mean_escape_time\",\n }", "def parse_pairs(pairs, extra_lines):\n onoff_pairs = pairs[12:-8]\n keyval_pairs = pairs[:12] + pairs[-8:]\n\n # \"Additional notes\" at the end of the file\n # We append that to the key-value pair list and parse it as any other\n notes = '\\n'.join(extra_lines[1:]).strip()\n keyval_pairs.append(('notes', notes))\n\n # Parsed key-value pairs as dictionary\n items = {}\n for pair, plan_step in zip(keyval_pairs, presto_inf_parsing_plan):\n descr, value = pair\n keyname, keytype = plan_step\n items[keyname] = keytype(value)\n return items", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as 
probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def _get_feature2field(self):\n fea_id = 0\n for names in self.feature_names:\n if names is not None:\n for name in names:\n self.feature2id[name] = fea_id\n fea_id += 1\n\n if self.fields is None:\n field_id = 0\n for key, value in self.feature2id.items():\n self.feature2field[self.feature2id[key]] = field_id\n field_id += 1\n else:\n for key, value in self.fields.items():\n for v in value:\n try:\n self.feature2field[self.feature2id[v]] = key\n except:\n pass", "def _SetBaseEpi(self):\n tinfo = {}\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n if self.info[entry]['fmap_entry'] is None:\n tgt = info['anat_tgt']\n else:\n tgt = info['fmap_entry']\n tgt_time = self.info[tgt]['acqtime']\n\n plane = info['plane']\n if not tinfo.has_key(plane):\n tinfo[plane] = {}\n tdiff = abs(info['acqtime'] - tgt_time)\n tinfo[plane][tdiff] = (entry, 'start')\n tdiff = abs(info['acqtime'] + info['TR']*info['tdim']/1000 - tgt_time)\n tinfo[plane][tdiff] = (entry, 'end')\n\n bases = {}\n for plane in tinfo.keys():\n tdiffs = tinfo[plane].keys()\n tdiffs.sort()\n bases[plane] = tinfo[plane][tdiffs[0]]\n\n for epi in self.entry_map['epi']:\n plane = self.info[epi]['plane']\n base_entry, base = bases[plane]\n self.info[epi]['base_entry'] = base_entry\n self.info[epi]['base'] = base\n self.info[epi]['basefile'] = '%s'%(self.info[base_entry]['imgfile'])", "def _hacked_transform(typemap, node):\n entries = []\n groupindices = {}\n types = {}\n\n # step 1: traverse all fields and collect field types and content\n for field in node:\n fieldname, fieldbody = field\n try:\n # split into field type and argument\n fieldtype, fieldarg = fieldname.astext().split(None, 1)\n except ValueError:\n # maybe an argument-less field type?\n fieldtype, fieldarg = fieldname.astext(), ''\n typedesc, is_typefield = typemap.get(fieldtype, (None, None))\n\n # sort out unknown fields\n if typedesc is None or typedesc.has_arg != bool(fieldarg):\n # either the field name is unknown, or the argument doesn't\n # match the spec; capitalize field name and be done with it\n new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]\n if fieldarg:\n new_fieldname += ' ' + fieldarg\n fieldname[0] = nodes.Text(new_fieldname)\n entries.append(field)\n continue\n\n typename = typedesc.name\n\n # collect the content, trying not to keep unnecessary paragraphs\n if 
_is_single_paragraph(fieldbody):\n content = fieldbody.children[0].children\n else:\n content = fieldbody.children\n\n # if the field specifies a type, put it in the types collection\n if is_typefield:\n # filter out only inline nodes; others will result in invalid\n # markup being written out\n content = [n for n in content if isinstance(n, nodes.Inline) or\n isinstance(n, nodes.Text)]\n if content:\n types.setdefault(typename, {})[fieldarg] = content\n continue\n\n # also support syntax like ``:param type name:``\n if typedesc.is_typed:\n try:\n argtype, argname = fieldarg.split(None, 1)\n except ValueError:\n pass\n else:\n types.setdefault(typename, {})[argname] = \\\n [nodes.Text(argtype)]\n fieldarg = argname\n\n translatable_content = nodes.inline(fieldbody.rawsource,\n translatable=True)\n translatable_content.source = fieldbody.parent.source\n translatable_content.line = fieldbody.parent.line\n translatable_content += content\n\n # grouped entries need to be collected in one entry, while others\n # get one entry per field\n if typedesc.is_grouped:\n if typename in groupindices:\n group = entries[groupindices[typename]]\n else:\n groupindices[typename] = len(entries)\n group = [typedesc, []]\n entries.append(group)\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n group[1].append(entry)\n else:\n entry = typedesc.make_entry(fieldarg, [translatable_content])\n entries.append([typedesc, entry])\n\n return (entries, types)", "def process_field_mapping(self, analysis, observable: Observable, result, result_field, result_time=None) -> None:\n pass", "def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile", "def receiverMapping():", "def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def map(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if happy.flow.isIterable(self.aggkey):\n outkey = ''\n for ak in self.aggkey:\n if record.has_key(ak):\n 
outkey = outkey + record[ak] + \":\"\n task.collect(outkey, json) \n elif record.has_key(self.aggkey):\n if (record[self.aggkey]):\n task.collect(record[self.aggkey], json)", "def applyMapping(self):\n pass", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] 
= 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n 
loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = 
{}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = 
currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n 
loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))", "def add_fields(self, fields):\n for label, data in fields.items():\n self[label] = data", "def init_mapping_field_list(\n name_columen: list,\n xdm_one_data_model: list,\n raw_event: dict,\n xdm_rule_to_dtype: dict,\n xdm_rule_to_dclass: dict,\n) -> List[MappingField]:\n mapping_list = []\n xdm_onedata_model_names = xdm_rule_to_dclass.keys()\n for (field_name, xdm_field_name) in zip(name_columen, xdm_one_data_model):\n raw_event_data_list: List[RawEventData] = handle_raw_evnet_data(\n field_name, raw_event\n )\n\n if xdm_field_name not in xdm_onedata_model_names:\n if not xdm_field_name:\n logger.warning(f\"No xdm rule was specified for {field_name}\")\n else:\n raise ValueError(\n f\"No XDM field {xdm_field_name} exists in the onedata model. Please check your modelling rules file.\"\n )\n\n xdm_field_type = xdm_rule_to_dtype.get(xdm_field_name)\n xdm_class_type = xdm_rule_to_dclass.get(xdm_field_name)\n\n mapping_list.append(\n MappingField(\n xdm_rule=xdm_field_name,\n xdm_field_type=xdm_field_type,\n xdm_class_type=xdm_class_type,\n mapped_to_raw=raw_event_data_list,\n )\n )\n\n return mapping_list", "def iter_fields(self):\n # there are two ways a mergefield can be represented: the simple way with fldSimple and the more\n # complex way with instrText and fldChar.\n\n simple_fields = ((field.attrib[namespaced('instr')], field) for field in self._element.xpath('.//w:fldSimple'))\n complex_fields = ((field.text, field) for field in self._element.xpath('.//w:instrText'))\n\n for instr, field in itertools.chain(simple_fields, complex_fields):\n\n m = r.match(instr)\n if not m and self.strict:\n raise ValueError(\"Could not determine name of merge field with instr text '{}'\".format(instr))\n elif not m:\n logger.warning(\"Could not determine name of merge field with instr text '{}'. 
Skipping\".format(instr))\n continue\n\n yield m.group(1), field", "def map(self, key, value):\n namelist = value.strip().split()\n namelist.insert(0, '')\n namelist.append('')\n n = len(namelist)\n for i in range(1, n - 1):\n self.outputcollector.collect(namelist[i], \\\n tuple2str((namelist[i-1] ,namelist[i+1])))", "def get_processed_form_wizard_data(form_wizard, form_list,\n form_element_entries):\n field_name_to_label_map = {}\n cleaned_data = {}\n for form in form_list:\n _field_name_to_label_map, _cleaned_data = get_processed_form_data(\n form,\n form_element_entries\n )\n field_name_to_label_map.update(_field_name_to_label_map)\n cleaned_data.update(_cleaned_data)\n\n return (\n field_name_to_label_map,\n cleaned_data\n )", "def _add_mapping(self, mother_element: GraphElement,\n daughter_element: GraphElement) -> None:\n pass", "def parse_pairs(self):\n pass", "def retrieve_metadata_alignments(self, fields):\n print 'retrieve_metadata_alignments'\n alignments = dict()\n for field in fields:\n root = self.alignment_config_items['alignment_pages_root']\n alignment_template = self.alignment_config_items['alignment_template']\n #wikipage = root + field\n wikipage = field\n alignments[field] = self.retrieve_from_wiki(wikipage,\n alignment_template)\n self.mapper = alignments", "def mapper(record):\n matrix, row, col, value = record\n if matrix == A_MATRIX:\n # For all A(i,j) emit key (j, k) for k=1 to number of columns in B\n for k in range(0, B_COLS):\n mr.emit_intermediate((row, k), [matrix, col, value])\n else:\n # For all B(j, k) emit key (j, i) for i=1 to number of rows in B\n for i in range(0, A_ROWS):\n mr.emit_intermediate((i, col), [matrix, row, value])", "def make_tag_pairs(self, input, start, end, elements):\n tps = TagPairs()\n for e in elements:\n k = [k for k in e.keys()][0]\n tps[k] = e[k]\n return tps", "def __iter__(self) -> Iterator[Tuple[str, str]]:\n for fixup in self._mapping._fixup.values():\n yield fixup.var, fixup.value", "def itermap(parent, indexmap):\n for k, v in indexmap.items():\n if not isinstance(v, dict):\n logging.fatal(\"Property {k} description not dictionary: {v} \".format(k=k, v=v))\n return\n prop = \"{}.{}\".format(parent, k)\n if \"type\" in v:\n if \"fields\" in v:\n embedded_map = v[\"fields\"]\n itermap(prop, embedded_map)\n del v[\"fields\"]\n propdict[prop] = v\n elif \"properties\" in v:\n if len(v) != 1:\n logging.fatal(\"Embedded properties of {} not in dictionary with length 1: {}\".format(k, v))\n return\n embedded_map = v[\"properties\"]\n itermap(prop, embedded_map)\n return", "def read_activity_mappings_both(self):\n with open('oca.translate', \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 3)\n self.amappings[x[0]] = x[1]\n self.amappings2[x[0]] = x[2]", "def _map_onto(self, field_struct, value, options=None):\n if isinstance(value, list):\n # Fill 'repeated' structure\n # a.b = [1, 2]\n # a.b.add() = 1\n # a.b.add() = 2\n for sub in value:\n if hasattr(field_struct, \"add\"):\n nested = field_struct.add()\n # Composite lists will never\n # need to be set by us\n self._map_onto(nested, sub)\n elif hasattr(field_struct, 'append'):\n # Scalar lists will always\n # need to be set by us\n field_struct.append(self._process_value(sub))\n if options:\n self._check_field_length(field_struct, sub, options)\n else:\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto an object/message field.\")\n elif isinstance(value, dict):\n # Fill message structure\n # a.b = {c: 1, d: 
2}\n # a.b.c = 1\n # a.b.d = 2\n for key in value:\n nested = getattr(field_struct, key)\n r = self._map_onto(nested, value[key], self._get_options(field_struct, key))\n if r:\n self._checked_set(field_struct, key, r[0])\n elif isinstance(value, tuple):\n # Fill message structure (in order)\n # a.b = (1, 2)\n # a.b.c = 1\n # a.b.d = 2\n if not hasattr(field_struct, 'DESCRIPTOR'):\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto a list/repeated field.\")\n fields = field_struct.DESCRIPTOR.fields\n for i in range(len(value)):\n nested = getattr(field_struct, fields[i].name)\n r = self._map_onto(nested, value[i], self._get_options(field_struct, fields[i].name))\n if r:\n self._checked_set(field_struct, fields[i].name, r[0])\n else:\n return [self._process_value(value), ]", "def map_to_per_etype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def map_to_per_etype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def _get_info_from_fields(self, fields):\n info = []\n for field in fields:\n if field is icemac.ab.calendar.interfaces.IEvent['persons']:\n value = self.persons\n else:\n schema_field = (\n icemac.addressbook.entities.get_bound_schema_field(\n self.context, None, field,\n default_attrib_fallback=False))\n try:\n value = schema_field.get(schema_field.context)\n except AttributeError:\n # Field defined on IEvent but not on IRecurringEvent, thus\n # it does not exist on the RecurredEvent.\n value = None\n if value is not None:\n value = six.text_type(value)\n if value:\n if field is icemac.ab.calendar.interfaces.IEvent['text']:\n info.extend(value.split('\\n'))\n else:\n info.append(value)\n return info", "def secondary_field_data(pmap):\n # Get place field data from spatial map\n fdata = pmap.get_field_data()\n units = numpy.unique(fdata['unit'])\n \n # Find dual fields and store data\n norm_peak = []\n primary_dist = []\n for u in units:\n ix = (fdata['unit'] == u).nonzero()[0]\n if len(ix) <= 1:\n continue\n fields = fdata[ix]\n sort_ix = numpy.argsort(fields['peak'])\n P = fields[sort_ix[-1]]\n for S in fields[sort_ix[:-1]]:\n norm_peak.append(S['peak']/P['peak'])\n primary_dist.append(\n numpy.sqrt((P['x']-S['x'])**2 + (P['y']-S['y'])**2))\n \n # Return array data\n return numpy.c_[numpy.array(norm_peak), numpy.array(primary_dist)].T", "def field_labels(label_row, datum_row):\n return dict(zip(label_row, datum_row))", "def audit_fields(elem, fields):\r\n errs = []\r\n parsed = {}\r\n for field, field_type, dict_field in fields:\r\n if field not in elem.attrib:\r\n errs.append(('missing value', field))\r\n else:\r\n value = ensure_type(elem.get(field), field_type)\r\n if not value:\r\n errs.append(('wrong type', field))\r\n else:\r\n parsed[dict_field] = value\r\n \r\n if errs:\r\n parsed = None\r\n return parsed, errs", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def map_field_name_to_attribute() -> typing.Dict:\n return {\n \"tag\": \"tag\",\n \"contact\": \"contact\",\n }", "def map(self):\r\n pass", "def _factorize_fields(self,\n reggroups: Dict[str, Dict[str, OMRegField]]) \\\n -> Dict[str, Tuple[Dict[str, OMRegField], int]]:\n outregs = {}\n for gname, gregs in reggroups.items():\n factorize = True\n field0 = None\n stride = 0\n for field in gregs.values():\n if not field0:\n field0 = field\n stride = field0.offset & (self._regwidth -1)\n continue\n if field.offset & (self._regwidth - 1) != stride:\n factorize = 
False\n break\n if (field.size != field0.size or\n field.reset != field0.reset or\n field.access != field0.access):\n factorize = False\n break\n if factorize:\n repeat = len(gregs)\n cname = commonprefix(list(gregs.keys()))\n cdesc = commonprefix([f.desc for f in gregs.values()])\n field = OMRegField(HexInt(field0.offset), field0.size, cdesc,\n field0.reset, field0.access)\n greg = {}\n cname = cname.rstrip('_')\n greg[cname] = field\n outregs[gname] = (greg, repeat)\n else:\n outregs[gname] = (gregs, 1)\n return outregs", "def multi_mapping(func_name, arg_value_pairs, module_name = \"__main__\"):\n func, arg_names = get_function_args(module_name = module_name, function_name = func_name)\n \n return list(map(lambda arg_value_pair: call_func_dynamically(function_name = func_name, \n argument_names = arg_names, \n arg_value_pair = arg_value_pair) ,\n arg_value_pairs))", "def get_fields(maps_dg):\n fields = []\n for mapi in maps_dg:\n fields.append(nmt.NmtField(des_mask, [mapi]))\n\n return fields", "def _secondary_beam(self, hdr):\n # Called ApSecondaryNano in OpenMIMS\n d = {}\n tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))\n d['E0W'], d['ES'] = tmp[:2]\n d['ES widths'] = tmp[2:12]\n d['ES heights'] = tuple(tmp[12:22])\n d['AS'] = tmp[22]\n d['AS widths'] = tuple(tmp[23:33])\n d['AS heights'] = tuple(tmp[33:43])\n d['EnS'], d['EnS width'] = tmp[43:]\n return d", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def map2(inKey, inVal):\n return [([inKey[0]],inKey[1:]+inVal)]", "def get_electrodes_mapping(electrodes):\n return {\n (\n electrodes[\"group\"][idx].device.name,\n electrodes[\"id\"][idx],\n ): idx\n for idx in range(len(electrodes))\n }", "def create_sqlalchemy_mapperproperties_from_dbfields(cls,modeltable):\n allprops = {}\n #\n for field in cls.fieldlist:\n props = field.create_sqlalchemy_mapperproperties(cls,modeltable)\n if (props!=None):\n allprops.update(props)\n return allprops", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def transfer2meta():\n for i in ekindicts:\n e=ekindicts[i]\n t.showMeta(e)\n #t.transfer2meta(e)\n #t.returnData()\n\n return", "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "def map(self, attr1, attr2):\n return dict(zip(getattr(self, attr1), getattr(self, attr2)))", "def _scatgat_fields(self, reggroups: Dict[str, Dict[str, OMRegField]]) \\\n -> Dict[str, Dict[str, OMRegField]]:\n outfields = {}\n rwidth = self._regwidth\n for gname, gregs in reggroups.items():\n first = None\n wmask = rwidth -1\n base = 0\n newfields = {}\n fields = []\n skip = False\n for fname, field in gregs.items():\n while not skip:\n if not first:\n first = field\n base = field.offset & ~wmask\n fbase = field.offset-base\n if fbase < rwidth:\n if fbase + field.size > rwidth:\n # crossing a word boundary, skipping\n skip = True\n break\n fields.append((fname, field))\n break\n if gname not in newfields:\n newfields[gname] = []\n newfields[gname].append(fields)\n fields = []\n first = None\n # cannot modify the fields\n if skip:\n # simply 
copy them over\n outfields[gname] = gregs\n # nothing more to do for this group, preserve the insertion\n # order in the output dictionary\n continue\n # the above loop may have left last fields not yet stored into\n # the new dictionary\n if fields:\n if gname not in newfields:\n newfields[gname] = []\n newfields[gname].append(fields)\n for name, groups in newfields.items():\n for pos, fields in enumerate(groups):\n newname = f'{name}_{pos}' if len(groups) > 1 else name\n if newname not in outfields:\n outfields[newname] = {}\n for fpair in fields:\n fname, field = fpair\n outfields[newname][fname] = field\n return outfields", "def items(self):\n for k, v in self._pairs():\n yield k, util.annotate(v)", "def map_to_per_etype(self, ids): # -> None:\n ...", "def eamap_second(*args):\n return _ida_hexrays.eamap_second(*args)", "def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass", "def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''", "def _decode(self, parts: typing.List[int]) -> typing.Dict:\n info = {field.name: field.decode(parts[i]) for i, field in enumerate(self.fields)}\n return info", "def process_map(file_in, db_table):\n data = []\n i = 0\n for _, element in ET.iterparse(file_in):\n el = shape_element(element)\n if el != None:\n data.append(el)\n i = i + 1\n #Insert every 10,000 records to the database\n if i == 10000:\n db_table.insert_many(data)\n #Empty data list and restart count\n data[:] = []\n i = 0\n #Insert rest of the data list to the database\n db_table.insert_many(data)", "def map(item):\n yield (\n item.committer_id, {\n 'exploration_id': item.get_unversioned_instance_id(),\n 'version_string': item.get_version_string(),\n })", "def ds_map(n_caps, n_refs, n_codes, differential=False):\n\n p_map = pipe_map(n_caps, n_refs, differential)\n return adjust_map(p_map, n_codes)", "def process_entry(self):\n\n for line_item in self.entry:\n pairs = line_item.split(' ')\n for pair in pairs:\n if ':' in pair:\n key, value = pair.split(':')\n if value.isdigit():\n self.fields[key] = int(value)\n else:\n self.fields[key] = value", "def map(pointer, objfile=\"\"):\n ei_class, ehdr = get_ehdr(pointer)\n return map_inner(ei_class, ehdr, objfile)", "def map_from_app_features(self, app):\n if 'features' in app and len(app['features']) > 0:\n empty_fieldlist(self.features)\n for feature in app.get('features', []):\n self.features.append_entry()\n form_feature = self.features.entries[-1].form\n form_feature.map_from_app(feature)", "def get_field_list(filename):\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, 
delimiter=',', quotechar='|')\n field_mapping = {}\n for row in reader:\n field_mapping[row[0]] = row[1]\n return field_mapping", "def apply_rule_files(self, pairs, field1='pron', field2=None):\n if field2 is None:\n field2 = field1\n for e in self:\n e[field2] = self.cache(e[field1], pairs)[0]", "def _transform_sub_mapping(self, cr, uid, external_session, convertion_type, resource, vals, sub_mapping_list,\n mapping, mapping_id, mapping_line_filter_ids=None, defaults=None, context=None):\n if not defaults:\n defaults={}\n ir_model_field_obj = self.pool.get('ir.model.fields')\n for sub_mapping in sub_mapping_list:\n sub_object_name = sub_mapping['child_mapping_id'][1]\n sub_mapping_id = sub_mapping['child_mapping_id'][0]\n if convertion_type == 'from_external_to_openerp':\n from_field = sub_mapping['external_field']\n if not from_field:\n from_field = \"%s_%s\" %(sub_object_name, sub_mapping_id)\n to_field = sub_mapping['internal_field']\n\n elif convertion_type == 'from_openerp_to_external':\n from_field = sub_mapping['internal_field']\n to_field = sub_mapping['external_field'] or 'hidden_field_to_split_%s'%from_field # if the field doesn't have any name we assume at that we will split it\n\n field_value = resource[from_field]\n sub_mapping_obj = self.pool.get(sub_object_name)\n sub_mapping_defaults = sub_mapping_obj._get_default_import_values(cr, uid, external_session, sub_mapping_id, defaults.get(to_field), context=context)\n\n if field_value:\n transform_args = [cr, uid, external_session, convertion_type, field_value]\n transform_kwargs = {\n 'defaults': sub_mapping_defaults,\n 'mapping': mapping,\n 'mapping_id': sub_mapping_id,\n 'mapping_line_filter_ids': mapping_line_filter_ids,\n 'parent_data': vals,\n 'context': context,\n }\n\n if sub_mapping['internal_type'] in ['one2many', 'many2many']:\n if not isinstance(field_value, list):\n transform_args[4] = [field_value]\n if not to_field in vals:\n vals[to_field] = []\n if convertion_type == 'from_external_to_openerp':\n lines = sub_mapping_obj._transform_resources(*transform_args, **transform_kwargs)\n else:\n mapping, sub_mapping_id = self._init_mapping(cr, uid, external_session.referential_id.id, \\\n convertion_type=convertion_type,\n mapping=mapping,\n mapping_id=sub_mapping_id,\n context=context)\n field_to_read = [x['internal_field'] for x in mapping[sub_mapping_id]['mapping_lines']]\n sub_resources = sub_mapping_obj.read(cr, uid, field_value, field_to_read, context=context)\n transform_args[4] = sub_resources\n lines = sub_mapping_obj._transform_resources(*transform_args, **transform_kwargs)\n for line in lines:\n if 'external_id' in line:\n del line['external_id']\n if convertion_type == 'from_external_to_openerp':\n if sub_mapping['internal_type'] == 'one2many':\n #TODO refactor to search the id and alternative keys before the update\n external_id = vals.get('external_id')\n alternative_keys = mapping[mapping_id]['alternative_keys']\n #search id of the parent\n existing_ir_model_data_id, existing_rec_id = \\\n self._get_oeid_from_extid_or_alternative_keys(\n cr, uid, vals, external_id,\n external_session.referential_id.id,\n alternative_keys, context=context)\n vals_to_append = (0, 0, line)\n if existing_rec_id:\n sub_external_id = line.get('external_id')\n if mapping[sub_mapping_id].get('alternative_keys'):\n sub_alternative_keys = list(mapping[sub_mapping_id]['alternative_keys'])\n if self._columns.get(to_field):\n related_field = self._columns[to_field]._fields_id\n elif self._inherit_fields.get(to_field):\n related_field = 
self._inherit_fields[to_field][2]._fields_id\n sub_alternative_keys.append(related_field)\n line[related_field] = existing_rec_id\n #search id of the sub_mapping related to the id of the parent\n sub_existing_ir_model_data_id, sub_existing_rec_id = \\\n sub_mapping_obj._get_oeid_from_extid_or_alternative_keys(\n cr, uid, line, sub_external_id,\n external_session.referential_id.id,\n sub_alternative_keys, context=context)\n del line[related_field]\n if sub_existing_rec_id:\n vals_to_append = (1, sub_existing_rec_id, line)\n vals[to_field].append(vals_to_append)\n else:\n vals[to_field].append(line)\n\n elif sub_mapping['internal_type'] == 'many2one':\n if convertion_type == 'from_external_to_openerp':\n res = sub_mapping_obj._record_one_external_resource(cr, uid, external_session, field_value,\n defaults=sub_mapping_defaults, mapping=mapping, mapping_id=sub_mapping_id, context=context)\n vals[to_field] = res.get('write_id') or res.get('create_id')\n else:\n sub_resource = sub_mapping_obj.read(cr, uid, field_value[0], context=context)\n transform_args[4] = sub_resource\n vals[to_field] = sub_mapping_obj._transform_one_resource(*transform_args, **transform_kwargs)\n else:\n raise except_osv(_('User Error'),\n _('Error with mapping : %s. Sub mapping can be only apply on one2many, many2one or many2many fields') % (sub_mapping['name'],))\n return vals", "def fields(self, forge, values):\n\n values[\"forge\"] = forge['id']\n\n fields = opengui.Fields(\n values=values,\n fields=FIELDS,\n ready=True\n )\n\n fields[\"forge\"].description = forge[\"description\"]\n\n if os.path.exists(\"/opt/service/forge/fields.yaml\"):\n with open(\"/opt/service/forge/fields.yaml\", \"r\") as fields_file:\n fields.extend(yaml.safe_load(fields_file).get(\"fields\", []))\n\n for field in forge.get(\"input\", {}).get(\"fields\", []):\n if field[\"name\"] in RESERVED:\n raise Exception(f\"field name '{field['name']}' is reserved\")\n self.field(fields, field)\n\n return fields", "def _do_mapping(self):\n\n distro = None\n versions = None\n flavor = None\n\n try:\n distro = self._map_name(self.from_distro, self.from_version, self.from_like_distro, self.found_mapping)\n flavor = self._map_flavor(self.from_distro, self.from_version, self.from_like_distro, self.found_mapping)\n versions = self._map_version(self.from_distro, self.from_version, self.from_like_distro, self.found_mapping)\n return [DistroTuple(distro=distro, version=v, flavor=flavor) for v in versions]\n except:\n log.exception(\n 'Failed to fully construct the mapped distro from: {}, {}, {}'.format(self.from_distro,\n self.from_version,\n self.from_like_distro))\n raise", "def _GetFieldValues(\n self, output_mediator, event, event_data, event_data_stream, event_tag):\n return {}", "def dict() -> Dict[str, Pin]:", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def _assemble_mapper(mappers, mapper_0, data_members, mapper_renumber=None):\n if mapper_renumber is not None:\n mappers_all = [_renumber_mapper(mapper_0, mapper_renumber)]\n\n for mapper in mappers:\n mapper_temp = {}\n for map_type in data_members:\n #for map_type, sub_mappper in mapper.items():\n sub_mappper = mapper[map_type]\n mapper_temp[map_type] = {}\n for id_orig, id_merge in sub_mappper.items():\n # map from original to renumbered\n mapper_temp[map_type][id_orig] = mapper_renumber[map_type][id_merge]\n mappers_all.append(mapper_temp)\n else:\n # the first model nids are unchanged\n 
mappers_all = [mapper_0] + mappers\n\n return mappers_all", "def post_process(data):\n for record in data[\"Records\"]:\n for name, value in record.items():\n if type(value) == list:\n newlist = []\n for entry in value:\n newlist.append(post_process_pair(name, entry))\n record[name] = newlist\n else:\n record[name] = post_process_pair(name, value)", "def ToMap(*args):\n return dict((v, str(i)) for i, v in enumerate(args))", "def extract_fields(entry_string):\n for field, value in re.findall(\"(.*?)=(.*?)\\}\", entry_string):\n yield field.strip(\",\").strip(\" \"), value.strip(\"{\").strip(\"}\")", "def after_map(self, map):\n return map", "def after_map(self, map):\n return map", "def demo_two_map():\n # example using map\n temps_c = [(\"Berlin\", 29), (\"Cairo\", 36), (\"Buenos Aires\", 19),\n (\"Los Angeles\", 26), (\"Tokyo\", 27), (\"New York\", 28),\n (\"London\", 22), (\"Beijing\", 32)]\n\n # lambda to return tuple with calculated deg. F converted from deg. C\n c2f = lambda city_tmp: (city_tmp[0], (9.0/5.0)*city_tmp[1] + 32)\n\n print list(map(c2f, temps_c))", "def parseDataField(self):\r\n devId = str(self.deviceId)\r\n datamap = self._datamaps[devId]\r\n work = ''\r\n dataIndex = 0\r\n fieldIndex = 0\r\n mapIndex = 0\r\n self.fields=[]\r\n while mapIndex < len(datamap):\r\n mapChar = datamap[mapIndex]\r\n mapValue = int(mapChar)\r\n if fieldIndex == mapValue:\r\n #we've found another character in our current field\r\n work = work + self.dataField[dataIndex]\r\n mapIndex = mapIndex + 1\r\n dataIndex = dataIndex + 1\r\n elif fieldIndex+1 == mapValue:\r\n #we've found the end of the field we're working on\r\n self.fields.append(int(work, 16))\r\n work = ''\r\n fieldIndex = fieldIndex + 1\r\n else:\r\n if len(work) > 0:\r\n self.fields.append(int(work, 16))\r\n work = ''\r\n fieldIndex = fieldIndex + 1\r\n mapIndex = mapIndex + 1\r\n dataIndex = dataIndex + 1\r\n\r\n if len(work) > 0:\r\n self.fields.append(int(work, 16))\r\n\r\n self.service = self._servicemaps[devId]\r\n self.types = self._typemaps[devId]\r\n self.units = self._unitmaps[devId]\r\n\r\n self.customConvert()\r\n self.extendedConvert()\r\n self.convertLittleEndian()\r\n\r\n return", "def map(z):\n pass", "def test_split_mapping_file_on_field(self):\r\n actual = sorted(\r\n split_mapping_file_on_field(\r\n self.mapping_f1,\r\n 'Treatment'))\r\n self.mapping_exp.sort()\r\n self.assertEqual(actual, self.mapping_exp)", "def map_csv_fields(self):\n etod_csv_fields = {\n 'ctry_id': None,\n 'obst_identifier': None,\n 'obst_name': None,\n 'lon_src': None,\n 'lat_src': None,\n 'agl': None,\n 'amsl': None,\n 'vert_uom': None,\n 'hor_acc': None,\n 'hor_acc_uom': None,\n 'vert_acc': None,\n 'vert_acc_uom': None,\n 'obst_type': None,\n 'lighting': None,\n 'marking': None,\n 'is_group': None,\n }\n\n for field in etod_csv_fields:\n try:\n etod_csv_fields[field] = etod_map[self.ctry_short_name]['fields'][field]\n except KeyError:\n etod_csv_fields[field] = None\n\n self.field_map = etod_csv_fields", "def mapper(k, v):\n instance = json.loads(v)\n field_values = []\n for f in output_fields:\n field = getFromDict(instance, f.split('.'))\n if isinstance(field, dict):\n field_values.append(unicode(json.dumps(field), \"utf-8\"))\n elif isinstance(field, basestring):\n field_values.append(getFromDict(instance, f.split('.')))\n else:#list\n field_values.append(','.join(field))\n\n line = output_separator.join(v for v in field_values)\n yield line, ''", "def smap(self, records, task):\n for key, json in records:\n record = 
happy.json.decode(json)\n if record.has_key(self.joinkey1):\n record['__joinorder__'] = 1\n task.collect(record[self.joinkey1], 1, happy.json.encode(record))\n if record.has_key(self.joinkey2):\n record['__joinorder__'] = 2\n task.collect(record[self.joinkey2], 2, happy.json.encode(record))", "def map_exp_ids(self, exp):\n exp = [(x[0], x[1]) for x in exp]\n return exp" ]
[ "0.5794583", "0.56985533", "0.56411123", "0.5450113", "0.5425905", "0.5395057", "0.53146863", "0.5253061", "0.52433765", "0.5232632", "0.5187637", "0.51365435", "0.511374", "0.5107384", "0.51051044", "0.51040184", "0.50853544", "0.50818384", "0.50797653", "0.5053735", "0.5050864", "0.5049981", "0.5048164", "0.50020194", "0.49679694", "0.49636382", "0.4890645", "0.4858326", "0.48517907", "0.4825758", "0.48155075", "0.48058578", "0.4800256", "0.47999135", "0.4796818", "0.47932273", "0.47891605", "0.47794607", "0.47758138", "0.4775215", "0.47682676", "0.47580612", "0.47562608", "0.47545907", "0.47531742", "0.47531742", "0.47480074", "0.4744548", "0.4737243", "0.47359616", "0.47253844", "0.47124925", "0.47082877", "0.4707716", "0.4703284", "0.46875054", "0.46851024", "0.4679069", "0.4679016", "0.46780086", "0.46740314", "0.46720526", "0.46703714", "0.46660173", "0.46660173", "0.46582597", "0.46522242", "0.4651819", "0.46476", "0.4636382", "0.46336263", "0.46276182", "0.4623758", "0.462355", "0.46194187", "0.4616069", "0.46121338", "0.46080688", "0.46062207", "0.45999336", "0.4594614", "0.45938247", "0.4587895", "0.45828876", "0.4582372", "0.45797497", "0.4577406", "0.45723194", "0.45710504", "0.4560497", "0.45579985", "0.45579985", "0.45537847", "0.4542436", "0.4535624", "0.4533579", "0.4533339", "0.4525951", "0.45236042", "0.45216918" ]
0.6167625
0
Find the hires structural image that was acquired nearest to "acqtime"
def _FindNearestAnat(self, acqtime):
    tdiff_min = 1e6
    for anat in self.entry_map['anat']:
        if self.info[anat]['type'] == 'T1High' and \
                        self.info[anat]['InversionTime'] > 0.:
            tdiff = abs(acqtime - self.info[anat]['acqtime'])
            if tdiff < tdiff_min:
                tdiff_min = tdiff
                anat_min = anat
    return anat_min
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_in_time(images, target):\n\n tgt_mjd = fits.getheader(target, ext=1)['mjd-obs']\n mjds = np.array([fits.getheader(i, ext=1)['mjd-obs'] for i in images])\n\n return images[abs(mjds - tgt_mjd).argsort()[0]]", "def find(image):\n keypoint, description = describe(image)\n # load keypoints, descriptions from mongodb\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n best_match_size = float(\"inf\")\n best_match_index = -1\n match_index = 0\n best_matches = 0\n\n for desc in descriptions:\n matches = bf.match(desc,description)\n matches = sorted(matches, key = lambda x:x.distance)\n if len(matches) > 0:\n match_size = sum(x.distance for x in matches[:10])\n\n print \"match size is \", match_size\n if match_size < best_match_size:\n best_match_size = match_size\n best_match_index = match_index\n best_matches = matches\n\n match_index += 1\n\n needle_color = cv2.imread('needle-stripped.png')[:,:,::-1] # needle\n best_match_image = cv2.imread(\"haystack/\"+files[best_match_index])\n print \"best match is \", files[best_match_index]\n\n # Draw first 10 matches.\n outImg = cv2.imread(\"output/outImg.png\")\n match = cv2.drawMatches(needle_color,keypoint,best_match_image[:,:,::-1],keypoints[best_match_index],best_matches[-20:],outImg, flags=6)\n\n plt.imshow(match),plt.show()\n return", "def look_for_reference_image(image):\n match_list = []\n thresh = 8\n final_value = -1\n references = import_reference_images()\n # Initialize the ORB detector algorithm\n orb = cv2.ORB_create()\n\n # Now detect the keypoints and compute\n # the descriptors for the query image\n imgKeypoints, imgDescriptors = orb.detectAndCompute(image, None)\n try:\n for ref in references:\n # Now detect the keypoints and compute\n # the descriptors for the train image\n ref.refKeypoints, ref.refDescriptors = orb.detectAndCompute(ref.img, None)\n\n # Initialize the Matcher for matching\n # the keypoints and then match the\n # keypoints\n matcher = cv2.BFMatcher()\n matches = matcher.knnMatch(imgDescriptors, ref.refDescriptors, k=2)\n\n for m, n in matches:\n if m.distance < 0.75 * n.distance:\n ref.refMatches.append([m])\n\n match_list.append(len(ref.refMatches))\n except:\n pass\n if len(match_list) != 0:\n if max(match_list) > thresh:\n final_value = match_list.index(max(match_list))\n\n return references[final_value].name", "def next_hit(self, ray):\n hit_candidates = [(i.time_to_bound(ray), i) for i in self._bounds]\n try:\n # WARNING - A hard cut on 'times' smaller than 10^-9 is made to exclude\n # a beam reinteracting with the same barrier. 
This cuts out any legitimate\n # interactions closer than 1nm of the beam position.\n return (sorted([(time, surface) for time, surface in hit_candidates\n if time is not None and time > 1e-9 and all(\n [b.contains(ray.propagate(time).position) for b in self._bounds\n if b is not surface])])[0])\n except IndexError:\n return None", "def closest_on_screen_point(trajectory, viewpoint, yaw, gaze_on_screen):\n\n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n #pprint(traj_angles)\n\n #onscreen_idx, dists, *_ = find_closest_index(traj_angles, gaze_on_screen)\n #idx = closest_node(traj_angles, gaze_on_screen)\n idx = find_closest_index(traj_angles, gaze_on_screen)\n # print(idx)\n\n #traj_ref = trajectory[idx, :]\n screen_ref = traj_angles[idx, :]\n world_ref = trajectory[idx, :]\n\n path_dist = ab_path_length(trajectory, viewpoint, world_ref)\n path_dist /= 8.0 #time headway\n\n #plot_traj(screen_ref, gaze_on_screen, traj_angles)\n\n return(idx, screen_ref, world_ref, path_dist)#, traj_angles)", "def find_closest_frame(point, trajs, cv_evals):\n\n closest_frame = None\n closest_distance = 1e10\n for i, t in enumerate(trajs):\n dists = np.linalg.norm(point - cv_evals[i], axis=1)\n # print(dists.shape, len(t))\n mindist_index = dists.argmin()\n mindist = dists[mindist_index]\n if mindist < closest_distance:\n # logger.debug(\"Found frame in %s at time %s\", simulation.id, t)\n closest_frame = t[mindist_index]\n closest_distance = mindist\n return closest_frame", "def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)", "def closest(data):\n\n images, pc_projections, pcs = data.pca.load()\n\n pc_projections_truncated = pc_projections[:, :data.analysis.config.pc_projection_count]\n\n closest_group_count = int(round(data.analysis.config.closest_group * images.shape[0], 0))\n representative_count = int(round(data.analysis.config.representative * images.shape[0], 0))\n\n closest_group = kclosest.k_closest(closest_group_count, pc_projections_truncated)\n representative = closest_group[kclosest.k_closest(representative_count, pc_projections_truncated[closest_group, :])]\n\n data.analysis.save_closest(closest_group, representative)", "def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a", "def calc_nearest_ind(self, robot_pose):\n pass", "def get_interest_map(far):\n\n # --- horizontal locations on 5 meter high in world coordinate\n height = -3.5\n x = np.arange(-4, 12, 1)\n x = x.reshape((-1, 1))\n high_horizon = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n # --- {3, 7, 11} meters right and 2.5 meter high in world coordinate\n height = -1.\n x = np.arange(3, 12, 4)\n x = x.reshape((-1, 1))\n right_candidate = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n p_world = np.concatenate([high_horizon, 
right_candidate], 0)\n p_img = project_pts3_to_image(p_world, K)\n\n # --- if close, search for top region in image coordinate\n if far < 8:\n x = np.arange(600, 1280, 50)\n x = x.reshape((-1, 1))\n y = 5\n close = np.concatenate([x, np.ones_like(x) * y], 1)\n p_img = np.concatenate([p_img, close], 0)\n\n # --- consider only locations in image\n ll = np.array([0, 0]) # lower-left\n ur = np.array([img_width, img_height]) # upper-right\n inidx = np.all(np.logical_and(ll <= p_img, p_img <= ur), axis=1)\n inbox = p_img[inidx]\n inbox = inbox.astype(np.int)\n\n interest = np.zeros((img_height, img_width))\n interest[inbox[:, 1], inbox[:, 0]] = 1\n interest = scipy.ndimage.morphology.distance_transform_edt(interest-1)\n interest = np.exp(-interest / 30**2)\n interest = (interest - np.min(interest)) / (np.max(interest) - np.min(interest))\n return interest", "def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, ml_screen_ref)", "def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record", "def find_hrc_calib_obsid(inst):\n##\n##--- create a list of already processed data\n##\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/6* > '+ zspace\n# os.system(cmd)\n# with open(zspace, 'r') as f:\n# ftest = f.read()\n# wrd = str(inst) + '/61'\n# mc = re.search(wrd, ftest)\n# if mc is not None:\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/61* >' + zspace\n# os.system(cmd)\n#\n# cmd = 'ls -d /data/hrc/' + str(inst) + '/62* >' + zspace\n# os.system(cmd)\n#\n# data = mcf.read_data_file(zspace, remove=1)\n# prev_list = []\n# for ent in data:\n# atemp = re.split('\\/', ent)\n# prev_list.append(int(float(atemp[-1])))\n#\n##\n##--- find today's date and set checking range for the last 30 days\n##\n# today = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())\n# today = int(Chandra.Time.DateTime(today).secs)\n# start = today - 10 * 86400\n##\n##--- extract hrc obsid information\n##\n# line = 'operation=browse\\n'\n# line = line + 'dataset=flight\\n'\n# line = line + 'level=1\\n'\n# line = line + 'detector=hrc\\n'\n# line = line + 'filetype=evt1\\n'\n# line = line + 'tstart=' + str(start) + '\\n'\n# line = line + 'tstop=' + str(today) + '\\n'\n# line = line + 'go\\n'\n#\n# with open('zline', 'w') as fo:\n# fo.write(line)\n#\n# cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > ' + zspace\n# os.system(cmd)\n#\n# mcf.rm_files('./zline')\n#\n# data = mcf.read_data_file(zspace, remove=1)\n##\n##--- select obsids with 61* and 62* starting\n##\n# h_list = []\n# for ent in data:\n# mc = re.search('hrcf', ent)\n# if mc is not None:\n# atemp = re.split('hrcf', ent)\n# btemp = re.split('_', atemp[1])\n# obsid = int(float(btemp[0]))\n# if obsid > 61000 and obsid < 63000:\n##\n##--- if it is already observed skip it\n##\n# if obsid in prev_list:\n# continue\n##\n##--- check which instrument\n##\n# chk = check_inst(obsid)\n# if chk == inst:\n# h_list.append(obsid)\n\n\n\n h_list = ['62410', '62423', '62435', '62437', '62439', '62441', '62443', '62635', '62637', '62649', '62973', '62997', '62422', '62426', '62436', '62438', '62440', '62442', '62446', '62636', '62638', '62796', '62991']\n\n\n return h_list", "def 
cacheFindEntry(cache, cameraID, desiredTime):\n if not cameraID in cache:\n return None\n cameraTimes = cache[cameraID]\n closestEntry = min(cameraTimes, key=lambda x: abs(x['time'] - desiredTime))\n if abs(closestEntry['time'] - desiredTime) < 30:\n # logging.warning('close: %s', str(closestEntry))\n return os.path.join(cache['readDir'], closestEntry['fileName'])\n else:\n # logging.warning('far: %s, %s', str(desiredTime), str(closestEntry))\n return None", "def find_scene(orig_scene, match):\n \n image_to_compare = orig_scene.copy()\n \n r,c,_ = match.shape\n ir, ic, _ = image_to_compare.shape\n min_ssd = None\n\n\n for x in range(r):\n for y in range(c):\n # compare to sample image to start off with...\n # mse(imageA, imageB, mask=0) \n\n# if x % 25 == 0 and y == 50:\n# print x\n\n # assume x,y is top left corner, \n imageA = match[x:x+ir, y:y+ic, :]\n\n if imageA.shape[0] != ir or imageA.shape[1] != ic:\n continue\n\n # add the mask \n\n current_ssd = ssd(imageA, image_to_compare)\n if current_ssd == None:\n pass\n elif min_ssd == None:\n min_ssd = current_ssd\n best_sample = imageA\n best_x = x\n best_y = y\n elif min_ssd > current_ssd:\n min_ssd = current_ssd\n best_sample = imageA\n best_x = x\n best_y = y\n return best_x, best_y, best_sample", "def closest_card(model, img):\r\n features = preprocess(img)\r\n closest_match = sorted(model.values(), key=lambda x: img_compare(x[1], features))[0]\r\n return closest_match[0]", "def nearest_sparse(self, query):\n self.best_dist = float(\"inf\")\n self.best_element = None\n self._register_best_element = self._register_best_element_single \n self._nearest_sparse_recursive(self._sparse2seq(query), self.root, 0.0)\n return self.best_element,self.best_dist", "def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)", "def locate_source(p,d):\n # M = sensors, n = dimensions\n M, n = p.shape\n p = np.matrix( p ).T\n\n # pick closest receiver\n c = np.argmin(d)\n #sensors delta time relative to sensor c\n d = d - min(d)\n\n indices = list(range(M))\n del indices[c]\n\n A = np.zeros([M-2,n])\n b = np.zeros([M-2,1])\n\n i = indices[0]\n for row,j in enumerate(indices[1:]):\n A[row,:] = 2*( (d[j])*(p[:,i]-p[:,c]).T - \\\n (d[i])*(p[:,j]-p[:,c]).T )\n b[row,0] = (d[i])*((d[j])**2-p[:,j].T*p[:,j]) + \\\n ((d[i])-(d[j]))*p[:,c].T*p[:,c] + \\\n (d[j])*(p[:,i].T*p[:,i]-(d[i])**2)\n\n\n x = np.asarray( np.linalg.lstsq(A,b)[0] )[:,0]\n return x", "def getfirstscam(image_list, starttime, instr, primary_mode, bvid):\n stime=starttime-datetime.timedelta(seconds=2*3600.0)\n for img in image_list:\n if img[4]>stime and img[5]==instr:\n return img[4]+datetime.timedelta(seconds=2*3600.0)\n return None", "def extract_blobs_closest_points(this_robot, in_image, active_mask):\n\n out_image = 
PointSampleImage(in_image.calib_array, in_image.neighbour_array)\n\n G = nx.Graph()\n\n # First add all nodes, where each node consists of an index into\n # calib_array for one of the active pixels.\n for i in range(in_image.n_rows):\n G.add_node(i)\n\n # We will add edges between neighbouring pixels. See\n # sensors/pointsamplecam for the definition of neighbouring.\n node_list = G.nodes()\n n = len(node_list)\n for i in range(n):\n if in_image.masks[i] & active_mask != 0:\n (ixi, iyi) = in_image.calib_array[i,0], in_image.calib_array[i,1]\n for j in in_image.neighbour_array[i]:\n if in_image.masks[j] & active_mask != 0:\n G.add_edge(i, j)\n\n clusters = nx.connected_component_subgraphs(G, copy=False)\n n_clusters = 0\n for cluster in clusters:\n n_clusters += 1\n # Find the closest pixel to the robot in this cluster. \n closest_i = None\n closest_distance = float('inf')\n for i in cluster.nodes():\n #(xr, yr) = in_image.calib_array[i,2], in_image.calib_array[i,3]\n #d = sqrt(xr*xr + yr*yr)\n\n # The pre-computed distance sqrt(xr*xr + yr*yr)\n d = in_image.calib_array[i,5]\n\n if d < closest_distance:\n closest_i = i\n closest_distance = d\n if closest_i != None:\n out_image.masks[closest_i] = in_image.masks[closest_i]\n\n return out_image", "def getfirstimage(image_list, starttime, instr, primary_mode, bvid):\n stime=starttime-datetime.timedelta(seconds=2*3600.0)\n if instr == 'MOS':\n for img in image_list:\n if img[4]>stime and img[5]=='RSS' and img[10]==bvid:\n if not img[11]=='N/A' and not img[12]=='0 - N/A' and not img[13]=='0 - HOME':\n return img[4]+datetime.timedelta(seconds=2*3600.0)\n for img in image_list:\n if img[4]>stime and img[5]==instr and img[10]==bvid:\n return img[4]+datetime.timedelta(seconds=2*3600.0)\n return None", "def find_target_data(params,star_catalog,lightcurves,image_trios,log):\n\n target = photometry_classes.Star()\n\n if params['target_ra'] != None:\n\n target_location = SkyCoord([params['target_ra']], [params['target_dec']], unit=(u.hourangle, u.deg))\n\n stars = SkyCoord(star_catalog['RA'], star_catalog['DEC'], unit=\"deg\")\n\n tolerance = 2.0 * u.arcsec\n\n match_data = matching.search_around_sky(target_location, stars,\n seplimit=tolerance)\n\n idx = np.argsort(match_data[2].value)\n\n if len(match_data[0]) > 0:\n target.star_index = star_catalog['star_index'][match_data[1][idx[0]]]\n target.ra = star_catalog['RA'][match_data[1][idx[0]]]\n target.dec = star_catalog['DEC'][match_data[1][idx[0]]]\n target.i = star_catalog['cal_ref_mag_ip'][match_data[1][idx[0]]]\n target.sig_i = star_catalog['cal_ref_mag_err_ip'][match_data[1][idx[0]]]\n target.r = star_catalog['cal_ref_mag_rp'][match_data[1][idx[0]]]\n target.sig_r = star_catalog['cal_ref_mag_err_rp'][match_data[1][idx[0]]]\n target.i_inst = star_catalog['ref_mag_ip'][match_data[1][idx[0]]]\n target.sig_i_inst = star_catalog['ref_mag_err_ip'][match_data[1][idx[0]]]\n target.r_inst = star_catalog['ref_mag_rp'][match_data[1][idx[0]]]\n target.sig_r_inst = star_catalog['ref_mag_err_rp'][match_data[1][idx[0]]]\n target.separation = match_data[2][idx[0]].to_string(unit=u.arcsec)\n try:\n target.g = star_catalog['cal_ref_mag_gp'][match_data[1][idx[0]]]\n target.sig_g = star_catalog['cal_ref_mag_err_gp'][match_data[1][idx[0]]]\n target.g_inst = star_catalog['ref_mag_gp'][match_data[1][idx[0]]]\n target.sig_g_inst = star_catalog['ref_mag_err_gp'][match_data[1][idx[0]]]\n except AttributeError:\n pass\n\n log.info('\\n')\n log.info('Target identified as star '+str(target.star_index)+\\\n ' in the combined 
ROME catalog, with parameters:')\n log.info('RA = '+str(target.ra)+' Dec = '+str(target.dec))\n log.info('Measured ROME photometry, instrumental:')\n log.info(target.summary(show_mags=False, show_instrumental=True))\n log.info('Measured ROME photometry, calibrated to the VPHAS+ scale:')\n log.info(target.summary(show_mags=True))\n\n target.set_delta_mag(params)\n\n log.info('Assigned delta mag offsets between DanDIA lightcurve and pyDANDIA reference frame analysis:')\n for f in ['g', 'r', 'i']:\n log.info('Delta m('+f+') = '+str(getattr(target, 'delta_m_'+f))+' +/- '+str(getattr(target, 'sig_delta_m_'+f)))\n\n if target.i != None and target.r != None:\n\n target.compute_colours(use_inst=True)\n\n log.info(target.summary(show_mags=False,show_colours=True))\n\n target.transform_to_JohnsonCousins()\n\n log.info(target.summary(show_mags=False,johnsons=True))\n\n for f in ['i', 'r', 'g']:\n\n if f in lightcurves.keys():\n\n images = []\n hjds = []\n mags = []\n magerrs = []\n fluxes = []\n fluxerrs = []\n\n for i in image_trios[f+'_images']:\n name = str(i).replace('\\n','').replace('.fits','')\n\n idx = np.where(lightcurves[f]['images'] == name)[0]\n\n if len(idx) > 0:\n images.append(lightcurves[f]['images'][idx][0])\n hjds.append(lightcurves[f]['hjd'][idx][0])\n mags.append(lightcurves[f]['mag'][idx][0])\n magerrs.append(lightcurves[f]['mag_err'][idx][0])\n (flux,ferr) = mag_to_flux_pylima(lightcurves[f]['mag'][idx][0],\n lightcurves[f]['mag_err'][idx][0])\n fluxes.append(flux)\n fluxerrs.append(ferr)\n\n else:\n images.append(name)\n hjds.append(9999999.999)\n mags.append(99.999)\n magerrs.append(-9.999)\n fluxes.append(9999999.999)\n fluxerrs.append(-9999999.999)\n\n lc = Table()\n lc['images'] = images\n lc['hjd'] = hjds\n lc['mag'] = mags\n lc['mag_err'] = magerrs\n lc['flux'] = fluxes\n lc['flux_err'] = fluxerrs\n\n target.lightcurves[f] = lc\n\n return target", "def find_circles_thres(current_frame_gray, num_of_rafts, radii_hough=[17, 19],\n thres_value=70, sigma_canny=1.0, low_threshold_canny=25, high_threshold_canny=127,\n min_sep_dist=20, raft_center_threshold=60,\n top_left_x=390, top_left_y=450, width_x=850, height_y=850):\n # key data set initialization\n raft_centers = np.zeros((num_of_rafts, 2), dtype=int)\n raft_radii = np.zeros(num_of_rafts, dtype=int)\n\n # crop the image\n image_cropped = current_frame_gray[top_left_y: top_left_y + height_y, top_left_x: top_left_x + width_x]\n\n # threshold the image\n retval, image_thres = cv.threshold(image_cropped, thres_value, 255, 0)\n\n # find edges\n image_edges = canny(image_thres, sigma=sigma_canny, low_threshold=low_threshold_canny,\n high_threshold=high_threshold_canny)\n\n # use Hough transform to find circles\n hough_results = hough_circle(image_edges, np.arange(*radii_hough))\n accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))\n\n # assuming that the first raft (highest accumulator score) is a good one\n # raft_centers[0,0] = cx[0]\n # raft_centers[0,1] = cy[0]\n # raft_radii[0] = radii[0]\n raft_count = 0 # starting from 1!\n\n # remove circles that belong to the same raft and circles that happened to be in between rafts\n for accum_score, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):\n new_raft = 1\n if image_cropped[detected_cy, detected_cx] < raft_center_threshold:\n new_raft = 0\n elif image_cropped[detected_cy - detected_radius // 2: detected_cy + detected_radius // 2,\n detected_cx - detected_radius // 2:detected_cx + detected_radius // 2].mean() \\\n < 
raft_center_threshold:\n new_raft = 0\n # elif (detected_cx - width_x/2)**2 + (detected_cy - height_y/2)**2 > lookup_radius**2:\n # new_raft = 0\n else:\n cost_matrix = scipy_distance.cdist(np.array([detected_cx, detected_cy], ndmin=2),\n raft_centers[:raft_count, :], 'euclidean')\n if np.any(cost_matrix < min_sep_dist): # raft still exist\n new_raft = 0\n if new_raft == 1:\n raft_centers[raft_count, 0] = detected_cx\n # note that raft_count starts with 1, also note that cx corresponds to columns number\n raft_centers[raft_count, 1] = detected_cy # cy is row number\n raft_radii[raft_count] = detected_radius\n raft_count = raft_count + 1\n if raft_count == num_of_rafts:\n # error_message = 'all rafts found'\n break\n\n # convert the xy coordinates of the cropped image into the coordinates of the original image\n raft_centers[:, 0] = raft_centers[:, 0] + top_left_x\n raft_centers[:, 1] = raft_centers[:, 1] + top_left_y\n\n return raft_centers, raft_radii, raft_count", "def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1", "def findNearestTime(foamCase, time):\n times = list(getTimeFolders(foamCase,returnType=\"float\"))\n strTimes = np.array(getTimeFolders(foamCase,returnType=\"string\"))\n if time in times:\n try:\n intTime = int(strTimes[times.index(time)])\n return int(time)\n except:\n return time\n else:\n nearestTime = times[np.argmin(np.abs(np.array(times)-time))]\n print(\"Time %f is not available, choosing nearest time %f\" % ( time, nearestTime))\n try:\n intTime = int(strTimes[times.index(nearestTime)])\n return int(nearestTime)\n except:\n return nearestTime", "def nearest_test_pulse(self):", "def getNearestTime(time_query):\n\n # Convert datetime object to string, for lookup in database.\n tstamp_query 
= coils.time2string(time_query)\n\n # Retrieve image timestamps.\n try:\n tstamp_left = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time <= tstamp_query).\\\n order_by(mapping.Image.time.desc()).limit(1)\n tstamp_left = tstamp_left[0].time\n delta_left = abs(coils.string2time(tstamp_left) - time_query)\n except:\n tstamp_left = None\n delta_left = dt.timedelta.max\n \n try:\n tstamp_right = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time >= tstamp_query).\\\n order_by(mapping.Image.time).limit(1)\n tstamp_right = tstamp_right[0].time\n delta_right = abs(coils.string2time(tstamp_right) - time_query)\n except:\n tstamp_right = None\n delta_right = dt.timedelta.max\n \n # The nearest value has the smallest delta from the query.\n result = tstamp_left if (delta_left < delta_right) else tstamp_right\n return result", "def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]", "def gfind(x,y,xr=None,yr=None):\n\n global BTRACK, GSTRUC, NPIX\n \n # Assume bad until proven otherwise \n flag,rms,noise,par,pind = None,None,None,None,None\n results = {'x':x,'y':y,'pind':pind,'rms':rms,'noise':noise,'par':par,'visited':None,'npix':None} # initial bad values\n\n if x is None or y is None:\n results['visited'] = 0\n return 0,results\n \n # Setting the ranges\n if xr is not None:\n x0 = xr[0] \n x1 = xr[1] \n else: \n x0 = 0 \n x1 = 1000\n if yr is not None:\n y0 = yr[0] \n y1 = yr[1] \n else: \n y0 = 0\n y1 = 1000\n \n if (x < x0) or (x > x1) or (y < y0) or (y > y1): \n flag = 0\n results['visited'] = 0\n return flag,results\n \n # No GSTRUC yet, first position\n try:\n dum = len(GSTRUC)\n except:\n return 0,results\n \n # Looking for the position \n t0 = time.time() \n # Check GSTRUC\n pind, = np.where((GSTRUC['x']==x) & (GSTRUC['y']==y))\n # Check if it was visited before but no good spectrum/solution\n if len(pind)==0:\n bind, = np.where((BTRACK['x']==x) & (BTRACK['y']==y))\n # Found it\n if len(bind)>0:\n return 1,{'x':x,'y':y,'pind':None,'rms':np.inf,'noise':None,'par':None,'visited':1,'npix':None}\n \n # Found something, getting the values \n if len(pind) > 0:\n tstr = GSTRUC['data'][pind[0]]\n rms = tstr['rms']\n noise = tstr['noise']\n par = tstr['par']\n npix = tstr['npix']\n flag = 1 \n \n # Nothing found \n else:\n pind,rms,noise,par,npix = None,None,None,None,None\n flag = 0 \n \n results = {'x':x,'y':y,'pind':pind,'rms':rms,'noise':noise,'par':par,'visited':flag,'npix':npix}\n return flag,results", "def get_frame(cap):\n\n #camera matrix for camera calibration\n mtx = np.array(np.mat(\"588.4525598886621, 0, 301.8008794717551; 0, 588.9763096391521, 242.617026416902; 0, 0, 1\"))\n\n #distrotion coefficients for camera calibration\n dist = np.array(np.mat(\"-0.4351555722591889, 0.2082765081608728, -0.006072767012672472, 0.008139871640987759, 0\"))\n\n #get image frame from the camera\n ret, frame = cap.read()\n\n return frame\n\n h, w = frame.shape[:2]\n\n #get the new optimal camera matrix and the roi which can be used to crop the result\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h))\n\n #get the undistroted image\n dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)\n\n x,y,w,h = roi\n\n #get the cropped image\n dst = dst[y:y+h, x:x+w]\n h, w = dst.shape[:2]\n\n #furthur crop the image to reduce the size of 
arena\n dst = dst[int(h/7):int(h*6/7), int(w/7):int(w*6/7)]\n\n #resize the arena to ARENA_SIZE\n dst = cv2.resize(dst, ARENA_SIZE, interpolation= cv2.INTER_CUBIC)\n\n return dst", "def _ref_pixel_calc(ifg_paths: List[str], params: dict) -> Tuple[int, int]:\n\n lon = params[cf.REFX]\n lat = params[cf.REFY]\n\n ifg = Ifg(ifg_paths[0])\n ifg.open(readonly=True)\n # assume all interferograms have same projection and will share the same transform\n transform = ifg.dataset.GetGeoTransform()\n\n if lon == -1 or lat == -1:\n\n log.info('Searching for best reference pixel location')\n half_patch_size, thresh, grid = refpixel.ref_pixel_setup(ifg_paths, params)\n process_grid = mpiops.array_split(grid)\n refpixel.save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params)\n mean_sds = refpixel._ref_pixel_mpi(process_grid, half_patch_size, ifg_paths, thresh, params)\n mean_sds = mpiops.comm.gather(mean_sds, root=0)\n if mpiops.rank == MASTER_PROCESS:\n mean_sds = np.hstack(mean_sds)\n\n refpixel_returned = mpiops.run_once(refpixel.find_min_mean, mean_sds, grid)\n\n if isinstance(refpixel_returned, ValueError):\n from pyrate.core.refpixel import RefPixelError\n raise RefPixelError(\n \"Reference pixel calculation returned an all nan slice!\\n\"\n \"Cannot continue downstream computation. Please change reference pixel algorithm used before \"\n \"continuing.\")\n refy, refx = refpixel_returned # row first means first value is latitude\n log.info('Selected reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))\n lon, lat = refpixel.convert_pixel_value_to_geographic_coordinate(refx, refy, transform)\n log.info('Selected reference pixel coordinate (lon, lat): ({}, {})'.format(lon, lat))\n\n else:\n log.info('Using reference pixel from config file (lon, lat): ({}, {})'.format(lon, lat))\n log.warning(\"Ensure user supplied reference pixel values are in lon/lat\")\n refx, refy = refpixel.convert_geographic_coordinate_to_pixel_value(lon, lat, transform)\n log.info('Converted reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))\n\n refpixel.update_refpix_metadata(ifg_paths, refx, refy, transform, params)\n\n log.debug(\"refpx, refpy: \"+str(refx) + \" \" + str(refy))\n ifg.close()\n return int(refx), int(refy)", "def find_before(self, src_idx, cut_time):\n i = np.searchsorted(self.node_to_edge_timestamps[src_idx], cut_time)\n\n return self.node_to_neighbors[src_idx][:i], self.node_to_edge_idxs[src_idx][:i], self.node_to_edge_timestamps[src_idx][:i]", "def get_reward(state, resolution, grid_x, grid_y):\n a,b = single_index_to_index(state, resolution)\n position = index_to_obs(a, b, grid_x, grid_y )[0]\n if position >= 0.5:\n return 0\n return -1", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def spatial_planner():\n from scipy.spatial import KDTree\n # KDTree", "def find_path(self, origin, destination, max_time = 1):\r\n \r\n # Before we start, let's check we need to do something\r\n if origin == destination or self._heuristic_weight(origin, destination) == 0:\r\n return None\r\n \r\n # Add the starting point to the \"open\" list\r\n self.open_list.append(origin)\r\n self.g_cost[origin] = 0\r\n 
self.h_cost[origin] = self.f_cost[origin] = self._heuristic_weight(origin, destination)\r\n \r\n self.start_time = lib.clock()\r\n nearest_parent = {}\r\n self.path = PATH_INEXISTENT\r\n \r\n #while (lib.clock() - self.start_time) < max_time:\r\n while len(self.open_list):\r\n # The \"parent\" node, around which we look, is always the first node of the \"open\" list\r\n # This node is transferred to the \"closed\" list\r\n current_parent = self.open_list[0]\r\n self.closed_list.append(current_parent)\r\n del self.open_list[0]\r\n\r\n # The \"parent\" node is the destination : the path has been found.\r\n if current_parent == destination:\r\n self.path = PATH_FOUND\r\n break\r\n\r\n # Set the first element of the open list as the one that has the smallest F-cost\r\n for (i, node) in enumerate(self.open_list):\r\n if self.f_cost[self.open_list[0]] > self.f_cost[node]:\r\n (self.open_list[i], self.open_list[0]) = (self.open_list[0], node)\r\n \r\n # Check the adjacent nodes\r\n children = [road.end for road in current_parent.leaving_roads]\r\n \r\n for child in children:\r\n # Not already in the closed list neither in the open list\r\n if not (child in self.closed_list) and not (child in self.open_list):\r\n # Compute its G-cost, H-cost and F-cost\r\n self.g_cost[child] = self.g_cost[current_parent] + road.weight\r\n self.h_cost[child] = self._heuristic_weight(child, destination)\r\n self.f_cost[child] = self.g_cost[child] + self.h_cost[child]\r\n \r\n nearest_parent[child] = current_parent\r\n \r\n # Add the node to the open list, keeping the order (the first node has the smallest F-cost)\r\n if len(self.open_list) and (self.f_cost[self.open_list[0]] > self.f_cost[child]):\r\n self.open_list.insert(0, child)\r\n else:\r\n self.open_list.append(child)\r\n\r\n # Already in the open list : check to see if this path is a better one than the currently known path\r\n elif child in self.open_list:\r\n # Compute the G-cost of this possible new path\r\n current_g_cost = self.g_cost[current_parent] + road.weight\r\n \r\n # This path is shorter (lower G-cost) : store this path as default to reach this node\r\n if current_g_cost < self.g_cost[child]:\r\n # Set this path as the shortest path to reach this node\r\n nearest_parent[child] = current_parent\r\n self.g_cost[child] = current_g_cost\r\n self.f_cost[child] = self.g_cost[current_parent] + self.h_cost[child] # Do not forget to update the F-cost !\r\n \r\n # Check if the open list is still in the right order\r\n if self.f_cost[self.open_list[0]] > self.f_cost[child]:\r\n i = self.open_list.index(child)\r\n (self.open_list[0], self.open_list[i]) = (self.open_list[i], self.open_list[0])\r\n\r\n # Save the path if it exists.\r\n if self.path == PATH_FOUND:\r\n \r\n current_node = destination\r\n self.path = []\r\n self.path_length = 0\r\n \r\n while current_node != origin:\r\n self.path.insert(0, current_node)\r\n if current_node in nearest_parent:\r\n current_node = nearest_parent[current_node]\r\n else:\r\n raise Exception('ERROR (in gps.find_path()): ill-formed parent list, a node has no parent.')\r\n \r\n self.path_length += 1\r\n return self._build_path()\r\n\r\n return None", "def _get_closeup(self, idx):\n img_arr = p.getCameraImage(width=self._width,\n height=self._height,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix)\n rgb = img_arr[2]\n depth = img_arr[3]\n min = 0.97\n max=1.0\n segmentation = img_arr[4]\n depth = np.reshape(depth, (self._height, self._width,1) )\n segmentation = np.reshape(segmentation, 
(self._height, self._width,1) )\n\n np_img_arr = np.reshape(rgb, (self._height, self._width, 4))\n np_img_arr = np_img_arr[:, :, :3].astype(np.float64)\n\n view_mat = np.asarray(self._view_matrix).reshape(4, 4)\n proj_mat = np.asarray(self._proj_matrix).reshape(4, 4)\n # pos = np.reshape(np.asarray(list(p.getBasePositionAndOrientation(self._objectUids[0])[0])+[1]), (4, 1))\n\n AABBs = np.zeros((len(self._objectUids), 2, 3))\n cls_ls = []\n \n for i, (_uid, _cls) in enumerate(zip(self._objectUids, self._objectClasses)):\n AABBs[i] = np.asarray(p.getAABB(_uid)).reshape(2, 3)\n cls_ls.append(NAME2IDX[_cls])\n\n # np.save('/home/tony/Desktop/obj_save/view_mat_'+str(self.img_save_cnt), view_mat)\n # np.save('/home/tony/Desktop/obj_save/proj_mat_'+str(self.img_save_cnt), proj_mat)\n # np.save('/home/tony/Desktop/obj_save/img_'+str(self.img_save_cnt), np_img_arr.astype(np.int16))\n # np.save('/home/tony/Desktop/obj_save/AABB_'+str(self.img_save_cnt), AABBs)\n # np.save('/home/tony/Desktop/obj_save/class_'+str(self.img_save_cnt), np.array(cls_ls))\n\n np.save(OUTPUT_DIR + '/closeup_' + str(self.img_save_cnt - 1) + '_' + str(idx), np_img_arr.astype(np.int16))\n dets = np.zeros((AABBs.shape[0], 5))\n for i in range(AABBs.shape[0]):\n dets[i, :4] = self.get_2d_bbox(AABBs[i], view_mat, proj_mat, IM_HEIGHT, IM_WIDTH)\n dets[i, 4] = int(cls_ls[i])\n # np.save(OUTPUT_DIR + '/annotation_'+str(self.img_save_cnt), dets)\n\n test = np.concatenate([np_img_arr[:, :, 0:2], segmentation], axis=-1)\n\n return test", "def closest(start, incoming_angle, timeleft):\n visited = set()\n frontier = [ (0, 0, 0, incoming_angle, start) ]\n distances = {}\n while frontier:\n (cost, difficulty, count, in_angle, n) = heappop(frontier)\n if n in visited:\n continue\n distances[n] = cost\n if cost > timeleft:\n # cannot reach a non visited edge on time\n return None\n edges = sorted(n.edges, key=priority(in_angle))\n for edge in edges:\n if cost + edge.cost <= timeleft:\n # we can take this edge\n if edge.distance > 0:\n return compute_path(distances, cost, start, n) + [ edge ]\n else:\n if edge.stop not in visited:\n difficulty = max(e2.difficulty for e2 in edge.stop.edges)\n candidate = (cost + edge.cost, difficulty, count + edge.visits, edge.angle, edge.stop)\n # print candidate\n heappush(frontier, candidate)\n visited.add(n)\n return None", "def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path", "def nearest_voxel(center, roi):\n nearest=[]\n min_dist = 10000\n for vxl in roi:\n dist = sum(abs(np.subtract(vxl,center)))/3\n if dist < min_dist:\n min_dist=dist\n nearest=[vxl]\n elif dist==min_dist:\n nearest.append(vxl)\n # print(nearest)\n return 
nearest[random.randint(0,len(nearest)-1)]", "def detect_board(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 50, 150, apertureSize=3)\n\n lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)\n for rho, theta in lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n y0 = b * rho\n y1 = int(y0 + 1000 * a)\n y2 = int(y0 - 1000 * a)\n\n if abs(y1 - y2) < 5:\n return int(y0)\n\n return None", "def model_lookup(taskid, beam):\n # Assumes running on happili-05:\n model_dir = '/tank/apertif/driftscans/fits_files/'\n all_dates = get_dates()\n all_beam_stats = get_beam_stats(all_dates)\n if beam > all_beam_stats.shape[1] - 1:\n print(\"\\t{}: Pick a valid beam number 0-39.\".format(beam))\n exit()\n beam_stats = all_beam_stats[:, beam]\n\n # Divide into before & after beam attenuation on October 1st (big impact on beam quality)!\n taskid = str(taskid)[:6]\n if int(taskid) < 191001:\n # *** Until we have a full beam complement ***:\n index = np.where(all_dates == '190821')[0][0]\n # index = np.where(all_dates == '190916')[0][0]\n dates = all_dates[:index + 1]\n beams = beam_stats[:index + 1]\n else:\n # index = np.where(all_dates == '191002')[0][0]\n index = np.where(all_dates == '191023')[0][0]\n dates = all_dates[index:]\n beams = beam_stats[index:]\n\n print(\"[MODEL_LOOKUP] Searching for appropriate beam model for beam {}.\".format(beam))\n if np.all(beams == 0):\n print(\"\\tNo good beam model options for period when this was observed. Do more drift scans (or edit code).\")\n exit()\n elif len(beams[beams == 1]) == 1:\n # If only one good beam model exists, use it.\n best = dates[beams == 1][0]\n else:\n # Use nearest. Don't have enough beam statistics for floor, I think.\n dates = dates[beams == 1]\n best = nearest_date(dates, taskid)\n\n # *** Until we have a full beam complement ***:\n if beam >= 32:\n # best = '191002'\n best = '191023'\n\n model = model_dir + '{}/beam_models/chann_9/{}_{:02}_I_model.fits'.format(best, best, beam)\n\n return model", "def guess_image(which_cam, image, ntraps):\n threshes = [0.5, 0.65]\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif p < threshold and left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n xdata = np.arange(x_len)\n plt.figure()\n plt.plot(xdata, peak_vals)\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)", "def getImageInfo(img, header=''):\n if (os.path.exists(img) == False):\n print \"image 
not found: \", img\n return\n # Assume this is a CASA image\n if (header == ''):\n try:\n print \"imhead\",\n header = imhead(img, mode = 'list') # This will work for most CASA builds\n except:\n print \"imhead\",\n header = imhead(img) # needed to prevent crash in early CASA 4.6 builds (see CAS-8214)\n print \"imhead\",\n header = imhead(img, mode = 'list')\n if (header is None):\n print \"imhead returned NoneType. This image header is not sufficiently standard.\"\n return\n if ('beammajor' in header.keys()):\n bmaj = header['beammajor']\n bmin = header['beamminor']\n bpa = header['beampa']\n elif ('perplanebeams' in header.keys()):\n beammajor = []\n beamminor = []\n beampa = []\n for beamchan in range(header['perplanebeams']['nChannels']):\n beamdict = header['perplanebeams']['*'+str(beamchan)]\n beammajor.append(beamdict['major']['value'])\n beamminor.append(beamdict['minor']['value'])\n beampa.append(beamdict['positionangle']['value'])\n bmaj = np.median(beammajor)\n bmin = np.median(beamminor)\n sinbpa = np.sin(np.radians(np.array(beampa)))\n cosbpa = np.cos(np.radians(np.array(beampa)))\n bpa = np.degrees(np.median(np.arctan2(np.median(sinbpa), np.median(cosbpa))))\n else:\n bmaj = 0\n bmin = 0\n bpa = 0\n naxis1 = header['shape'][0]\n naxis2 = header['shape'][1]\n cdelt1 = header['cdelt1']\n cdelt2 = header['cdelt2']\n if (header['cunit1'].find('rad') >= 0):\n # convert from rad to arcsec\n cdelt1 *= 3600*180/np.pi\n elif (header['cunit1'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt1 *= 3600\n if (header['cunit2'].find('rad') >= 0):\n cdelt2 *= 3600*180/np.pi\n # convert from rad to arcsec\n elif (header['cunit2'].find('deg') >= 0):\n # convert from deg to arcsec\n cdelt2 *= 3600\n if (type(bmaj) == dict):\n # casa >= 4.1.0 (previously these were floats)\n bmaj = headerToArcsec(bmaj)\n bmin = headerToArcsec(bmin)\n bpa = headerToArcsec(bpa)/3600.\n ghz = 0\n if ('ctype4' in header.keys()):\n if (header['ctype4'] == 'Frequency'):\n imgfreq = header['crval4']\n cdelt = header['cdelt4']\n crpix = header['crpix4']\n npix = header['shape'][3]\n ghz = imgfreq*1e-9\n if (ghz == 0):\n if ('ctype3' in header.keys()):\n if (header['ctype3'] == 'Frequency'):\n imgfreq = header['crval3']\n cdelt = header['cdelt3']\n crpix = header['crpix3']\n npix = header['shape'][2]\n ghz = imgfreq*1e-9\n return([bmaj,bmin,bpa,cdelt1,cdelt2,naxis1,naxis2,ghz], header)", "def find_arms(path,fr_nb):\n im=open_frame(path,fr_nb)\n img=im.copy()\n im=img_as_ubyte(im)\n mask_h = hysteresis_thresholding(img,6,10)\n \n ksize=5\n kernel = np.ones((ksize,ksize),dtype = np.uint8)\n kernel = skimage.morphology.disk(ksize)\n \n mask = cv2.morphologyEx(mask_h, cv2.MORPH_OPEN, kernel,iterations=2)\n \n arms = mask_h-mask\n \"\"\"\n lab,_ = ndi.label(diff)\n \n arms = skimage.morphology.remove_small_objects(lab,60)\"\"\" #Only temporary, to track only the biggest\n return mask,arms", "def _get_nearest_entry_with_artifact(self):\n\n local_entry = self._get_local_entry()\n if local_entry.has_artifact:\n return local_entry\n\n cloud_entry = self._get_cloud_entry()\n if cloud_entry is not None and cloud_entry.has_artifact:\n return cloud_entry\n\n return None", "def _find_best_match(icon: numpy.ndarray, critters: List[CritterImage]) -> CritterImage:\n fast_similarity_metric = lambda r: cv2.absdiff(icon, r.img).mean()\n similarities = list(map(fast_similarity_metric, critters))\n sim1, sim2 = numpy.partition(similarities, kth=2)[:2]\n\n # If the match seems obvious, return the quick result.\n if abs(sim1 - sim2) > 
3:\n return critters[numpy.argmin(similarities)]\n\n # Otherwise, we use a slower matching, which tries various shifts.\n def slow_similarity_metric(critter):\n diffs = []\n for x in [-2, -1, 0, 1, 2]:\n shifted = numpy.roll(icon, x, axis=1)\n diffs.append(cv2.absdiff(shifted, critter.img).sum())\n return min(diffs) # Return lowest diff across shifts.\n\n similarities = list(map(slow_similarity_metric, critters))\n return critters[numpy.argmin(similarities)]", "def find_gate_posts(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n kernel = np.ones((5, 5), np.uint8)\n\n # cm_image = cv2.erode(cm_image, kernel, iterations=1)\n kernel = np.ones((5, 5), np.uint8)\n cm_image = cv2.dilate(cm_image, kernel, iterations=3)\n kernel = np.ones((4, 4), np.uint8)\n cm_image = cv2.erode(cm_image, kernel, iterations=1)\n\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circles = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n cm_circles = list(filter(lambda x: (cv2.contourArea(x) > 200\n and cv2.contourArea(x) < 5000),\n cm_circles))\n cm_circles = sorted(cm_circles,\n key=lambda x: (arc_circ(x)),\n reverse=False)\n\n cm_circles = list(filter(lambda x: (cv2.arcLength(x, True)**2/(4\n * math.pi*cv2.contourArea(x)) > 2.5), cm_circles))\n\n if len(cm_circles) < 1:\n print(\"Not enough circles found\")\n return None\n\n filtered_circles = cm_circles[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n circle_positions.append((cX, cY, arc_circ(circle), cv2.arcLength(\n circle, True)**2/(4*math.pi*cv2.contourArea(circle))))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_gate_posts\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions", "def find_intersection(mask_part, houghlinePara=50):\n edge = cv.Canny(mask_part, 0, 1)\n lines = cv.HoughLines(edge, 1, np.pi / 180, houghlinePara)\n\n rhos = []\n thetas = []\n for line in lines:\n rho, theta = line[0]\n rhos.append(rho)\n thetas.append(theta)\n\n thetas = np.array(thetas)\n rhos = np.array(rhos)\n mean = np.mean(thetas)\n inx = thetas > mean\n\n thetas1 = thetas[inx]\n rhos1 = rhos[inx]\n thetas2 = thetas[1 - inx != 0]\n rhos2 = rhos[1 - inx != 0]\n # detect outliers\n inx2 = np.abs(rhos1-np.mean(rhos1)) <= np.std(rhos1)\n rhos1 = rhos1[inx2]\n thetas1 = thetas1[inx2]\n inx3 = np.abs(rhos2-np.mean(rhos2)) <= np.std(rhos2)\n rhos2 = rhos2[inx3]\n thetas2 = thetas2[inx3]\n\n theta1 = np.mean(thetas1)\n rho1 = np.mean(rhos1)\n theta2 = np.mean(thetas2)\n rho2 = np.mean(rhos2)\n\n k1 = -1 / np.tan(theta1)\n k2 = -1 / np.tan(theta2)\n b1 = rho1 * np.sin(theta1) - k1 * rho1 * np.cos(theta1)\n b2 = rho2 * np.sin(theta2) - k2 * rho2 * np.cos(theta2)\n\n x_cross = (b2-b1) / (k1-k2)\n y_cross = (k1 * b2 - k2 * b1) / (k1 - k2)\n # return thetas1, thetas2\n return x_cross, y_cross", "def find_nearest_neighbor(src, dst):\n return sp.spatial.KDTree(dst).query(src)", "def gfind(band='both', detsize=1.1, exponly=False, gaper=False, maxgap=1500.0,\n minexp=1.0, quiet=False, retries=100, skypos=None, trange=None,\n verbose=0, skyrange=None):\n\n # Determine if 
we have to loop over both bands or just one.\n if band.upper() == 'BOTH':\n output = {'NUV':None, 'FUV':None}\n elif band.upper() in ['NUV', 'FUV']:\n output = {band.upper():None}\n else:\n raise SystemExit('Invalid band: {b}'.format(b=band))\n\n for this_band in list(output.keys()):\n # Get valid time ranges, but only if trange is not provided.\n ranges = dbt.fGetTimeRanges(this_band, skypos, maxgap=maxgap,\n minexp=minexp, verbose=verbose,\n detsize=detsize, trange=trange,\n skyrange=skyrange)\n if not ranges.any():\n if not quiet:\n print('No {band} exposure'\n ' time in database.'.format(band=this_band))\n output[this_band] = {'expt':0, 't0':None, 't1':None}\n else:\n expt = (ranges[:, 1]-ranges[:, 0]).sum()\n if not quiet:\n print(\"{band}: {expt}s (raw) in {n} exposures.\".format(\n band=this_band, expt=expt, n=len(ranges)))\n if not exponly:\n if gaper:\n f = '['\n for r in ranges:\n f += '[%.3f' % r[0] + ', %.3f' % r[1] + '],'\n if not quiet:\n print(f[:-1]+']')\n else:\n for r in ranges:\n if not quiet:\n print(' [ %.3f' % r[0] + ', %.3f' % r[1] +\n ' ], %.3f' % (r[1]-r[0]) + ' seconds')\n output[this_band] = {'expt':expt, 't0':ranges[:, 0],\n 't1':ranges[:, 1],\n 'nearest_source':dbt.find_nearest_mcat(\n this_band, skypos, 0.05)}\n\n return output", "def mi_from_dm_alt_hq(distance_matrix, ns, nh, spike_train_list=None):\n \n print \"start loading\"\n \n nr = len(distance_matrix)\n nt = nr/ns\n #nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n nearest_neighbours = np.array([np.array(hq.nsmallest(nh, r)) for r in distance_matrix])\n near_to = [[j for j in range(nr) if i in nearest_neighbours[j] ] for i in range(nr)]\n \n print \"finished sorting\"\n return\n #nr = len(distance_matrix)\n #nearest_neighbours = np.array([[i] + distance_matrix[i].argsort()[1:nh].tolist() for i in range(nr)])\n \n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n \n counts = []\n counted_glob = False #set a flag for later use\n if spike_train_list is not None:\n for i in range(len(near_to)):\n c_i = 0\n \n if i not in members_of_glob:\n #print near_to[i]\n for j in near_to[i]:\n if j not in members_of_glob and spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1\n else:\n if not counted_glob: #this should only really happen if glob has a small number of members...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*g_i\n \n counted_glob = True\n else:\n pass\n \n else: #If i is in the glob...\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n g_i = f_i - 1.0/float(sum(glob_comp.values()))\n c_i = 1 + (nh - 1)*g_i\n \n counts.append(c_i) \n counts = np.array(counts) \n I = (1.0/nr)*sum( np.log2((ns*counts)/float(nh)) ) \n \n else:\n near_to_same_stim = [[n for n in near_to[j] if abs(n-j)%ns==0 ] for j in range(nr)]\n number_of_neighbourhoods = np.array([len(l) for l in near_to])\n number_of_neighbourhoods_same_stim = np.array([len(l) for l in near_to_same_stim])\n I = (1.0/nr)*sum( np.log2((ns*number_of_neighbourhoods_same_stim)/float(nh)) )\n \n return I", "def nearest_segment(self, pose):\n global_x, global_y, global_a = pose\n track_pose = 0, 0, 0\n best = None, None\n best_dist = None\n for segment in self.segments:\n # convert global (x,y) into pose relative coordinates\n x, y, a = track_pose\n ca, sa = math.cos(-a), math.sin(-a) # rotate back\n sx = global_x - x\n sy = global_y - y\n sx, sy = ca*sx - sa*sy, sa*sx 
+ ca*sy\n dist = segment.get_offset((sx, sy, global_a - a))[0]\n if dist is not None:\n if best_dist is None or abs(dist) < best_dist:\n best = segment, (sx, sy, global_a - a)\n best_dist = abs(dist)\n track_pose = segment.step(track_pose)\n return best", "def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices", "def find_inlier(self):\n len_of_matches = len(self.match)\n # The last line of W stores the whole number of consistency of this match\n self.W = np.zeros((len_of_matches+1, len_of_matches))\n for i in np.arange(len_of_matches):\n for j in np.arange(len_of_matches):\n if i >= j:\n continue\n\n # ASSUMPTION : the index of descriptor is the same with the index of image\n wa = self.featureFrameA[self.match[i].queryIdx].pt[0]-self.featureFrameA[self.match[j].queryIdx].pt[0]\n wb = self.featureFrameA[self.match[i].queryIdx].pt[1]-self.featureFrameA[self.match[j].queryIdx].pt[1]\n wa_ = self.featureFrameB[self.match[i].trainIdx].pt[0]-self.featureFrameB[self.match[j].trainIdx].pt[0]\n wb_ = self.featureFrameB[self.match[i].trainIdx].pt[1]-self.featureFrameB[self.match[j].trainIdx].pt[1]\n\n # Compare and complete the matrix W\n if abs(wa-wa_) + abs(wb-wb_) <= INLIER_DIST_THRE:\n self.W[i, j] = 1\n self.W[j, i] = 1\n self.W[len_of_matches, j] += 1\n\n # Choose the best inlier features\n self.best_matches = []\n candidate = np.arange(len_of_matches)\n while True:\n best_matchIdx = self.find_most_compatible_match(candidate)\n if not best_matchIdx or best_matchIdx == -1: # in case no best match is found\n break\n else:\n self.best_matches.append(self.match[best_matchIdx])\n candidate = np.delete(candidate, np.where(candidate == best_matchIdx), axis=0)", "def find_and_sort_circles(image_gray, num_of_rafts, prev_pos, radii_hough, thres_value=30, sigma_Canny=1.0,\n low_threshold_canny=25, high_threshold_canny=127, max_displ=50):\n # key data set initialization\n raft_centers = np.zeros((num_of_rafts, 2), dtype=int)\n raft_radii = np.zeros(num_of_rafts, dtype=int)\n\n # threshold the image first\n retval, image_thres = cv.threshold(image_gray, thres_value, 255, 0)\n # kernel = np.ones((3,3),np.uint8)\n # image_thres = cv.morphologyEx(image_thres, cv.MORPH_OPEN, kernel)\n\n # use canny and then Hough transform to find circles\n image_edges = canny(image_thres, sigma=sigma_Canny, low_threshold=low_threshold_canny,\n high_threshold=high_threshold_canny)\n hough_results = hough_circle(image_edges, np.arange(*radii_hough))\n accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))\n\n raft_count = 0\n for raftID in np.arange(num_of_rafts):\n for accumScore, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):\n distance = np.sqrt((detected_cx - prev_pos[raftID, 0]) ** 2 + (detected_cy - prev_pos[raftID, 1]) ** 2)\n if distance < max_displ:\n raft_centers[raftID, 0] = detected_cx\n # note that raft_count starts with 1, also note that cx corresonds to columns number\n raft_centers[raftID, 1] = detected_cy\n # cy is row number\n raft_radii[raftID] = detected_radius\n raft_count += 1\n break\n\n return raft_centers, raft_radii, raft_count", "def imagetest(thetainput,doubleopponencyinput):\n theta = thetainput\n rgcMode = doubleopponencyinput\n\n\n C = retina.sample(img,x,y,coeff[i],loc[i],rgb=True) # CENTRE\n S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) # SURROUND\n \n if rgcMode == 0:\n \tpV,nV = rgc.opponency(C,S,theta)\n else:\n \tpV,nV = 
rgc.doubleopponency(C,S,theta)\n cv2.namedWindow(\"Input\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Input\", img)\n rIntensity,cIntensity = showNonOpponency(C,theta)\n cv2.namedWindow(\"Intensity Responses\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Intensity Responses\", rIntensity)\n cv2.namedWindow(\"Intensity Responses Cortex\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Intensity Responses Cortex\", cIntensity)\n cv2.waitKey(0)\n #Generate backprojected images\n if showInverse:\n rOpponent = showBPImg(pV,nV)\n cv2.namedWindow(\"Backprojected Opponent Cells Output\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Backprojected Opponent Cells Output\", rOpponent)\n cv2.waitKey(0)\n # Cortex\n if showCortex:\n cOpponent = showCortexImg(pV,nV)\n cv2.namedWindow(\"Cortex Opponent Cells Output\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Cortex Opponent Cells Output\", cOpponent)\n cv2.waitKey(0)", "def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... \\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... 
\\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return", "def find_circles_adaptive(current_frame_gray, num_of_rafts, radii_hough,\n adaptive_thres_blocksize=9, adaptive_thres_const=-20,\n min_sep_dist=20, raft_center_threshold=60,\n top_left_x=390, top_left_y=450, width_x=850, height_y=850):\n # key data set initialization\n raft_centers = np.zeros((num_of_rafts, 2), dtype=int)\n raft_radii = np.zeros(num_of_rafts, dtype=int)\n\n # crop the image\n image_cropped = current_frame_gray[top_left_y: top_left_y + height_y, top_left_x: top_left_x + width_x]\n\n # threshold the image\n image_thres = cv.adaptiveThreshold(image_cropped, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY,\n adaptive_thres_blocksize, adaptive_thres_const)\n\n # use Hough transform to find circles\n hough_results = hough_circle(image_thres, np.arange(*radii_hough))\n accums, cx, cy, radii = hough_circle_peaks(hough_results, np.arange(*radii_hough))\n\n # assuming that the first raft (highest accumulator score) is a good one\n # raft_centers[0,0] = cx[0]\n # raft_centers[0,1] = cy[0]\n # raft_radii[0] = radii[0]\n raft_count = 0 # starting from 1!\n\n # remove circles that belong to the same raft and circles that happened to be in between rafts\n for accumScore, detected_cx, detected_cy, detected_radius in zip(accums, cx, cy, radii):\n new_raft = 1\n if image_cropped[detected_cy, detected_cx] < raft_center_threshold:\n new_raft = 0\n elif image_cropped[detected_cy - detected_radius // 2: detected_cy + detected_radius // 2,\n detected_cx - detected_radius // 2:detected_cx + detected_radius // 2].mean() \\\n < raft_center_threshold:\n new_raft = 0\n # elif (detected_cx - width_x/2)**2 + (detected_cy - height_y/2)**2 > lookup_radius**2:\n # new_raft = 0\n else:\n cost_matrix = scipy_distance.cdist(np.array([detected_cx, detected_cy], ndmin=2),\n raft_centers[:raft_count, :], 'euclidean')\n if np.any(cost_matrix < min_sep_dist): # raft still exist\n new_raft = 0\n if new_raft == 1:\n raft_centers[raft_count, 0] = detected_cx\n # note that raft_count starts with 1, also note that cx corresonds to columns number\n raft_centers[raft_count, 1] = detected_cy\n # cy is row number\n raft_radii[raft_count] = detected_radius\n raft_count = raft_count + 1\n if raft_count == num_of_rafts:\n # error_message = 'all rafts found'\n break\n\n # convert the xy coordinates of the cropped image into the coordinates of the original image\n raft_centers[:, 0] = raft_centers[:, 0] + top_left_x\n raft_centers[:, 1] = raft_centers[:, 1] + top_left_y\n\n return raft_centers, raft_radii, raft_count", "def __findFarestPoint__( self, outPoint ):\n end = outPoint;\n endInside = self.inside( end );\n if endInside: return outPoint;\n start = self.center;\n startInside = self.inside( start );\n \n while( True ):\n if ( utility.euclideanDistSqr( start, end ) <= 4 ):\n return start;\n mid = utility.devide( utility.add( start, end ), 2);\n if self.inside( mid ):\n start = mid;\n else:\n end = mid;", "def getBrightest(source=None, elMin=20, elMax=87, sourceList=[], action=INCLUDE, \n numReturn=1, ignoreNorthSouth=True, coordsys=\"azel\",\n getOptical=False, fluxLimit=1.0, frequency=95) : \n # If source is None then use Polaris as the source as it is always up\n nsource = source \n if source == None: \n nsource = 'aumi'\n 
ignoreNorthSouth = True \n coordsys = \"azel\" \n # We need to get a bunch and then sort them and truncate after sorting...\n numToGet = 333 \n r = s.getNearest(nsource, elMin, elMax, sourceList, action, numToGet,\n ignoreNorthSouth, coordsys, getOptical, fluxLimit, frequency)\n print \"Number of results before trimming:\", len(r) \n def f(x, y):\n return cmp(y.brightness, x.brightness) \n r.sort(f) \n if source == None: \n for n in r:\n n.reference = \"None\"\n n.distance = 0\n n.azimuth = 0\n n.elevation = 0\n return r[:numReturn]", "def acquisition(self):\r\n fs, _ = self.gp.predict(self.gp.X)\r\n next_fs, vars = self.gp.predict(self.X_s)\r\n opt = np.min(fs)\r\n improves = opt - next_fs - self.xsi\r\n Z = improves / vars\r\n eis = improves * norm.cdf(Z) + vars * norm.pdf(Z)\r\n return self.X_s[np.argmax(eis)], eis", "def __find_previous_cf_element(self) -> LineElement:\n index = (self.current_time_in_eighths - 1) // N_EIGHTHS_PER_MEASURE\n result = self.cantus_firmus[index]\n return result", "def heuristic_main(data):\n arcs, nodes, periods = data.arcs.size, data.nodes, data.periods\n commodities = data.commodities\n arc_origins, arc_destinations = get_2d_index(data.arcs, nodes)\n\n model = make_model(data)\n\n open_arcs = np.zeros((periods, arcs), dtype=np.double)\n\n objective = 0.\n\n for t in xrange(periods):\n fixed_cost, variable_cost = data.fixed_cost[t, :], data.variable_cost\n demand = data.demand[t, :]\n flow, arc_open = model._flow, model._arc_open\n\n for arc in xrange(arcs):\n i, j = arc_origins[arc], arc_destinations[arc]\n con_name = 'cap_{}-{}'.format(i, j)\n con = model.getConstrByName(con_name)\n # We pay for arcs that are not already open\n if arc_open[arc].lb == 0:\n arc_open[arc].obj = fixed_cost[arc]\n for c in xrange(commodities):\n model.chgCoeff(con, flow[c, arc], demand[c])\n flow[c, arc].obj = variable_cost[arc] * demand[c]\n model.optimize()\n if model.status == grb.GRB.status.INFEASIBLE:\n model.computeIIS()\n print 'model is infeasible'\n model.write(str(model.ModelName) + '_{}.ilp'.format(t))\n if model.SolCount > 0:\n objective += model.objVal\n # If we use an arc and it has not been opened before, we should\n # mark it as open now, and keep it open all along\n for count, var in enumerate(arc_open):\n if var.X > 0.1:\n var.lb = 1.\n var.obj = 0.\n if np.sum(open_arcs[:t, count]) < 10e-5:\n open_arcs[t, count] = 1.\n print 'Period : {} Objective value: {}'.format(t, objective)\n\n return objective, open_arcs", "def find_closest_state(H, reference_state, QN):\n \n #Make a state vector for reference state\n reference_state_vec = reference_state.state_vector(QN)\n \n #Find eigenvectors of the given Hamiltonian\n E, V = np.linalg.eigh(H)\n \n #Find out which of the eigenstates of H corresponds to reference state\n state_index = find_state_idx(reference_state_vec,V,n=1)\n \n #Find state vector of state corresponding to reference\n state_vec = V[:,state_index:state_index+1]\n \n #return the state\n state = matrix_to_states(state_vec,QN)[0]\n \n return state", "def find_reddest_pixel_fast(img): \n img = np.array(img, dtype = 'int32')\n location = cv2.minMaxLoc((img[:, :, 2] - img[:, :, 1]) + (img[:, :, 2] - img[:, :, 0]))[3]\n return location", "def closest_approach_to_camera(scene, speaker_object) -> (float, int):\n max_dist = sys.float_info.max\n at_time = scene.frame_start\n for frame in range(scene.frame_start, scene.frame_end + 1):\n scene.frame_set(frame)\n rel = speaker_object.matrix_world.to_translation() - scene.camera.matrix_world.to_translation()\n 
dist = norm(rel)\n\n if dist < max_dist:\n max_dist = dist\n at_time = frame\n\n return max_dist, at_time", "def nearest_neigbor(self, pc):\n coord = get_coordinates(pc)\n # deliveries\n pdist_deliv = {haversine(coord[0], coord[1], pcoord[1][0], pcoord[1][1]):pc for pc, pcoord in self.state.D_k.items()}\n pdist_list_deliv = list(pdist_deliv.keys())\n if len(pdist_list_deliv) > 0:\n val_deliv_min = min(pdist_list_deliv)\n else:\n val_deliv_min = 1e6 # great value to be discarded when comparing with val_pickup_min\n # pickups\n pdist_pickup = {haversine(coord[0], coord[1], pcoord[-1][0], pcoord[-1][1]):pc for pc, pcoord in self.state.P_k.items()}\n pdist_list_pickup = list(pdist_pickup.keys())\n\n if len(pdist_list_pickup) > 0:\n val_pickup_min = min(pdist_list_pickup)\n else:\n val_pickup_min = 1e6 # great value to be discarded when comparing with val_pickup_min\n\n if val_deliv_min == val_pickup_min and val_deliv_min == 1e6:\n print(\"All jobs completed: go to wait or stop if it's 12pm\")\n return 0\n\n if val_deliv_min < val_pickup_min:\n return pdist_deliv[val_deliv_min]\n\n elif val_deliv_min >= val_pickup_min:\n return pdist_pickup[val_pickup_min]\n else:\n raise valueError('Impossible comparison between val_deliv_min and val_pickup_min ')", "def _determine_next_eval_point(self):\n anc_data = self._get_ancillary_data_for_acquisition()\n acq_to_use = getattr(acquisitions.asy, self.options.acq.lower())\n next_eval_point = acq_to_use(self.gp, self.acq_optimise, anc_data)\n return next_eval_point", "def get_hits(event, path, hit_charge_quant, min_hit_charge, angsens_model=None):\n photons = path[0] == 'photons'\n\n series = get_path(event, path)\n\n if photons:\n time_window_start = 0.\n time_window_stop = 0.\n if angsens_model is not None:\n if isinstance(angsens_model, string_types):\n angsens_poly, _ = load_angsens_model(angsens_model)\n elif isinstance(angsens_model, np.polynomial.Polynomial):\n angsens_poly = angsens_model\n else:\n raise TypeError('`angsens_model` is {} but must be either'\n ' string or np.polynomial.Polynomial'\n .format(type(angsens_model)))\n\n else:\n trigger_hierarchy = event['triggers']['I3TriggerHierarchy']\n time_window_start = np.inf\n time_window_stop = -np.inf\n for trigger in trigger_hierarchy:\n if 'key' in trigger.dtype.names: # New (more correct) TRIGGER_T struct\n trigger_key = trigger['key']\n source = trigger_key['source']\n tr_type = trigger_key['type']\n config_id = trigger_key['config_id']\n else: # old TRIGGER_T had triggerkey fields at same level as trigger\n source = trigger['source']\n tr_type = trigger['type']\n config_id = trigger['config_id']\n\n # Do not expand the in-ice window based on GLOBAL triggers (of\n # any TriggerTypeID)\n if source == TriggerSourceID.GLOBAL:\n continue\n\n tr_time = trigger['time']\n\n # TODO: rework to _only_ use TriggerConfigID?\n # Below values can be extracted by running\n # $I3_SRC/trigger-sim/resources/scripts/print_trigger_configuration.py -g GCDFILE\n trigger_handled = False\n if tr_type == TriggerTypeID.SIMPLE_MULTIPLICITY:\n if source == TriggerSourceID.IN_ICE:\n if config_id == TriggerConfigID.SMT8_IN_ICE:\n trigger_handled = True\n left_dt = -4e3\n right_dt = 5e3 + 6e3\n elif config_id == TriggerConfigID.SMT3_DeepCore:\n trigger_handled = True\n left_dt = -4e3\n right_dt = 2.5e3 + 6e3\n elif tr_type == TriggerTypeID.VOLUME:\n if source == TriggerSourceID.IN_ICE:\n trigger_handled = True\n left_dt = -4e3\n right_dt = 1e3 + 6e3\n elif tr_type == TriggerTypeID.STRING:\n if source == 
TriggerSourceID.IN_ICE:\n trigger_handled = True\n left_dt = -4e3\n right_dt = 1.5e3 + 6e3\n\n if not trigger_handled:\n raise NotImplementedError(\n 'Trigger TypeID {}, SourceID {}, config_id {} not'\n ' implemented'\n .format(TriggerTypeID(tr_type).name, # pylint: disable=no-member\n TriggerSourceID(source).name, # pylint: disable=no-member\n config_id)\n )\n\n time_window_start = min(time_window_start, tr_time + left_dt)\n time_window_stop = max(time_window_stop, tr_time + right_dt)\n\n hits = []\n hits_indexer = []\n offset = 0\n\n for (string, dom, pmt), hits_ in series:\n # -- Filter the pulses -- #\n if hit_charge_quant > 0:\n hits_[\"charge\"] = QUANTIZE_VEC(hits_[\"charge\"], hit_charge_quant)\n if min_hit_charge > 0:\n hits_ = hits_[hits_[\"charge\"] >= min_hit_charge]\n\n num = len(hits_)\n if num == 0:\n continue\n\n sd_idx = const.get_sd_idx(string=string, om=dom, pmt=pmt)\n sd_hits = np.empty(shape=num, dtype=HIT_T)\n sd_hits['time'] = hits_['time']\n if not photons:\n sd_hits['charge'] = hits_['charge']\n elif angsens_model:\n sd_hits['charge'] = angsens_poly(hits_['coszen'])\n else:\n sd_hits['charge'] = 1\n\n hits.append(sd_hits)\n hits_indexer.append((sd_idx, offset, num))\n offset += num\n\n if len(hits) == 0:\n hits = np.empty(shape=0, dtype=HIT_T)\n hits_indexer = np.empty(shape=0, dtype=SD_INDEXER_T)\n hits_summary = np.empty(shape=0, dtype=HITS_SUMMARY_T)\n return hits, hits_indexer, hits_summary\n\n hits = np.concatenate(hits) #, dtype=HIT_T)\n if hits.dtype != HIT_T:\n raise TypeError('got dtype {}'.format(hits.dtype))\n\n hits_indexer = np.array(hits_indexer, dtype=SD_INDEXER_T)\n\n hit_times = hits['time']\n hit_charges = hits['charge']\n total_charge = np.sum(hit_charges)\n\n earliest_hit_time = hit_times.min()\n latest_hit_time = hit_times.max()\n average_hit_time = np.sum(hit_times * hit_charges) / total_charge\n\n num_hits = len(hits)\n num_doms_hit = len(hits_indexer)\n\n hits_summary = np.array(\n (\n earliest_hit_time,\n latest_hit_time,\n average_hit_time,\n total_charge,\n num_hits,\n num_doms_hit,\n time_window_start,\n time_window_stop,\n ),\n dtype=HITS_SUMMARY_T,\n )\n\n return hits, hits_indexer, hits_summary", "def _get_closest_control_zone(x: float, y: float, hp_info: pd.DataFrame) -> int:\n\n min_dist = CONTROL_ZONE_RADIUS\n min_ind = 0\n\n for ind in hp_info.index:\n hp_x = hp_info[0][ind]\n hp_y = hp_info[1][ind]\n\n dist = np.sqrt((x - hp_x) ** 2 + (y - hp_y) ** 2)\n\n if dist < min_dist:\n min_dist = dist\n min_ind = ind\n\n return min_ind", "def nearest_neighbor(self, image, fx, fy):\n #Write your code for nearest neighbor interpolation here\n w = image.shape[1]\n h = image.shape[0]\n\n newW = int(w*float(fx))\n newH = int(h*float(fy))\n\n ratioW = w/newW\n ratioH = h/newH\n\n import numpy as np\n\n newImg = np.zeros((newH, newW), np.uint8)\n for i in range(newImg.shape[0]):\n for j in range(newImg.shape[1]):\n\n x = round(ratioH*i)\n y = round(ratioW*j)\n\n x = x-1 if x == h else x\n y = y-1 if y == w else y\n\n temp = image[x, y]\n newImg[i,j] = temp\n\n image = newImg\n return image", "def hindsight(video_state, dist=16):\n assert dist < 100, 'Too much hindsight requested, you\\'re not that smart!'\n im_paths = video_state.frame_paths[max(video_state.image_idx - 1 - dist, 0): max(video_state.image_idx - 1, 0)]\n if len(im_paths) > 0:\n image_grid = grid_images(im_paths=im_paths, w=np.ceil(np.sqrt(dist)), h=np.ceil(np.sqrt(dist)), margin=1)\n cv2.imshow('Hindsight', image_grid)", "def get_corner(self, time):\n if self.start_time <= time <= 
self.end_time:\n diff = time - self.start_time\n return self.i[diff][0, 0], self.j[diff][0, 0]\n else:\n return -1, -1", "def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def rk4_glidingHST_exact(xy, v, NL, KL, BM, Mm, params):\n h = params['h']\n dx1 = h * v\n dv1 = h * fglidingHST_exact(xy, v, NL, KL, BM, Mm, params)\n dx2 = h * (v + dv1 / 2.)\n dv2 = h * fglidingHST_exact(xy + dx1 / 2., v + dv1 / 2., NL, KL, BM, Mm, params)\n dx3 = h * (v + dv2 / 2.)\n dv3 = h * fglidingHST_exact(xy + dx2 / 2., v + dv2 / 2., NL, KL, BM, Mm, params)\n dx4 = h * (v + dv3)\n dv4 = h * fglidingHST_exact(xy + dx3, v + dv3, NL, KL, BM, Mm, params)\n xout = xy + (dx1 + 2. * dx2 + 2. * dx3 + dx4) / 6.\n vout = v + (dv1 + 2. * dv2 + 2. 
* dv3 + dv4) / 6.\n\n # print 'dv1 = ', dv1[:,3][10]\n # print 'rk BM = ', BM\n\n # Ensure theta remains positive\n # xout[:,2] = np.abs(xout[:,2])\n\n if params['BCtype'] == 'excite':\n d = params['amplitude']\n l = params['l']\n freq = params['frequency']\n x0_BIND = params['x0_BIND']\n y0_BIND = params['y0_BIND']\n BIND = params['BIND']\n w3 = params['w3'][BIND]\n\n phidot = freq\n phi_new = (xy[BIND, 3] + phidot * h)[0]\n psidot = w3 - phidot * np.cos(xy[BIND, 2])\n\n # print 'T1 = ', x0_BIND+d*np.cos(phi_new)\n # print 'T2 = ', y0_BIND+d*np.sin(phi_new)\n # print 'T3 = ', xy[BIND,2]\n # print 'T4 = ', xy[BIND,3]+phidot*h\n # print 'T5 = ', xy[BIND,4]+psidot*h\n\n xout[BIND, :] = np.array([x0_BIND + d * np.cos(phi_new), y0_BIND + d * np.sin(phi_new),\n xy[BIND, 2], xy[BIND, 3] + phidot * h, xy[BIND, 4] + psidot * h]).reshape(1, 5)\n vout[BIND, :] = np.array([-d * np.sin(phi_new) * phidot, d * np.cos(phi_new) * phidot,\n 0.0, phidot, psidot]).reshape(1, 5)\n\n xout[np.isnan(xout)] = xy[np.isnan(xout)]\n vout[np.isnan(vout)] = v[np.isnan(vout)]\n\n # Modulo phi\n xout[:, 3] = np.mod(xout[:, 3], 2. * np.pi)\n # Modulo theta\n # xout[:,2] = np.mod(xout[:,4],np.pi)\n\n return xout, vout", "def model_lookup2(taskid, beam):\n # Assumes running on happili-05:\n model_dir = '/data/kutkin/cbeams/'\n\n weekly_gaussian_regression = False\n if weekly_gaussian_regression == True:\n all_dates = get_dates()\n all_beam_stats = get_beam_stats(all_dates)\n if beam > all_beam_stats.shape[1] - 1:\n print(\"\\t{}: Pick a valid beam number 0-39.\".format(beam))\n exit()\n beam_stats = all_beam_stats[:, beam]\n\n # Divide into before & after beam attenuation on October 1st (big impact on beam quality)!\n taskid = str(taskid)[:6]\n if int(taskid) < 191001:\n # *** Until we have a full beam complement ***:\n index = np.where(all_dates == '190821')[0][0]\n # index = np.where(all_dates == '190916')[0][0]\n dates = all_dates[:index + 1]\n beams = beam_stats[:index + 1]\n else:\n # index = np.where(all_dates == '191002')[0][0]\n index = np.where(all_dates == '191023')[0][0]\n dates = all_dates[index:]\n beams = beam_stats[index:]\n\n print(\"[MODEL_LOOKUP] Searching for appropriate beam model for beam {}.\".format(beam))\n if np.all(beams == 0):\n print(\"\\tNo good beam model options for period when this was observed. Do more drift scans (or edit code).\")\n exit()\n elif len(beams[beams == 1]) == 1:\n # If only one good beam model exists, use it.\n best = dates[beams == 1][0]\n else:\n # Use nearest. 
Don't have enough beam statistics for floor, I think.\n dates = dates[beams == 1]\n best = nearest_date(dates, taskid)\n\n # *** Until we have a full beam complement ***:\n if beam >= 32:\n # best = '191002'\n best = '191023'\n\n model = model_dir + '{:02}_gp_avg_orig.fits'.format(beam)\n\n return model", "def circle_of_least_confusion(self):\n ff=beam_field() \n ff.rectangular_grid(1,2000,self.entrance_pupil)\n ff.propagate(self.surfaces)\n def f(x):\n pl=ff.project_onto_plane(x)\n return max(pl[:,1])\n \n # m=self.marginal_ray\n if hasattr(self, 'start'):\n start=self.start\n else:\n# start=(m.Q_p[-1,0,2]-m.Q_p[-2,0,2])/2\n start=(self.surfaces[-1].pos()-self.surfaces[-2].pos())/2\n #print(start)\n res=minimize(f,(start), method='Nelder-Mead')\n self.start=res.final_simplex[0][0,0]\n \n return res.final_simplex[0][0,0],res.final_simplex[1][0]", "def test_get_neigh_csi_components_one_time(self):\n\n (\n this_num_pred_oriented_tp, this_num_obs_oriented_tp,\n this_num_false_positives, this_num_false_negatives\n ) = learning_curves._get_neigh_csi_components_one_time(\n actual_target_matrix=ACTUAL_TARGET_MATRIX,\n probability_matrix=PROBABILITY_MATRIX,\n eval_mask_matrix=MASK_MATRIX,\n matching_distance_px=NEIGH_DISTANCE_PX\n )\n\n self.assertTrue(numpy.isclose(\n this_num_pred_oriented_tp, NEIGH_NUM_PRED_ORIENTED_TP,\n atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_num_obs_oriented_tp, NEIGH_NUM_OBS_ORIENTED_TP, atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_num_false_positives, NEIGH_NUM_FALSE_POSITIVES, atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_num_false_negatives, NEIGH_NUM_FALSE_NEGATIVES, atol=TOLERANCE\n ))", "def find_best_path(self, paths, sw, util, duration, time_now):\n bestpath = None\n bestpathmetric = None # [0,1] lower means better path\n bestpathlen = None # lower -> better path\n candidatepaths = []\n \n assert len(paths) == 2\n \n path_to_shift, shift_by = self.calculate_what_to_shift(paths, sw)\n\n pathmetrics = {}\n paths_by_length = {}\n metrics = []\n metricpaths = {}\n for path in paths:\n metric, length = self.compute_path_metric(sw, path, 0, 0, local_contrib=True)\n paths_by_length[length] = path\n metrics.append(metric)\n assert metric >= 0 \n pathmetrics[\" \".join(path)] = metric\n metricpaths[metric] = path\n\n logging.debug(\"SS FBP PATH METRICS:, %s\", str(metricpaths))\n if path_to_shift == None:\n # return shortest path\n logging.debug(\"SS FBP Returning LOCAL: %s\", str((paths_by_length[min(paths_by_length.keys())],0)))\n return (paths_by_length[min(paths_by_length.keys())], 0)\n \n \n path_to_shift_metric = pathmetrics.pop(\" \".join(path_to_shift))\n path_to_receive_metric = pathmetrics.pop(pathmetrics.keys()[0])\n logging.debug(\"SS FBP Path to Recv: %s\", str(metricpaths[path_to_receive_metric]))\n\n if (path_to_receive_metric == 0):\n logging.debug(\"SS FBP EARLY Returning : %s\", str((metricpaths[min(metrics)], 0)))\n return (metricpaths[min(metrics)], 0)\n else:\n current_ratio = path_to_shift_metric * 1.0 / path_to_receive_metric\n\n logging.debug(\"SS FBP CURRENT RATIO: %s\", str(current_ratio))\n\n\n goal_path_to_shift_metric = path_to_shift_metric * (1 - (shift_by * self.alpha))\n goal_path_to_receive_metric = path_to_receive_metric + (path_to_shift_metric * (shift_by * self.alpha))\n\n if (goal_path_to_receive_metric == 0):\n # large number for practical purposes\n goal_ratio = 100000\n else:\n goal_ratio = goal_path_to_shift_metric * 1.0 / goal_path_to_receive_metric\n\n logging.debug(\"SS FBP GOAL 
RATIO: %s\", str(goal_ratio))\n\n # FINALLY DECIDE WHICH PATH TO RETURN BASED ON GOAL-Current RATIO\n if goal_ratio - current_ratio < 0:\n # return path with lower utiliztion\n logging.debug(\"SS FBP LOWER Returning : %s\", str((metricpaths[min(metrics)], 0)))\n return (metricpaths[min(metrics)], 0)\n \n if goal_ratio - current_ratio > 0:\n # return path with higher utilization\n logging.debug(\"SS FBP HIGHER Returning : %s\", str((metricpaths[max(metrics)], 0)))\n return (metricpaths[max(metrics)], 0)\n\n if goal_ratio - current_ratio == 0:\n # return shortest path\n logging.debug(\"SS FBP Returning LOCAL: %s\",\n str((paths_by_length[min(paths_by_length.keys())], 0)))\n return (paths_by_length[min(paths_by_length.keys())], 0)", "def find_circuit_image(self, url):\n try:\n soup = set_soup(url)\n img_url_container = soup.find(\n \"div\", {\"class\": \"f1-race-hub--schedule-circuit-map\"}\n )\n img_url = img_url_container.find(\"a\")[\"href\"]\n soup = set_soup(self.BASE_URL + img_url)\n img_container = soup.find(\"div\", {\"class\": \"f1-race-hub--map-container\"})\n img = img_container.find(\"img\", {\"class\": \"lazy\"})[\"data-src\"]\n return self._add_timestamp_to_image(img)\n except Exception:\n logger.exception(\"Error getting circuit image\")", "def get_closest_waypoint_idx(self):\n\n # TODO:\n # The churchlot waypoints are roughly circular but have self-\n # intersecting endpoints, so I'm not sure how this code will \n # yield good results. Might need some additional filtering\n # logic to force a choice consistent with the vehicle pose yaw\n # in order to avoid jumping onto the wrong path.\n\n # Vehicle position short reference\n pos = self.pose.pose.position\n\n # Find the closest waypoint index\n # If closest index is zero bump to 1 since we don't want slice for \n # prev_coord to look at the final map waypoint.\n closest_idx = max(self.waypoint_tree.query([pos.x, pos.y], 1)[1], 1)\n\n # Get closest point\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n # Convert coordinates into 2D numpy vectors\n closest_vec = np.array(closest_coord)\n prev_vec = np.array(prev_coord)\n pos_vec = np.array([pos.x, pos.y])\n\n # Find vec(close-prev) dot vec(pos-close) \n val = np.dot(closest_vec - prev_vec, pos_vec - closest_vec)\n\n # If pos is ahead of closest...\n if val > 0: \n\n # Advance index so that closest is ahead of pos\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n # Return closest index\n return closest_idx", "def get_index_of_surface_gate(data, setup={}):\n alts = data['alt']\n return np.argmin(np.abs(alts), 1)", "def get_cheapest_neighbor(client, facilities):\n adj_facilities = client.get_facility_list(facilities)\n return min(adj_facilities, key=lambda fac: fac['cost'])", "def shiftDetectorONH(frame, onh_info, x_onh_bounds):\n\n x_min = x_onh_bounds[0]-30\n x_max = x_onh_bounds[1]+30\n frame_len = frame.shape[1]\n mid_x = int(frame_len/2)\n\n norm = frame/np.max(frame)#(2**16)\n #if the frame midpoint is inside the bbox x bounds\n #this section is to avoid using any part of the onh as the a-scan to reference when doing the cross-correlation\n if mid_x>=x_min and mid_x<=x_max:\n d_min = mid_x-x_min\n d_max = x_max-mid_x\n #if mid_x is closer to x_min but not close to the edge of the image -- at least 75 px\n if d_min<d_max and x_min>75:\n acol = int((frame_len/2)-(d_min+1))\n elif x_max<frame_len-75:\n acol = int((frame_len/2)+(d_max+1))\n else:\n acol = int((frame_len/2)-(d_min+1))\n anchorCol = norm[:,acol]\n 
else:\n anchorCol = norm[:,mid_x]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame_len)]\n\n #if onh detection is bad, bbox might be huge. The onh area should be less that 10% of the image (256*1024 pixels)\n if onh_info.area/(2**18) > 0.10:\n return shifts\n #old, changed 1-29-2018 because this is really about location, not size\n #if x_min<100 or x_max>902:\n #return shifts\n\n #This ensures that clean_shifts and clean_x are the same length and comes into play when the ONH is basically touching the\n #side of the image.\n #if the onh is too far to the right side of the frame, only use the left side info\n #fit a quadratic to get LOCAL curvature\n if x_max>=frame_len-100:\n #this uses the entire bscans to get the curvature, otherwise it will fit very poorly\n clean_x = np.arange(0,x_min,1)\n curve_fit_params = np.polyfit(clean_x, shifts[0:x_min],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #if the onh is too far to the left side, only use right side info\n elif x_min<100:\n clean_x = np.arange(x_max+1,frame_len,1)\n curve_fit_params = np.polyfit(clean_x, shifts[x_max+1:frame_len],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #Everything is normal, everyone is happy.\n else:\n #need to cut out onh, I don't think there is a way to index this to put it\n #directly in polyfit\n clean_shifts = np.array(shifts[0:x_min] + shifts[x_max+1:frame_len])\n clean_x = np.concatenate((np.arange(x_min-100,x_min,1),np.arange(x_max+1,x_max+101,1)))\n curve_fit_params = np.polyfit(clean_x, clean_shifts[x_min-100:x_min+100],3)\n curve_fit = lambda x: curve_fit_params[0]*x**3 + curve_fit_params[1]*x**2 + curve_fit_params[2]*x + curve_fit_params[3]\n #!!astype added 4-18-19 because floats throw an error when correcting shifts\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = np.insert(clean_shifts, x_min+1, corrected_shifts)\n\n return list(clean_shifts)", "def search(world_state, robot_pose, goal_pose):\n if world_state.shape[0] == 0 or world_state.shape[1] == 0:\n print(\"Error, empty world_state!!!\")\n return None\n if not is_pos_valid(robot_pose, world_state.shape):\n print(\"Error, invalid robot_pose!!!\", robot_pose)\n return None\n if not is_pos_valid(goal_pose, world_state.shape):\n print(\"Error, invalid goal_pose!!!\", goal_pose)\n return None\n\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)] # orthogonal directions\n found = False\n\n x, y = robot_pose\n g = 0\n h = heuristic(robot_pose, goal_pose)\n f = g + h\n open = [[f, x, y]]\n came_from = {}\n came_from[robot_pose] = None\n cost_so_far = {}\n cost_so_far[robot_pose] = 0\n\n while open:\n open.sort() # sort based on f value\n current = open.pop(0)\n\n x, y = current[1:]\n g = cost_so_far[(x, y)]\n\n if (x, y) == goal_pose:\n found = True\n break\n else:\n # find available next positions\n for direction in directions:\n x2 = x + direction[0]\n y2 = y + direction[1]\n\n # check whether x2 and y2 are valid\n if not is_pos_valid((x2, y2), world_state.shape):\n continue\n\n g2 = g + 1\n if world_state[x2, y2] == 0 and 
((x2, y2) not in cost_so_far or g2 < cost_so_far[(x2, y2)]):\n\n h2 = heuristic((x2, y2), goal_pose)\n f2 = g2 + h2\n open.append([f2, x2, y2])\n came_from[(x2, y2)] = (x, y)\n cost_so_far[(x2, y2)] = g2\n if found:\n path = [goal_pose]\n current = goal_pose\n while came_from[current]:\n current = came_from[current]\n path.append(current)\n\n path.reverse()\n return path\n\n else:\n return None", "def findClosestInds(drift, ncfile):\n\n inds_space = closest_dist(drift.Variables.lon, drift.Variables.lat, \\\n ncfile.Grid.lonc, ncfile.Grid.latc)\n\n inds_time = closest_vals(drift.Variables.matlabTime, ncfile.Variables.matlabTime)\n\n return inds_space, inds_time", "def _get_closest_waypoint(self, pose):\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = self.waypoints_tree.query([x,y],1)[1]\n\n return closest_idx", "def find_path(masked_image,start_pos, target_pos, size_compress_index, active_particle_size,\r\n compress = False):\r\n \r\n \r\n not_image = cv2.bitwise_not(masked_image)\r\n image_index = size_compress_index\r\n \r\n start_x,start_y = start_pos\r\n end_x, end_y = target_pos\r\n \r\n ker1=cv2.getStructuringElement(cv2.MORPH_RECT, (3,3),anchor =(-1,-1))\r\n not_image = cv2.dilate(not_image,ker1,iterations = active_particle_size//2)\r\n\r\n small_image = cv2.resize(not_image, (st_width//image_index, st_height//image_index),interpolation = cv2.INTER_AREA)\r\n ret,small_image = cv2.threshold(small_image,127,255,cv2.THRESH_BINARY)\r\n \r\n small_image = cv2.bitwise_not(small_image)\r\n # \r\n #cv2.imshow(\"thresh\", small_image)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows() \r\n \r\n \r\n matrix = small_image.tolist()\r\n grid = Grid(matrix=matrix)\r\n\r\n start = grid.node(int(start_x//image_index), int(start_y//image_index))\r\n end = grid.node(int(end_x//image_index), int(end_y//image_index))\r\n\r\n finder = AStarFinder(diagonal_movement = DiagonalMovement.never)\r\n path, runs = finder.find_path(start, end, grid)\r\n \r\n new_path = list()\r\n for p in path:\r\n x,y = p\r\n x = x*image_index\r\n y = y*image_index\r\n new_path.append((x,y))\r\n \r\n compressed_path = compress_path(new_path)\r\n \r\n if compress == True:\r\n res_path = compressed_path\r\n else:\r\n res_path = new_path\r\n \r\n return res_path, runs", "def frame_analysis_yellow(fW, fH, frame):\n\n # Compute the ratio to go from pixel coordinate to grid coordinate\n cam_grid_ratio = (gw / fW, gh / fH)\n\n # Compute the yellow mask in order to find the uper square on the thymio\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, low_yellow, up_yellow)\n\n # Find to contours of the square in order to compute the center of it\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n areas = [cv2.contourArea(c) for c in contours]\n\n # If we don't find the square, return impossible value for the followin code\n # to know no measurement were possible\n if len(areas) < 1:\n\n # Display the resulting frame\n x2, y2 = (-1, -1)\n xf, yf = (-1, -1)\n\n else:\n\n # Find the largest moving object in the image\n max_index = np.argmax(areas)\n cnt = contours[max_index]\n x, y, w, h = cv2.boundingRect(cnt)\n\n # Change from pixel to grid coordinate\n xf = x + int(w / 2)\n yf = y + int(h / 2)\n\n x2 = xf * cam_grid_ratio[0]\n y2 = gh - yf * cam_grid_ratio[1]\n\n frame = frame[:, :, ::-1]\n\n return x2, y2, xf, yf, frame", "def main():\n G = nx.gnp_random_graph(100, 0.5)\n centrality = nx.eigenvector_centrality(G)\n avg_centrality = sum(centrality.values()) / 
len(G)\n\n def has_high_centrality(v):\n return centrality[v] >= avg_centrality\n\n source = 0\n value = centrality.get\n condition = has_high_centrality\n\n found_node = progressive_widening_search(G, source, value, condition)\n c = centrality[found_node]\n print('found node {0} with centrality {1}'.format(found_node, c))", "def closest_time(self, when):\n try:\n return np.argmin(np.abs(self.time.datetime - when))\n except AttributeError:\n self.load_time()\n return np.argmin(np.abs(self.time.datetime - when))", "def nearest(self, query):\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best", "def findNearestUnstructNode(xFRF, yFRF, ugridDict):\n\n assert 'xFRF' in list(ugridDict.keys()), 'Error: xFRF is a required key in ugridDict'\n assert 'yFRF' in list(ugridDict.keys()), 'Error: yFRF is a required key in ugridDict'\n\n points = np.column_stack((ugridDict['xFRF'], ugridDict['yFRF']))\n qPt = np.column_stack((xFRF, yFRF))\n\n # compute nearest neighbor\n kdt = scipy.spatial.cKDTree(points)\n dist, ind = kdt.query(qPt, 1)\n\n return ind, dist", "def find_tape():\n\n _, frame = CAP.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, color_lower, color_upper)\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Find all valid pair rects, and reutrn if none found\n pair_rects = get_pair_rects(contours)\n if len(pair_rects) == 0:\n return\n\n # If found, continue on and post results\n center = closest_center(pair_rects)\n\n to_send = '{}:{}\\n'.format(\n round(time.time(), 3), round(degrees(horizontal_angle(center[0])), 3))\n print(to_send)\n s.send(bytearray(to_send, 'utf-8'))", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def hough_line_own(image):\n\n # All possible angles and rhos\n thetas = np.deg2rad(np.arange(-90.0, 90.0))\n w, h = image.shape\n max_dist = int(np.ceil(np.sqrt(w**2 + h**2)))\n rhos = np.linspace(-max_dist, max_dist, max_dist*2)\n print(rhos.shape, thetas.shape)\n\n # Save shit for later\n theta_cos = np.cos(thetas)\n theta_sin = np.sin(thetas)\n num_thetas = len(thetas)\n\n result = np.zeros((2 * max_dist, num_thetas), dtype=np.uint64)\n\n # Only get the non zero indexes of the image\n yidx, xidx = np.nonzero(image)\n\n for i in range(len(xidx)):\n x = xidx[i]\n y = yidx[i]\n\n for theta_idx in range(num_thetas):\n rho = int(round(x * theta_cos[theta_idx] +\n y * theta_sin[theta_idx]) +\n max_dist)\n result[rho, theta_idx] += 1\n\n return result, thetas, rhos", "def greedy_match(cost_matrix):\n num_detections, num_tracks = cost_matrix.shape\n distance_1d = cost_matrix.reshape(-1)\n index_1d = np.argsort(distance_1d)\n index_2d = np.stack([index_1d // num_tracks, index_1d % num_tracks], axis=1).astype(int, copy=False)\n\n matched_indices = []\n matched_firsts = set()\n matched_seconds = set()\n\n for (first_id, second_id) in index_2d:\n if first_id not in matched_firsts and second_id not in matched_seconds:\n matched_seconds.add(second_id)\n matched_firsts.add(first_id)\n matched_indices.append([first_id, second_id])\n return np.array(matched_indices)" ]
[ "0.61136955", "0.5495255", "0.53449804", "0.53088015", "0.5267729", "0.5264703", "0.52540445", "0.51978594", "0.51893973", "0.5173132", "0.5172842", "0.517216", "0.5133897", "0.51184076", "0.50679076", "0.50616145", "0.5060293", "0.5051561", "0.5042053", "0.5037389", "0.50302774", "0.49955773", "0.49773225", "0.49667892", "0.49656028", "0.49587765", "0.49531177", "0.49246022", "0.49225846", "0.49138683", "0.4908642", "0.48944494", "0.48929837", "0.4887439", "0.48843983", "0.4879022", "0.48677593", "0.4863117", "0.4861352", "0.48496264", "0.48380682", "0.4827521", "0.48129907", "0.48028445", "0.48016328", "0.4801448", "0.478894", "0.47809514", "0.4773352", "0.47627428", "0.47538537", "0.4751197", "0.47386608", "0.4724952", "0.47247815", "0.4720376", "0.471607", "0.47138497", "0.47052637", "0.470123", "0.46996817", "0.46995085", "0.46960923", "0.46951973", "0.46939346", "0.46881226", "0.46804863", "0.46795607", "0.46774203", "0.46710935", "0.46702507", "0.46661645", "0.46652076", "0.4665079", "0.4664512", "0.46582785", "0.46473303", "0.46430513", "0.46427292", "0.4641576", "0.46405387", "0.46377677", "0.46328282", "0.46284035", "0.46282834", "0.4620597", "0.4616992", "0.4612127", "0.46120408", "0.46008688", "0.4597696", "0.45937666", "0.45937267", "0.45927638", "0.45904285", "0.45903984", "0.45823768", "0.4578161", "0.4578114", "0.45776147" ]
0.58393806
1
Create structures defining acquisition time for fieldmaps and anatomicals. First find the fieldmap (or the high-resolution structural if no fieldmap was collected) nearest in acquisition time, on average, to the EPIs. Then define this series as the one that should be in register with the EPIs.
def _SetAnatTgts(self):
    anat_candidates = {}
    fmap_candidates = {}
    for entry in self.entry_map['anat']:
        if self.info[entry]['type'] == 'T1High':
            anat_candidates[entry] = self.info[entry]['acqtime']

    # Find the valid anatomical acquired nearest to each fieldmap.
    tdiff_min = 1e6
    if len(self.entry_map['fmap']) > 0:
        for entry in self.entry_map['fmap']:
            anat_tgt = self._FindNearestAnat(self.info[entry]['acqtime'])
            self.info[entry]['anat_ref'] = anat_tgt
    else:
        # No fieldmaps were collected. Find the structural nearest the
        # beginning of the EPIs.
        if len(self.entry_map['anat']) == 1:
            anat_tgt = self.entry_map['anat'][0]
        else:
            epi_start = []
            tmin = 1e6
            for anat in self.entry_map['anat']:
                if self.info[anat]['type'] != 'T1High':
                    continue
                tsum1 = 0
                tsum2 = 0
                for epi in self.entry_map['epi']:
                    # Difference between start of structural and first epi.
                    tsum1 += abs(self.info[anat]['acqtime'] -
                                 self.info[epi]['acqtime'])
                    # Difference between start of structural and last epi.
                    tsum2 += abs(self.info[anat]['acqtime'] -
                                 (self.info[epi]['acqtime'] +
                                  self.info[epi]['TR']*self.info[epi]['tdim']))
                if tsum1 < tmin or tsum2 < tmin:
                    tmin = min(tsum1, tsum2)
                    anat_tgt = anat

    # Resolve anatomical names and links.
    self._SetAnatNames(anat_tgt)

    # Set appropriate attributes in the entry for each EPI.
    for epi in self.entry_map['epi']:
        if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr:
            fmap_entry = self.info[epi]['fmap_entry']
            anat_ref = self.info[fmap_entry]['anat_ref']
            self.info[epi]['anat_tgt'] = fmap_entry
            self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile']
            if self.align_fmaps or (not self.no_align_fmaps and
                    self._SetCatMotionFmapMats(fmap_entry, anat_ref)):
                # Concatenate motion-correction matrices with the transform
                # from fieldmap to structural. Use the registered fieldmap.
                self.info[epi]['catmats'] = True
                fmap_info = self.info[self.info[epi]['fmap_entry']]
                self.info[epi]['fmapname'] = \
                    fmap_info['imgfile_r'] + fmap_info['suffix']
            else:
                # Assume the fieldmap is in register with the structural.
                self.info[epi]['catmats'] = False
        else:
            self.info[epi]['anat_tgt'] = anat_tgt
            self.info[epi]['anat_matfile'] = None
            self.info[epi]['catmats'] = False
        self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \
                                      self.info[anat_tgt]['suffix']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetEpiAcqTimes(self, series):\n# Find minimum and maximum start times for each acquistion in series.\n self.epi_times = {}\n for entry in self.entry_map['epi']:\n# Loop through each file in this series.\n if self.info[entry]['series'] == series and \\\n self.info[entry]['tdim'] > 2:\n# Relate each entry to its time of acquisition.\n self.epi_times[self.info[entry]['acqtime']] = entry", "def generateForcingFields(self, conc_idx, inputs, outputs):\n\n\t\tForcing.log(\"Running %s.generateForcingFields()\"%type(self))\n\n\t\t# Some variable used later\n\t\tscalar = None\n\n\t\tif self.griddedTimeZoneFld == None:\n\t\t\t# Assume all timezones are GMT\n\t\t\tprint \"Warning! No gridded time zone information loaded. Using a field of zeros.\"\n\t\t\ttz = np.zeros((self.ni,self.nj))\n\t\telse:\n\t\t\ttz = self.griddedTimeZoneFld\n\n\t\tif len(self.species) == 0:\n\t\t\traise NoSpeciesException(\"Must specify species\")\n\t\t\treturn\n\n\t\t# We doing time averaging?\n\t\tif self.averaging in ['AVG_MAX', 'AVG_MAX8', 'AVG_MAX24']:\n\t\t\tdo_averaging=True\n\t\t\taveraging_window = self.averaging_window\n\t\telse:\n\t\t\tdo_averaging=False\n\t\t\taveraging_window = None\n\t\t\t#if self.averaging == 'AVG_MASK' or self.averaging == 'AVG_NONE'\n\t\t\tif self.averaging == 'AVG_NONE':\n\t\t\t\t# Ensure this is set right\n\t\t\t\tself.timeMask = range(0,25)\n\t\t\t# If it's the mask, then the timemask should already be set\n\n\t\t# Create zero fields to allocate our arrays\n\t\tfld_empty=np.zeros((len(self.species), self.nt, self.nk_f, self.nj, self.ni), dtype=np.float32)\n\n\t\t# Get the relative days, so [-1 0 1] for [yesterday, today, tomorrow]\n\t\trdays = inputs.keys()\n\t\t# Probably an easiesr way to initalize this since we're only writing later, but for now we'll do it.\n\t\tflds={}\n\t\tfor d in rdays:\n\t\t\tflds[d] = fld_empty.copy()\n\n\t\t# This is NOT efficient. Could probably easily make it\n\t\t# more efficient by implementing some sort of cache though..\n\t\tfor idx_s, species in enumerate(self.species):\n\t\t\t#print \"Iteratiing through species %d=%s\"%(idx_s, species)\n\n\t\t\t# Initialize the data flds. Set to zero if there's a day that doesn't exist\n\t\t\tdatas={}\n\t\t\tfor d in rdays:\n\t\t\t\tif inputs[d] is None:\n\t\t\t\t\tdatas[d] = np.zeros((self.nt, self.nk_f, self.nj, self.ni), dtype=np.float32)\n\t\t\t\telse:\n\t\t\t\t\tdatas[d] = inputs[d].variables[species][:]\n\n\t\t\t# Recall, mask is already considered in these vectors\n\t\t\tfor k in self._layers:\n\t\t\t\t# I think there's a better way to do the next two loops, don't know it though.\n\t\t\t\tfor i in range(0,self.ni):\n\t\t\t\t\tfor j in range(0,self.nj):\n\n\t\t\t\t\t\t# Spatial mask\n\t\t\t\t\t\tif not self.space[j,i]:\n\t\t\t\t\t\t\t# This is masked out. 
Set to zero and go to the next cell\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tflds[d][idx_s][0:self.nt,k,j,i] = np.zeros((self.nt), dtype=np.float32)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t#else:\n\t\t\t\t\t\t#\t# TEMP HACK!!\n\t\t\t\t\t\t# # This temp hack is used to ensure the mask is working\n\t\t\t\t\t\t#\tfld_yest[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tfld_today[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tfld_tom[0:self.nt,k,j,i] = np.ones((self.nt), dtype=np.float32)\n\t\t\t\t\t\t#\tcontinue\n\n\n\t\t\t\t\t\t# Take averaging into consideration\n\t\t\t\t\t\t# For almost all of these averagings, we'll have to\n\t\t\t\t\t\t# build a vector of all values for all times at that\n\t\t\t\t\t\t# cell. Unfortunately, the data is organized in the \n\t\t\t\t\t\t# opposite way as we want (time is the top index..)\n\t\t\t\t\t\tif do_averaging:\n\t\t\t\t\t\t\tvecs={}\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tvecs[d] = datas[d][:Forcing.dayLen,k,j,i]\n\n\t\t\t\t\t\t\t# REMOVE!\n\t\t\t\t\t\t\t#if i==self.debug_i and j==self.debug_j:\n\t\t\t\t\t\t\t#\tprint \"vec_today[%d,%d]: \"%(self.debug_j, self.debug_i), vec_today\n\n\t\t\t\t\t\t\t# Prepares a vector of values with respect to the\n\t\t\t\t\t\t\t# direction we're going to calculate the average\n\t\t\t\t\t\t\t# (forward/backward), the window size, and time\n\t\t\t\t\t\t\t# zone \n\n\t\t\t\t\t\t\tvec = Forcing.prepareTimeVectorForAvg(vecs, timezone=tz[j][i], winLen=averaging_window, debug=False)\n\t\t\t\t\t\t\t#print \"i=%d,j=%d, preped vec[%d] = %s\"%(i,j,len(vec),\" \".join(map(str, vec)))\n\n\t\t\t\t\t\t\t# Calculate the moving window average\n\t\t\t\t\t\t\tavgs = Forcing.calcMovingAverage(vec, winLen=averaging_window)\n\t\t\t\t\t\t\t#print \"i=%d,j=%d, avg vec[%d] = %s\"%(i,j,len(avgs),\" \".join(map(str, avgs)))\n\n\t\t\t\t\t\t\t# And then, for the 8-hour max to be used for a\n\t\t\t\t\t\t\t# forcing term, generate a vector for yesterday,\n\t\t\t\t\t\t\t# today and tomorrow with the forcing terms in them\n\n\t\t\t\t\t\t\tif self.timeInvariantScalarMultiplcativeFld is not None:\n\t\t\t\t\t\t\t\tscalar = self.timeInvariantScalarMultiplcativeFld[j][i]/averaging_window\n\n\t\t\t\t\t\t\tvecs = Forcing.applyForceToAvgTime(avgs, days=vecs.keys(), winLen=averaging_window, timezone=tz[j][i], min_threshold=self.threshold, forcingValue=scalar)\n\n# This was done blindly\n\t\t\t\t\t\t\tfor d in rdays:\n\t\t\t\t\t\t\t\tflds[d][idx_s][:24,k,j,i] = vecs[d]\n\n\t\t\t\t\t\telif self.averaging == 'AVG_MASK' or self.averaging == 'AVG_NONE':\n# NOT YET TESTED\n\t\t\t\t\t\t\traise NotImplementedError( \"Mask timing or no averaging is not yet tested. Averaging options=%s\"%self.averaging )\n\t\t\t\t\t\t\t# The comments assume timezone = -6\n\t\t\t\t\t\t\tfor t_gmt in self.timeMask:\n\t\t\t\t\t\t\t\t# when t_gmt = 0, t_loc = -6, so we're into yesterday\n\t\t\t\t\t\t\t\tt_loc = t_gmt + tz[j][i]\n\n\t\t\t\t\t\t\t\t# Reference the arrays\n\t\t\t\t\t\t\t\tif t_loc < 0:\n\t\t\t\t\t\t\t\t\tdfld = data_yest\n\t\t\t\t\t\t\t\t\t#ffld = fld_yest\n\t\t\t\t\t\t\t\telif t_loc>0 and t_loc<Forcing.dayLen:\n\t\t\t\t\t\t\t\t\tdfld = data_today\n\t\t\t\t\t\t\t\t\t#ffld = fld_today\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tdfld = data_tomorrow\n\t\t\t\t\t\t\t\t\t#ffld = fld_tomorrow\n\n\t\t\t\t\t\t\t\t# I have to write in GMT\n# This is wrong, as local times can write into another day.. maybe.. 
but since there's no averaging, another iteration will take care of that..\n\t\t\t\t\t\t\t\tffld = fld_today\n\n\t\t\t\t\t\t\t\t# fld[-6] is fld[18]\n\t\t\t\t\t\t\t\tval=dfld[t_loc,k,j,i]\n\t\t\t\t\t\t\t\tif threshold is not None:\n\t\t\t\t\t\t\t\t\tif val > threshold:\n\t\t\t\t\t\t\t\t\t\tforce = 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif val > 0.0:\n\t\t\t\t\t\t\t\t\t\tforce = 1\n\n\t\t\t\t\t\t\t\t# Set the field in the referenced forcing field\n\t\t\t\t\t\t\t\tffld[t_loc,k,j,i] = force\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\traise NotImplementedError( \"Unavailable time averaging method (%s) selected\"%self.averaging )\n\n\t\t\t\t\t\t#endif averaging\n\t\t\t\t\t#endfor j\n\t\t\t\t#endfor i\n\t\t\t#endfor k\n\n\t\t#endfor species\n\n\t\treturn flds", "def _SetBaseEpi(self):\n tinfo = {}\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n if self.info[entry]['fmap_entry'] is None:\n tgt = info['anat_tgt']\n else:\n tgt = info['fmap_entry']\n tgt_time = self.info[tgt]['acqtime']\n\n plane = info['plane']\n if not tinfo.has_key(plane):\n tinfo[plane] = {}\n tdiff = abs(info['acqtime'] - tgt_time)\n tinfo[plane][tdiff] = (entry, 'start')\n tdiff = abs(info['acqtime'] + info['TR']*info['tdim']/1000 - tgt_time)\n tinfo[plane][tdiff] = (entry, 'end')\n\n bases = {}\n for plane in tinfo.keys():\n tdiffs = tinfo[plane].keys()\n tdiffs.sort()\n bases[plane] = tinfo[plane][tdiffs[0]]\n\n for epi in self.entry_map['epi']:\n plane = self.info[epi]['plane']\n base_entry, base = bases[plane]\n self.info[epi]['base_entry'] = base_entry\n self.info[epi]['base'] = base\n self.info[epi]['basefile'] = '%s'%(self.info[base_entry]['imgfile'])", "def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n 
\"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)", "def create_data_structures(self):\n # Data storage arrays for time and measurement\n # Create the array of zeros and preallocating\n start_time = time.time()\n # The number of data points has to be optimized\n self.data_points = 5000\n # prs_data has three rows, 0 = time, 1 = pressure - tare, 2 = raw_pressure\n self.prs_data = np.zeros([3, self.data_points])\n self.prs_data[0, :] = start_time\n # This queue receives data from the sensors and puts it in the graphs and sends to the \n # LifoQueue\n self.prs_q = Queue()\n # The lifo queue is created to send the data to the piston control thread. The piston\n # control will only read and use the last value, since only the most recent information\n # matters\n self.prs_lifo_q = LifoQueue()\n self.prs_tare = 0\n \n self.flw_data = np.zeros([3, self.data_points])\n self.flw_data[0, :] = start_time\n self.flw_q = Queue()\n self.flw_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.flw_tare = 0\n\n self.vol_lifo_q = LifoQueue() # Read comment on the lifoqueue above\n self.vol_data = np.zeros([2, self.data_points])\n self.vol_data[0, :] = start_time", "def create(records):\n version = '1.0.0'\n\n iversion = [int(x) for x in version.split('.')]\n if iversion[1] > 0 or iversion[2] > 0:\n raise IOError(\"SEF versions > 0.0 are not supported\")\n\n latitude = 42.331\n longitude = -83.046\n altitude = 'NA'\n\n header = {\n 'SEF': version, 'ID': 'Detroit_Anthon', 'Name': 'Detroit, MI',\n 'Lat': latitude, 'Lon': longitude, 'Alt': altitude, 'Source': 'C3S-DRS',\n 'Link': '', 'Vbl': 'ta', 'Stat': 'point',\n 'Units': 'C', 'Meta': 'Observer=George Christian Anthon',\n }\n\n index_temperatures = 0\n index_times = 0\n\n time_offset = longitude * 12 / 180\n\n temp_dict = defaultdict(list)\n\n temperatures = []\n\n times = [datetime.time(7, 0), datetime.time(12, 0), datetime.time(20, 0)]\n original_time = [\"7:00AM\", \"12:00PM\", \"20:00PM\"]\n\n for index in range(len(records)):\n temperatures.append(records[index][datetime.time(7, 0)])\n temperatures.append(records[index][datetime.time(12, 0)])\n temperatures.append(records[index][datetime.time(20, 0)])\n for time in original_time:\n if isinstance(temperatures[index_temperatures], str):\n value = 'NA'\n else:\n value = round(((float(temperatures[index_temperatures]) - 32) * 5 / 9), 1)\n\n date = str(records[index]['Year']) \\\n + \"-\" \\\n + str(records[index]['Month']) \\\n + \"-\" + str(records[index]['Day']) \\\n + \" \" + str(times[index_times])\n\n date_time = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n utc = date_time - datetime.timedelta(hours=time_offset)\n\n year = str(utc)[:4]\n month = str(utc)[5:7]\n day = str(utc)[8:10]\n hour = str(utc)[11:13]\n minutes = str(utc)[14:16]\n\n data_dict = {\n 'Data': pd.DataFrame({\n 'Year': year,\n 'Month': month,\n 'Day': day,\n 'Hour': hour,\n 'Minute': minutes,\n 'Period': 0,\n 'Value': value,\n 'Meta': \"orig=\" + str(temperatures[index_temperatures])\n + 'F' + \"|orig.time=\" + str(time)\n + \"|orig.date=\" + str(records[index]['Year']) + '-' + str(records[index]['Month'])\n + '-' + str(records[index]['Day'])\n\n }, index=[0])\n }\n temp_dict['Data'].append(data_dict['Data'])\n\n index_times += 1\n if 
index_times > 2:\n index_times = 0\n\n index_temperatures += 1\n\n header.update(temp_dict)\n\n return header", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == 
\"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n 
input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def make_temperature_map(time: u.s, field, instr, **kwargs):\n plot_settings = {'cmap': cm.get_cmap('inferno')}\n plot_settings.update(kwargs.get('plot_settings', {}))\n bins, bin_range = instr.make_detector_array(field)\n visible = is_visible(instr.total_coordinates, instr.observer_coordinate)\n hist_coordinates, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=visible)\n with h5py.File(instr.counts_file, 'r') as hf:\n try:\n i_time = np.where(u.Quantity(hf['time'],\n get_keys(hf['time'].attrs), ('unit', 'units')) == time)[0][0]\n except IndexError:\n raise IndexError(f'{time} is not a valid time in observing time for {instr.name}')\n weights = np.array(hf['electron_temperature'][i_time, :])\n units = u.Unit(get_keys(hf['electron_temperature'].attrs, ('unit', 'units')))\n hist, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=weights * visible)\n hist /= np.where(hist_coordinates == 0, 1, hist_coordinates)\n meta = instr.make_fits_header(field, instr.channels[0])\n del meta['wavelnth']\n del meta['waveunit']\n meta['bunit'] = units.to_string()\n meta['detector'] = 'Electron Temperature'\n meta['comment'] = 'Column-averaged electron temperature calculated by synthesizAR'\n\n return GenericMap(hist.T, meta, plot_settings=plot_settings)", "def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... 
\\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... \\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return", "def fill_dict(self):\n image_time = (self.nl_image - 1) * (self.tcycle * self.dec)\n slc_dict = default_slc_dict()\n ts = self.time_start\n sod = _dt.timedelta(hours=ts.hour, minutes=ts.minute,\n seconds=ts.second, microseconds=ts.microsecond).total_seconds()\n st0 = sod + self.nl_acc * self.tcycle * self.dec + \\\n (self.dec / 2.0) * self.tcycle # include time to center of decimation window\n az_step = self.ang_per_tcycle * self.dec\n prf = abs(1.0 / (self.tcycle * self.dec))\n seq = self.TX_RX_SEQ\n GPRI_TX_z = self.mapping_dict['TX_' + seq[0] + \"_position\"]\n GPRI_RX_z = self.mapping_dict['RX_' + seq[1] + seq[3] + \"_position\"]\n fadc = C / (2. 
* self.rps)\n # Antenna elevation angle\n ant_elev = _np.deg2rad(self.antenna_elevation)\n # Compute antenna position\n rx1_coord = [0., 0., 0.]\n rx2_coord = [0., 0., 0.]\n tx_coord = [0., 0., 0.]\n #\n # Topsome receiver\n rx1_coord[0] = xoff + ant_radius * _np.cos(\n ant_elev) # local coordinates of the tower: x,y,z, boresight is along +X axis, +Z is up\n rx1_coord[1] = 0.0 # +Y is to the right when looking in the direction of +X\n rx1_coord[2] = GPRI_RX_z + ant_radius * _np.sin(\n ant_elev) # up is Z, all antennas have the same elevation angle!\n # Bottomsome receiver\n rx2_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n rx2_coord[1] = 0.0\n rx2_coord[2] = GPRI_RX_z + ant_radius * _np.sin(ant_elev)\n tx_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n tx_coord[1] = 0.0\n tx_coord[2] = GPRI_TX_z + ant_radius * _np.sin(ant_elev)\n chan_name = 'CH1 lower' if seq[3] == 'l' else 'CH2 upper'\n slc_dict['title'] = str(ts) + ' ' + chan_name\n slc_dict['date'] = self.time_start.date()\n slc_dict['start_time'] = st0\n slc_dict['center_time'] = st0 + image_time / 2\n slc_dict['end_time'] = st0 + image_time\n slc_dict['range_samples'] = self.ns_out\n slc_dict['azimuth_lines'] = self.nl_tot_dec - 2 * self.nl_acc\n slc_dict['range_pixel_spacing'] = self.rps\n slc_dict['azimuth_line_time'] = self.tcycle * self.dec\n slc_dict['near_range_slc'] = self.rmin\n slc_dict['center_range_slc'] = (self.rmin + self.rmax) / 2\n slc_dict['far_range_slc'] = self.rmax\n slc_dict['radar_frequency'] = self.RF_center_freq\n slc_dict['adc_sampling_rate'] = fadc\n slc_dict['prf'] = prf\n slc_dict['chirp_bandwidth'] = self.RF_freq_max - self.RF_freq_min\n slc_dict['receiver_gain'] = 60 - self.IMA_atten_dB\n slc_dict['GPRI_TX_mode'] = self.TX_mode\n slc_dict['GPRI_TX_antenna'] = seq[0]\n slc_dict.add_parameter('GPRI_RX_antennas', seq[1] + seq[3])\n slc_dict['GPRI_tx_coord'] = [tx_coord[0], tx_coord[1], tx_coord[2]]\n slc_dict['GPRI_rx1_coord'] = [rx1_coord[0], rx1_coord[1], rx1_coord[2]]\n slc_dict['GPRI_rx2_coord'] = [rx2_coord[0], rx2_coord[1], rx2_coord[2]]\n slc_dict['GPRI_az_start_angle'] = self.az_start\n slc_dict['GPRI_az_angle_step'] = az_step\n slc_dict['GPRI_ant_elev_angle'] = self.antenna_elevation\n slc_dict['GPRI_ref_north'] = self.geographic_coordinates[0]\n slc_dict['GPRI_ref_east'] = self.geographic_coordinates[1]\n slc_dict['GPRI_ref_alt'] = self.geographic_coordinates[2]\n slc_dict['GPRI_geoid'] = self.geographic_coordinates[3]\n return slc_dict", "def analyze_so(self, zmethod='trough'):\n\n ## create dict of dataframes for slow oscillation analysis\n print('Creating individual dataframes...')\n\n so = {}\n for chan in self.so_events.keys():\n so[chan] = {}\n for i, s in self.so_events[chan].items():\n # create individual df for each spindle\n start = self.so_events[chan][i]['npeak_minus2s']\n end = self.so_events[chan][i]['npeak_plus2s']\n so_data = self.data[chan]['Raw'].loc[start:end]\n so_filtdata = self.sofiltEEG[chan]['Filtered'].loc[start:end]\n spso_filtdata = self.spsofiltEEG[chan]['Filtered'].loc[start:end]\n \n # set new index so that each SO is zero-centered around the negative peak\n ms1 = list(range(-2000, 0, int(1/self.metadata['analysis_info']['s_freq']*1000)))\n ms2 = [-x for x in ms1[::-1]]\n id_ms = ms1 + [0] + ms2\n \n # create new dataframe\n so[chan][i] = pd.DataFrame(index=id_ms)\n so[chan][i].index.name='id_ms'\n \n # if the SO is not a full 2s from the beginning\n if start < self.data.index[0]:\n # extend the df index to the full 2s\n time_freq = 
str(int(1/self.metadata['analysis_info']['s_freq']*1000000))+'us'\n time = pd.date_range(start=start, end=end, freq=time_freq)\n so[chan][i]['time'] = time\n # append NaNs onto the end of the EEG data\n nans = np.repeat(np.NaN, len(time)-len(so_data))\n data_extended = list(nans) + list(so_data.values)\n so[chan][i]['Raw'] = data_extended\n filtdata_extended = list(nans) + list(so_filtdata.values)\n so[chan][i]['sofilt'] = filtdata_extended\n spsofiltdata_extended = list(nans) + list(spso_filtdata.values)\n so[chan][i]['spsofilt'] = spsofiltdata_extended\n\n # if the SO is not a full 2s from the end\n elif end > self.data.index[-1]:\n # extend the df index to the full 2s\n time_freq = str(int(1/self.metadata['analysis_info']['s_freq']*1000000))+'us'\n time = pd.date_range(start=start, end=end, freq=time_freq)\n so[chan][i]['time'] = time\n # append NaNs onto the end of the EEG data\n nans = np.repeat(np.NaN, len(time)-len(so_data))\n data_extended = list(so_data.values) + list(nans)\n so[chan][i]['Raw'] = data_extended\n filtdata_extended = list(so_filtdata.values) + list(nans)\n so[chan][i]['sofilt'] = filtdata_extended\n spsofiltdata_extended = list(spso_filtdata.values) + list(nans)\n so[chan][i]['spsofilt'] = spsofiltdata_extended\n else:\n so[chan][i]['time'] = so_data.index\n so[chan][i]['Raw'] = so_data.values\n so[chan][i]['sofilt'] = so_filtdata.values\n so[chan][i]['spsofilt'] = spso_filtdata.values\n \n self.so = so\n print('Dataframes created. Slow oscillation data stored in obj.so.')", "def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n 
if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata", "def GEEsmos(ptsFile,metric,timeStep,buf,poly,username,folderOut, scalePix = 25000,startYear = None,endYear = None):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n\n lastImage = ee.Image(ee.ImageCollection('NASA_USDA/HSL/soil_moisture')\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = ee.Image(ee.ImageCollection('NASA_USDA/HSL/soil_moisture')\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n \n #startMonth - 1, because time-series starts on Jan 1\n #startYearAll: did't add one, for same reason\n if all([startYear is None,endYear is None]):\n startYear = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYear = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)])\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])-1\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n startYearAll = startYear\n endYearAll = endYear - 1\n \n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n elif all([startYear >= 0,endYear >= 0]):\n startYearReal = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYearReal = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)]) \n \n years = list(range(max(startYearReal,startYear), (min(endYearReal,endYear) + 1)))\n \n if endYear >= endYearReal:\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n endYearReal2 = endYearReal-1\n years2 = len(years)-1\n elif endYear < endYearReal:\n endMonth = 0\n endYearReal2 = endYearReal\n years2 = len(years)\n \n if startYear <= startYearReal:\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])-1\n elif 
startYear > startYearReal:\n startMonth = 0\n \n monthsEE = ee.List(list(range(startMonth,(12*years2+endMonth))))\n yearsEE = ee.List(list(range(max(startYearReal,startYear), (min(endYearReal2,endYear) + 1))))\n \n for met in metric:\n SMOS = ee.ImageCollection('NASA_USDA/HSL/soil_moisture').select(met)\n metL = [met]\n \n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (SMOS\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (SMOS\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (SMOS\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (SMOS\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([timeStep == 'lowest',endYear is None, startYear is None]):\n\n img_col = SMOS\n \n elif all([timeStep == 'lowest',endYear > 0, startYear > 0]):\n\n img_col = SMOS.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_SMOS_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for SMOS: ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_SMOS_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for SMOS: ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n 
.filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_SMOS_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for SMOS: ' + met)", "def Load_EP_Fullcospectra(path,start_day,end_day,variable):\r\n \r\n # Number of days selected\r\n sday = datetime.strptime(start_day,'%Y-%m-%d')\r\n eday = datetime.strptime(end_day,'%Y-%m-%d')\r\n Nday = (eday-sday).days +1\r\n \r\n if Nday <= 0:\r\n print('WARNING!! End day is before start day!')\r\n \r\n Nvars = len(variable)\r\n\r\n allf = os.listdir(path)\r\n fnames = [f for f in allf if f.endswith('.csv')]\r\n \r\n # Read first file to get info (meta) \r\n spec, timeseries, header, meta1 = read_cospectrum(path,[fnames[0]])\r\n Hz = meta1[0]\r\n avg_period = meta1[3]\r\n nseg = np.int(24*60/avg_period)\r\n ppf = np.int(2**np.floor(np.log2(avg_period*60*Hz/2)))\r\n\r\n df = Hz/2/ppf\r\n freq = np.arange(df,Hz/2+df,df)\r\n \r\n # spec shape: [frequency,time,variables]\r\n spec=np.zeros((ppf,np.int(Nday*(24*60/avg_period)),Nvars))*np.nan\r\n spec_time=[]\r\n\r\n tct = -1 # Time counter\r\n for d in range(Nday):\r\n for h in range(nseg):\r\n tct+=1\r\n curtime = sday+timedelta(d,0,0,0,avg_period*(h+1))\r\n spec_time.append(curtime)\r\n hstr = (curtime).strftime('%H%M')\r\n\r\n daystr = curtime.strftime('%Y-%m-%d')\r\n daystr2 = curtime.strftime('%Y%m%d')\r\n print('Loading... {} {}'.format(daystr,hstr))\r\n\r\n # See if file exists\r\n matchi = np.array(['{}-{}'.format(daystr2,hstr) in f for f in fnames])\r\n\r\n if np.sum(matchi)>0:\r\n matchi = np.where(matchi)[0][0]\r\n spec_day, spec_time_day, header_day, meta_day = read_cospectrum(path,[fnames[matchi]])\r\n spec_day = spec_day[0]\r\n\r\n for vi in range(Nvars):\r\n gasheader = 'f_nat*cospec(w_{})'.format(variable[vi])\r\n vmatchi = np.array([gasheader in h for h in header_day])\r\n if np.sum(vmatchi)>0:\r\n vmatchi = np.where(vmatchi)[0][0]\r\n spec[:,tct,vi] = spec_day[:,vmatchi]\r\n\r\n else:\r\n print('And there was a problem!') \r\n \r\n return spec, spec_time, freq", "def get_time_series(this_lat, this_lon, case, varnames):\n\n cesmdir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'\n\n if 'LE' in case:\n\n from observational_large_ensemble.params import karen_params_cesm\n\n mode_lag = karen_params_cesm.mode_lag\n cvdp_loc = karen_params_cesm.cvdp_loc\n AMO_cutoff_freq = karen_params_cesm.AMO_cutoff_freq\n\n name_conversion = {'tas': 'TREFHT', 'pr': 'PRECC', 'slp': 'PSL'}\n cesm_names = [name_conversion[v] for v in varnames]\n this_member = int((case).split('-')[-1])\n cvdp_file = '%s/CESM1-CAM5-BGC-LE_#%i.cvdp_data.1920-2018.nc' % (cvdp_loc, this_member)\n\n # Historical filenames for CESM. 
Will need to append part of RCP8.5 to get full period\n filenames = []\n for var in cesm_names:\n file_str = '%s/%s/b.e11.B20TRC5CNBDRD.f09_g16.%03d.cam.h0.%s.??????-200512.nc' % (cesmdir, var,\n this_member, var)\n this_file = glob(file_str)[0]\n filenames.append(this_file)\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames,\n karen_params_cesm.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n else:\n\n from observational_large_ensemble.params import karen_params_obs\n\n mode_lag = karen_params_obs.mode_lag\n cvdp_loc = karen_params_obs.cvdp_loc\n AMO_cutoff_freq = karen_params_obs.AMO_cutoff_freq\n\n tas_dir = karen_params_obs.tas_dir\n pr_dir = karen_params_obs.pr_dir\n slp_dir = karen_params_obs.slp_dir\n cvdp_file = '%s/HadISST.cvdp_data.1920-2018.nc' % cvdp_loc\n file_dict = {'tas': '%s/Complete_TAVG_LatLong1.nc' % tas_dir,\n 'pr': '%s/full_data_monthly_v2020.nc' % pr_dir,\n 'slp': '%s/prmsl.mon.mean.nc' % slp_dir}\n\n filenames = []\n for var in varnames:\n filenames.append(file_dict[var])\n\n name_conversion = {'tas': 'temperature', 'pr': 'precip', 'slp': 'prmsl'}\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames[0],\n karen_params_obs.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n return this_ts, df_shifted", "def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. 
/\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata", "def _get_gedi2a_main_data_dict(self) -> dict:\n gedi_l2a_count_start = pd.to_datetime(\"2018-01-01T00:00:00Z\")\n data = {\n # General identifiable data\n \"granule_name\": [self.parent_granule.filename] * self.n_shots,\n \"shot_number\": self[\"shot_number\"][:],\n \"beam_type\": [self.beam_type] * self.n_shots,\n \"beam_name\": [self.name] * self.n_shots,\n # Temporal data\n \"delta_time\": self[\"delta_time\"][:],\n \"absolute_time\": (\n gedi_l2a_count_start\n + pd.to_timedelta(list(self[\"delta_time\"]), unit=\"seconds\")\n ),\n # Quality data\n \"sensitivity\": self[\"sensitivity\"][:],\n \"quality_flag\": self[\"quality_flag\"][:],\n \"solar_elevation\": self[\"solar_elevation\"][:],\n \"solar_azimuth\": self[\"solar_elevation\"][:],\n \"energy_total\": self[\"energy_total\"][:],\n # DEM\n \"dem_tandemx\": self[\"digital_elevation_model\"][:],\n \"dem_srtm\": self[\"digital_elevation_model_srtm\"][:],\n # Processing data\n \"selected_algorithm\": self[\"selected_algorithm\"][:],\n \"selected_mode\": self[\"selected_mode\"][:],\n # Geolocation data\n \"lon_lowestmode\": self[\"lon_lowestmode\"][:],\n \"longitude_bin0_error\": self[\"longitude_bin0_error\"][:],\n \"lat_lowestmode\": self[\"lat_lowestmode\"][:],\n \"latitude_bin0_error\": self[\"latitude_bin0_error\"][:],\n \"elev_lowestmode\": self[\"elev_lowestmode\"][:],\n \"elevation_bin0_error\": self[\"elevation_bin0_error\"][:],\n \"lon_highestreturn\": self[\"lon_highestreturn\"][:],\n \"lat_highestreturn\": self[\"lat_highestreturn\"][:],\n \"elev_highestreturn\": self[\"elev_highestreturn\"][:],\n } | {f\"rh{i}\": self[\"rh\"][:, i] for i in range(101)}\n return data", "def compute_habitat(particle, fieldset, time):\n if particle.active == 1:\n #Convert dx to lon and lat\n dx_lon = fieldset.grad_dx / (fieldset.deg * cos(particle.lat * math.pi / 180)) \n dx_lat = fieldset.grad_dx / fieldset.deg\n #\n #Get 5 T and 5 NPP\n #\n T0 = [fieldset.T[time, particle.depth, particle.lat, particle.lon],#position\n fieldset.T[time, particle.depth, particle.lat, particle.lon - dx_lon],#left\n fieldset.T[time, particle.depth, particle.lat, particle.lon + dx_lon],#right\n fieldset.T[time, particle.depth, particle.lat - dx_lat, particle.lon],#bottom\n fieldset.T[time, particle.depth, particle.lat + dx_lat, particle.lon]]#top\n \n NPP0 = [fieldset.NPP[time, particle.depth, particle.lat, 
particle.lon],#position\n fieldset.NPP[time, particle.depth, particle.lat, particle.lon - dx_lon],#left\n fieldset.NPP[time, particle.depth, particle.lat, particle.lon + dx_lon],#right\n fieldset.NPP[time, particle.depth, particle.lat - dx_lat, particle.lon],#bottom\n fieldset.NPP[time, particle.depth, particle.lat + dx_lat, particle.lon]]#top \n #Save T and NPP at particle location\n particle.T = T0[0]\n particle.NPP = NPP0[0]\n #\n #Temperature habitat\n #\n Tmin = particle.Tmin\n Topt = particle.Topt\n T_hab = [0, 0, 0, 0, 0] #position, left, right, bottom and top\n #\n if T0[0] >= Topt:\n T_hab[0] = 1.0\n else:\n T_hab[0] = exp(-2*((T0[0]-Topt)/(Topt-Tmin))**2)\n #\n if T0[1] >= Topt:\n T_hab[1] = 1.0\n else:\n T_hab[1] = exp(-2*((T0[1]-Topt)/(Topt-Tmin))**2)\n #\n if T0[2] >= Topt:\n T_hab[2] = 1.0\n else:\n T_hab[2] = exp(-2*((T0[2]-Topt)/(Topt-Tmin))**2)\n #\n if T0[3] >= Topt:\n T_hab[3] = 1.0\n else:\n T_hab[3] = exp(-2*((T0[3]-Topt)/(Topt-Tmin))**2)\n #\n if T0[4] >= Topt:\n T_hab[4] = 1.0\n else:\n T_hab[4] = exp(-2*((T0[4]-Topt)/(Topt-Tmin))**2)\n #\n #Food habitat\n #\n food_hab = [0, 0, 0, 0, 0] #position, left, right, bottom and top\n #\n if NPP0[0] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[0] = 0\n else:\n food_hab[0] = min(NPP0[0]/particle.PPmax,1)\n #\n if NPP0[1] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[1] = 0\n else:\n food_hab[1] = min(NPP0[1]/particle.PPmax,1)\n #\n if NPP0[2] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[2] = 0\n else:\n food_hab[2] = min(NPP0[2]/particle.PPmax,1)\n #\n if NPP0[3] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[3] = 0\n else:\n food_hab[3] = min(NPP0[3]/particle.PPmax,1)\n #\n if NPP0[4] < 0:\n print('WARNING: negative NPP at lon,lat = %f,%f and time = %f: set to 0'%(particle.lon,particle.lat,time))\n food_hab[4] = 0\n else:\n food_hab[4] = min(NPP0[4]/particle.PPmax,1)\n #\n #Total habitat\n #\n particle.habT = T_hab[0]\n particle.habPP = food_hab[0]\n particle.hab = particle.habT * particle.habPP\n h_left = T_hab[1] * food_hab[1]\n h_right = T_hab[2] * food_hab[2]\n h_bot = T_hab[3] * food_hab[3]\n h_top = T_hab[4] * food_hab[4]\n #\n #Habitat gradient\n #\n particle.xgradh = (h_right - h_left)/(2 * fieldset.grad_dx)\n particle.ygradh = (h_top - h_bot)/(2 * fieldset.grad_dx)\n #\n #Safety check\n #\n if particle.hab < 0 or particle.hab > 1:\n print(\"Habitat is %f at lon,lat = %f,%f. 
Execution stops.\"%(particle.hab,particle.lon,particle.lat))\n exit(0)", "def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer = refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n 
stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n 
ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts - off at end of slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(ml_screen_refs.shape)\n 
print(type(ml_screen_refs))\n ml_screen_refs = ml_screen_refs.reshape(-1, 2)\n ml_world_refs = ml_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(fp_screen_refs.shape)\n print(type(fp_screen_refs))\n fp_screen_refs = fp_screen_refs.reshape(-1, 2)\n fp_world_refs = fp_world_refs.reshape(-1, 2)\n print(ml_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[idx]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata.loc[index,'veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n 
#master_steergaze.to_feather(datafilepath)", "def make_digital_map(self):\n self.uni.home(axis='X')\n time.sleep(10.0)\n azimuths = []\n for x in numpy.arange(self.azimuth.xmin, self.azimuth.xmax + self.azimuth.xinc,\n self.azimuth.xinc):\n if x > self.azimuth.xmax:\n x = self.azimuth.xmax\n azimuths.append(x)\n azimuths = numpy.array(azimuths)\n wait = (abs(azimuths[0]-self.uni.pos_az)/self.azimuth.xslew_vel) + 1.0\n self.uni.set_azimuth(azimuths[0], self.azimuth.xslew_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to start of map\" % wait)\n time.sleep(wait)\n\n fp = open(self.filename, 'w')\n header = self.make_digital_header()\n fp.write(header)\n plt.ion()\n plt.plot([self.azimuth.xmin, self.azimuth.xmax], [0, 0], 'r-')\n plt.xlim(self.azimuth.xmin, self.azimuth.xmax)\n plt.ylim(-0.5, 6)\n plt.draw()\n for az in azimuths:\n wait = (abs(az-self.uni.pos_az)/self.azimuth.xmap_vel) + 1.0\n self.uni.set_azimuth(az, self.azimuth.xmap_vel)\n logger.info(\"Sleeping for %.2f seconds while stage gets to %.1f degrees\" % (wait, az))\n time.sleep(wait)\n fp.write(\"%.3f\" % az)\n #data = self.take_readings()\n for i, freq in enumerate(self.freq_list):\n self.syn.set_freq(freq)\n for dig_channel in range(8):\n for dig in range(8):\n if dig != dig_channel:\n self.labjack.digital_output(dig, 1)\n time.sleep(0.050)\n self.labjack.digital_output(dig_channel, 0)\n time.sleep(0.050)\n ratio, phase = self.vv.measure_vector_averaged_transmission(self.average)\n fp.write(\",%.6g,%.6g\" % (ratio, phase))\n logger.info(\"Az: %.2f, Freq: %.3f, Ratio: %g; Phase: %g\" % (az, freq/1e9, ratio, phase))\n plt.plot(az, ratio, self.plot_symbols[i])\n plt.draw()\n fp.write('\\n')\n \n time.sleep(10.0)\n self.uni.home(axis='X')\n logger.info(\"Map Completed, Saving data file %s\" % self.filename)\n fp.close()", "def from_field(cls, fieldset, pclass, start_field, size, mode='monte_carlo', depth=None, time=None, repeatdt=None):\n\n if mode == 'monte_carlo':\n if start_field.interp_method == 'cgrid_tracer':\n p_interior = np.squeeze(start_field.data[0, 1:, 1:])\n else: # if A-grid\n d = start_field.data\n p_interior = (d[0, :-1, :-1] + d[0, 1:, :-1] + d[0, :-1, 1:] + d[0, 1:, 1:])/4.\n p_interior = np.where(d[0, :-1, :-1] == 0, 0, p_interior)\n p_interior = np.where(d[0, 1:, :-1] == 0, 0, p_interior)\n p_interior = np.where(d[0, 1:, 1:] == 0, 0, p_interior)\n p_interior = np.where(d[0, :-1, 1:] == 0, 0, p_interior)\n p = np.reshape(p_interior, (1, p_interior.size))\n inds = np.random.choice(p_interior.size, size, replace=True, p=p[0] / np.sum(p))\n xsi = np.random.uniform(size=len(inds))\n eta = np.random.uniform(size=len(inds))\n j, i = np.unravel_index(inds, p_interior.shape)\n grid = start_field.grid\n if grid.gtype in [GridCode.RectilinearZGrid, GridCode.RectilinearSGrid]:\n lon = grid.lon[i] + xsi * (grid.lon[i + 1] - grid.lon[i])\n lat = grid.lat[j] + eta * (grid.lat[j + 1] - grid.lat[j])\n else:\n lons = np.array([grid.lon[j, i], grid.lon[j, i+1], grid.lon[j+1, i+1], grid.lon[j+1, i]])\n if grid.mesh == 'spherical':\n lons[1:] = np.where(lons[1:] - lons[0] > 180, lons[1:]-360, lons[1:])\n lons[1:] = np.where(-lons[1:] + lons[0] > 180, lons[1:]+360, lons[1:])\n lon = (1-xsi)*(1-eta) * lons[0] +\\\n xsi*(1-eta) * lons[1] +\\\n xsi*eta * lons[2] +\\\n (1-xsi)*eta * lons[3]\n lat = (1-xsi)*(1-eta) * grid.lat[j, i] +\\\n xsi*(1-eta) * grid.lat[j, i+1] +\\\n xsi*eta * grid.lat[j+1, i+1] +\\\n (1-xsi)*eta * grid.lat[j+1, i]\n else:\n raise NotImplementedError('Mode %s not implemented. 
Please use \"monte carlo\" algorithm instead.' % mode)\n\n return cls(fieldset=fieldset, pclass=pclass, lon=lon, lat=lat, depth=depth, time=time, repeatdt=repeatdt)", "def GetMapId(landsat, date, date_range):\n \n def maskClouds(img):\n scored = ee.Algorithms.Landsat.simpleCloudScore(img);\n return img.updateMask(scored.select(['cloud']).lt(20));\n\n def CreateTimeBand(img):\n return maskClouds(img).byte().addBands(img.metadata('system:time_start'))\n\n if landsat == 'l7':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L7)\n l7 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l7Composite = l7.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l7Composite.getMapId({\n 'min': '0,0,0',\n 'max': '255,255,255',\n 'bands': 'B4,B3,B2',\n })\n if landsat == 'l8':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L8)\n l8 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l8Composite = l8.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l8Composite.getMapId({\n 'min': '0',\n 'max': '0.4',\n 'bands': 'B4,B3,B2',\n })", "def __init__(self, t0, t1, hours=(\"00:00\", \"23:45\"),\n forecast_zones=\"DK\", norm=False, TimeResolution=\"15T\"):\n \n self.t0 = t0\n self.t1 = t1\n self.muni_input = forecast_zones\n self.norm = norm\n self.Time = TimeResolution\n self.fc_zones = self._muni_interpreter(self.muni_input)\n self.fc_obj = import_muni_forecast(self.t0, self.t1,\n hours=hours,\n muni_list=self.fc_zones,\n sub_h_freq=self.Time)\n root = return_to_root()\n coef_path = 'scripts/rad_model_development/'\n stem_path = '/data/stem_data/'\n self.all_KNr = np.array(pd.read_excel(root + stem_path +\n 'Kommune_GridNr.xlsx',\n header=0)['Kommune_Nr'])\n \n # Importing season, muni and time parameters\n self.beta = np.load(root + coef_path + 'rad_coef_merge.pickle')\n \n self.season = {}\n self.season['DK'] = self.beta['season']['coef_s'][0:4].reshape((1,4))[0]\n self.season['zones'] = self.beta['season']['coef_s'][4:8].reshape((1,4))[0]\n self.season['munis'] = self.beta['season']['coef_s'][8:12].reshape((1,4))[0]\n \n self.time = {}\n self.time['DK'] = self.beta['time']['coef_t'][0:24].reshape((1,24))[0]\n self.time['zones'] = self.beta['time']['coef_t'][24:48].reshape((1,24))[0]\n self.time['munis'] = self.beta['time']['coef_t'][48:72].reshape((1,24))[0]\n \n self.muni = self.beta['muni']['coef_m'].reshape((1,101))[0]\n \n self.GHI = self.fc_obj.GHI*10**(-3) # Scaled to MW\n self.KNr = self.fc_obj.muninr\n self.hour = (self.fc_obj.GHI.index[0].hour,\n self.fc_obj.GHI.index[-1].hour)\n self.minutes = (self.fc_obj.GHI.index[0].time().minute,\n self.fc_obj.GHI.index[-1].time().minute)\n self.t0 = pd.Timestamp(self.fc_obj.GHI.index[0].date())\n self.t1 = pd.Timestamp(self.fc_obj.GHI.index[-1].date())\n self.IndxSet = self.findIndx()\n self.rng_single_day = pd.date_range(self.t0 +\n pd.Timedelta(hours=self.hour[0],\n minutes=self.minutes[0]),\n self.t0 +\n pd.Timedelta(hours=self.hour[-1],\n minutes=self.minutes[-1]),\n freq=self.Time)\n\n self.rng = pd.date_range(self.t0 + pd.Timedelta(hours=self.hour[0],\n minutes=self.minutes[0]),\n self.t1 + pd.Timedelta(hours=self.hour[-1],\n minutes=self.minutes[-1]),\n freq=self.Time)", "def __init__(self, markers):\n self.markers = markers\n self.last_time = None # Used to keep track of time between measurements \n self.Q_t = 
np.eye(2)\n self.R_t = np.eye(3)\n # YOUR CODE HERE", "def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = 
self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map", "def make_df_an_table(an_string, site_name='DSW', min_moon_dist=MIN_MOON_DISTANCE,\n min_hours=MIN_HOURS_OBSERVABLE):\n an_string = str(an_string) # (precaution in case int passed in)\n an_object = Astronight(an_string, site_name)\n # dark_start, dark_end = an_object.ts_dark.start, an_object.ts_dark.end\n mid_dark = an_object.local_middark_utc\n # dark_no_moon_start, dark_no_moon_end = an_object.ts_dark_no_moon.start, an_object.ts_dark_no_moon.end\n mpfile_dict = make_mpfile_dict()\n\n an_dict_list = [] # results to be deposited here, to make a dataframe later.\n for mp in mpfile_dict.keys():\n mpfile = mpfile_dict[mp]\n # an_dict doesn't need to include defaults for case before or after mpfile ephemeris,\n # because making the dataframe should put in NANs for missing keys anyway (check this later):\n an_dict = {'MPnumber': mpfile.number, 'MPname': mpfile.name, 'Motive': mpfile.motive,\n 'Priority': mpfile.priority, 'Period': mpfile.period}\n # Interpolate within ephemeris (because MP is moving in sky); 2 iterations s/be enough:\n data, status, ts_observable, mp_radec = None, None, None, None # keep stupid IDE happy.\n best_utc = mid_dark # best_utc will = mid-observable time at converged RA,Dec.\n\n # Converge on best RA, Dec, observable timespan (they interact, as MP is moving):\n hours_observable = 0.0 # default to keep IDE happy.\n for i in range(2):\n data = mpfile.eph_from_utc(best_utc)\n if data is None:\n if mpfile.eph_range[1] < an_object.ts_dark.start:\n status = 'too late'\n else:\n status = 'too early'\n break\n status = 'ok'\n mp_radec = RaDec(data['RA'], data['Dec'])\n ts_observable = an_object.ts_observable(mp_radec,\n min_alt=MIN_MP_ALTITUDE,\n min_moon_dist=min_moon_dist) # Timespan object\n hours_observable = ts_observable.seconds / 3600.0\n mid_observable = ts_observable.midpoint # for loop exit\n best_utc = mid_observable # update for loop continuation.\n\n # Mark valid MPs that are observable too briefly:\n if status.lower() == 'ok':\n if hours_observable < min_hours:\n status = 'too brief'\n\n # For MPs observable this night, add one line to table:\n # print(mpfile.name, status)\n an_dict['Status'] = status\n if status.lower() == 'ok':\n an_dict['RA'] = data['RA']\n an_dict['Dec'] = data['Dec']\n an_dict['StartUTC'] = ts_observable.start\n an_dict['EndUTC'] = ts_observable.end\n an_dict['TransitUTC'] = an_object.transit(mp_radec)\n an_dict['MoonDist'] = mp_radec.degrees_from(an_object.moon_radec)\n an_dict['PhaseAngle'] = data['Phase']\n an_dict['V_mag'] = data['V_mag']\n an_dict['ExpTime'] = float(round(float(calc_exp_time(an_dict['V_mag'],\n EXP_TIME_TABLE_PHOTOMETRY))))\n if an_dict['Period'] is not None:\n # Duty cycle is % of time spent observing this MP if one exposure per 1/60 of period.\n an_dict['DutyCyclePct'] = 100.0 * ((an_dict['ExpTime'] + EXP_OVERHEAD) / 60.0) / \\\n an_dict['Period']\n else:\n an_dict['DutyCyclePct'] = None\n if status.lower() == 'ok':\n an_dict['PhotrixPlanning'] = 'IMAGE MP_' + mpfile.number + \\\n ' Clear=' + str(an_dict['ExpTime']) + 'sec(***) ' + \\\n ra_as_hours(an_dict['RA'], seconds_decimal_places=1) + ' ' + \\\n degrees_as_hex(an_dict['Dec'], 
arcseconds_decimal_places=0)\n if an_dict['Period'] is not None:\n an_dict['Coverage'] = make_df_coverage(an_dict['Period'],\n mpfile.obs_jd_ranges,\n (jd_from_datetime_utc(an_dict['StartUTC']),\n jd_from_datetime_utc(an_dict['EndUTC'])))\n an_dict['PhaseCoverage'] = make_df_phase_coverage(an_dict['Period'],\n mpfile.obs_jd_ranges)\n else:\n an_dict['Coverage'] = None\n an_dict_list.append(an_dict)\n if len(an_dict_list) == 0:\n return None\n df_an_table = pd.DataFrame(data=an_dict_list)\n df_an_table.index = df_an_table['MPnumber'].values\n df_an_table = df_an_table.sort_values(by='TransitUTC')\n return df_an_table", "def create_maps(self,data,tod,mjd,coords):\n features = np.log10(self.getFeatures(data))/np.log10(2)\n special_idx = np.where((features==16))[0]\n # This is for getting the stare data on more recent\n # calibration observations.\n point_data = self.get_point_data(data,special_idx)\n \n cel_maps = self.create_single_map(tod,\n coords['ra'],\n coords['dec'],\n self.source_positions['ra'][coords['sky_data_flag']],\n self.source_positions['dec'][coords['sky_data_flag']])\n az_maps = self.create_single_map(tod,\n coords['az'],\n coords['el'],\n self.source_positions['az'][coords['sky_data_flag']],\n self.source_positions['el'][coords['sky_data_flag']])\n cel_maps= self.average_maps(cel_maps)\n az_maps = self.average_maps(az_maps)\n xygrid = np.meshgrid((np.arange(self.Nx)+0.5)*self.dx - self.Nx*self.dx/2.,\n (np.arange(self.Ny)+0.5)*self.dy - self.Ny*self.dy/2.)\n \n \n cel_maps['xygrid']=xygrid\n cel_maps['StareCoords']= {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n az_maps['xygrid']=xygrid\n az_maps['StareCoords'] = {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n return cel_maps,az_maps", "def repeat(start, end, roi_times=None, timeres=2, coords=None, ar=None,\n split_temps=None, em_wlen=None, plotminmax=False, plotstd=False,\n hist_type='plain', loaddata=False):#, output=None):\n #if isinstance(output, str):\n # from matplotlib import use\n # use(output)\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n\n #loaddata = False\n\n print start, end\n start, end = parse(start), parse(end)\n \n s = []\n t = []\n p = []\n \n #if flares == []:\n # return s, t, p\n\n timerange = tr(start, end)\n delta = dt.timedelta(hours=timeres)\n ntimes = int(timerange.seconds()/delta.total_seconds())\n times = [time.start() for time in timerange.split(ntimes)]\n \n ntemps = 141\n tempsovertime = np.zeros((ntemps, ntimes))\n \n means = np.zeros(len(times))\n p95s = np.zeros(len(times))\n loopmeans = np.zeros(len(times))\n if plotminmax:\n maxes = np.zeros(len(times))\n mins = np.zeros(len(times))\n if plotstd:\n stds = np.zeros(len(times))\n loopstds = np.zeros(len(times))\n if em_wlen:\n meanem = np.zeros(len(times))\n if plotminmax:\n maxem = np.zeros(len(times))\n minem = np.zeros(len(times))\n if plotstd:\n stdem = np.zeros(len(times))\n\n for i, date in enumerate(times):\n data_only = True\n try:\n if ar == 'all':\n plotar = None\n else:\n plotar = ar\n results = output_maps(date, plotar, coords, 'data', split_temps,\n subimsize=50, calc_em=em_wlen, data_only=data_only)#True)#, linear=True)\n if isinstance(results, tuple):\n tempmap, emmap = results\n else:\n tempmap = results\n data = tempmap.data\n except DownloadError as de:\n data = np.zeros((512, 512))\n print de.msg\n except:\n print 'KHAAAAAAAN! 
Some part of the temperature-plotting process failed.'\n raise\n data = np.zeros((512, 512))\n if em_wlen:\n emmap = np.zeros((512, 512))\n \n t.append(np.nanmean(data))\n p.append(np.nanmax(data))\n \n data = data.flatten()\n data2 = data.copy()\n data2[data == 0.0] = np.NaN\n data2 = data2[np.isfinite(data)]\n data2.sort()\n temps, bins = np.histogram(data, bins=ntemps, density=False, range=(5.6, 7.0))\n temps = (temps/float(data.size))*100.0\n tempsovertime[:, i] = temps\n\n #loops = data[data >= split_temps]\n #data = data[data < split_temps]\n\n means[i] = np.nanmean(data2)\n try:\n p95s[i] = data2[round(0.95 * len(data2))-1]\n except IndexError:\n p95s[i] = np.NaN\n #loopmeans[i] = np.nanmean(loops)\n if plotminmax:\n maxes[i] = np.nanmax(data)\n mins[i] = np.nanmin(data)\n if em_wlen:\n maxem[i] = np.nanmax(emmap)\n minem[i] = np.nanmin(emmap)\n if plotstd:\n stds[i] = np.nanstd(data)\n if em_wlen:\n stdem[i] = np.nanstd(emmap)\n #loopstds[i] = np.nanstd(loops)\n \n tempsovertime[tempsovertime <= 0.1] = np.nan\n\n xmin, xmax = mdates.datestr2num([str(start), str(end)])\n fig = plt.figure(figsize=(36, 18))\n ax = fig.add_subplot(111, axisbg='k')\n plot_title = 'Temperature distribution of corona\\n{:%Y/%m/%d %H:%M} - {:%Y/%m/%d %H:%M}'.format(start, end)\n if roi_times:\n plot_title += '\\nRegion observed: {:%Y/%m/%d %H:%M} - {:%Y/%m/%d %H:%M}'.format(*roi_times)\n plt.title(plot_title)\n if hist_type == 'plain':\n plt.imshow(tempsovertime[30:106, :], extent=[xmin, xmax, 5.9, 6.65],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime[65:106, :]),\n vmax=np.nanmax(tempsovertime[65:106, :]))\n elif hist_type == 'loops':\n plt.imshow(tempsovertime[65:106, :], extent=[xmin, xmax, 6.25, 6.65],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime[65:106, :]),\n vmax=np.nanmax(tempsovertime[65:106, :]))\n elif hist_type == 'full':\n plt.imshow(tempsovertime, extent=[xmin, xmax, 5.6, 7.0],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime),\n vmax=np.nanmax(tempsovertime))\n plt.tight_layout()\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.colorbar(orientation='horizontal')\n plt.savefig('/media/huw/temp-time_hists/distribution_over_time_{}'.format(ar))\n plt.close()\n\n\n means[np.where(means == 0.0)] = np.nan\n if plotstd:\n stds[np.where(stds == 0.0)] = np.nan\n loopstds[loopstds == 0.0] = np.nan\n\n try:\n tnums = mdates.date2num([ti for ti in times])\n print maxes\n print len(maxes)\n fig = plt.figure(figsize=(18, 14))\n ax = fig.add_subplot(111)\n plt.title('Variation of temperature over time; AR{}'.format(ar), \n fontsize=32)\n plt.plot(tnums, maxes, label='Maximum temperature', color='red')\n plt.axhline(np.nanmean(maxes))\n print tnums\n print len(tnums)\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.legend(loc=4, fontsize=16)\n plt.xlabel('Date', fontsize=24)\n plt.ylabel('log(T)', fontsize=24)\n #plt.savefig('/media/huw/temp_plots/temp_plot_{}_b'.format(ar))\n plt.savefig('/home/drew/Dropbox/ARs/temps_{}_b'.format(ar))\n plt.close()\n\n \"\"\"diff = ((maxes-p95s)/p95s)*100.0\n fig = plt.figure(figsize=(18, 14))\n ax = fig.add_subplot(1, 1, 1)\n plt.title('Percentage difference between max and 95th %-ile; AR{}'.format(ar), \n fontsize=32)\n plt.plot(tnums, diff, color='black')\n plt.scatter(fldates, [np.nanmax(diff)]*len(fldates))\n for flare in flares:\n ax.text(sunpy.time.parse_time(flare['event_peaktime']), np.nanmax(diff)+0.01, 
flare['fl_goescls'][0])\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.xlabel('Date', fontsize=24)\n plt.ylabel('log(T)', fontsize=24)\n #plt.savefig('/media/huw/temp_plots/temp_plot_{}'.format(ar))\n plt.savefig('Dropbox/ARs/diffs_{}'.format(ar))\n plt.close()\"\"\"\n \n except:# ValueError:\n print \"Can't plot the temperature graph because matplotlib is being a whiney douche\"\n print tnums\n raise\n\n return s, t, p, times", "def merge(atmos, surface):\n data = {}\n # Do what we can with the atmospheric data\n for _, row in atmos.iterrows():\n nwsli = get_nwsli(row['Rpuid'])\n if nwsli is None:\n if int(row['Rpuid']) not in KNOWN_UNKNOWNS:\n print(('process_rwis: Unknown Rpuid: %s in atmos'\n '') % (row['Rpuid'],))\n continue\n if nwsli not in data:\n data[nwsli] = {}\n # Timestamp\n ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')\n data[nwsli]['valid'] = ts.replace(tzinfo=pytz.UTC)\n data[nwsli]['tmpf'] = get_temp(row['AirTemp'])\n data[nwsli]['dwpf'] = get_temp(row['Dewpoint'])\n if data[nwsli]['tmpf'] is not None and data[nwsli]['dwpf'] is not None:\n data[nwsli]['relh'] = mcalc.relative_humidity_from_dewpoint(\n data[nwsli]['tmpf'] * units('degF'),\n data[nwsli]['dwpf'] * units('degF')).magnitude * 100.\n # Rh is unused\n data[nwsli]['sknt'] = get_speed(row['SpdAvg'])\n data[nwsli]['gust'] = get_speed(row['SpdGust'])\n if row['DirMin'] not in ['', 32767, np.nan]:\n data[nwsli]['drct'] = row['DirMin']\n # DirMax is unused\n # Pressure is not reported\n # PcIntens\n # PcType\n # PcRate\n if row['PcAccum'] not in ['', -1, 32767, np.nan]:\n data[nwsli]['pday'] = row['PcAccum'] * 0.00098425\n if row['Visibility'] not in ['', -1, 32767, np.nan]:\n data[nwsli]['vsby'] = row['Visibility'] / 1609.344\n\n # Do what we can with the surface data\n for _, row in surface.iterrows():\n nwsli = get_nwsli(row['Rpuid'])\n if nwsli is None:\n if int(row['Rpuid']) not in KNOWN_UNKNOWNS:\n print(('process_rwis: Unknown Rpuid: %s in sfc'\n '') % (row['Rpuid'],))\n continue\n ts = datetime.datetime.strptime(row['DtTm'], '%m/%d/%y %H:%M')\n ts = ts.replace(tzinfo=pytz.UTC)\n if nwsli not in data:\n data[nwsli] = {'valid': ts}\n sensorid = int(row['Senid'])\n key = 'sfvalid%s' % (sensorid,)\n data[nwsli][key] = ts\n key = 'scond%s' % (sensorid,)\n data[nwsli][key] = row['sfcond']\n # sftemp -150\n key = 'tsf%s' % (sensorid,)\n data[nwsli][key] = get_temp(row['sftemp'])\n # frztemp 32767\n # chemfactor 0\n # chempct 101\n # depth 32767\n # icepct 101\n # subsftemp NaN\n key = 'tsub%s' % (sensorid,)\n data[nwsli][key] = get_temp(row['subsftemp'])\n # waterlevel NaN\n # Unnamed: 13 NaN\n # Unnamed: 14 NaN\n\n return data", "def s4_1min_1freq(powerData1,timevec,elevaData,azitmData):\n\t#TODO : empezar desde el primer minuto del tiempo, no desde el inicio del dictionaries\n\t#TODO : calcular s4 para l1 y l2 al mismo tiempo, deberia reducir la mitad del tiempo\n\t#TODO : dont take into account snr2 if this comes with a lot of 0000000 zeros\n\ts4_values1=[]\n\n\ts4_times=[]\n\ts4_avgSNR1 = [] #\n\ts4_avgSNR2 = [] #\n\ts4_points1 = [] # s4_points_per_minute\n\n\ts4_timesr=[]\n\ts4_elev=[]\n\ts4_azit=[]\n\n\tfor eachminute in range(0,1440):\n\t\ts4_times.append(eachminute/60.0)\n\n\ttmp_amplitudes1 = []\n\ttmp_amplitudesdB1=[]\n\ttmp_elevations = []\n\ttmp_azimuths = []\n\n\tinit_index=0\n\n\tarr=np.array(timevec)+np.ones([len(timevec)])*(18.0/3600.0) # #SEPTENTRIO USES DATA from 1 MINUTE GPS TIME\n\t########################\n\tfor eachminute in s4_times:\n\t\tidxarray = (arr >= eachminute) & (arr < 
(eachminute+(1/60.0)) )# bool array\n\t\ttmp_amplitudesdB1 = powerData1[idxarray]\n\t\ttmp_elevations = elevaData[idxarray]\n\t\ttmp_azimuths = azitmData[idxarray]\n\t\ttmp_amplitudes1=list(map(pow10,tmp_amplitudesdB1))#use numpy.power\n\n\n\t\tif len(tmp_amplitudes1)>0:\n\t\t\ts4_1 = np.std(tmp_amplitudes1,ddof=1) / np.mean(tmp_amplitudes1)\n\t\telse:\n\t\t\ts4_1 = float(\"nan\")\n\n\t\ts4_values1.append(s4_1)\n\t\ts4_avgSNR1.append(np.mean(tmp_amplitudesdB1))\n\t\ts4_timesr.append(eachminute+1/60.0) #Septentrio has the timestamp 1 min in advance\n\t\ts4_points1.append(len(tmp_amplitudes1))\n\t\ts4_elev.append(np.mean(tmp_elevations))\n\t\ts4_azit.append(np.mean(tmp_azimuths))\n\n\treturn s4_values1,s4_timesr,s4_points1,s4_elev,s4_azit,s4_avgSNR1", "def __init__(self):\n self.timeMap = defaultdict(list)", "def __init__(self):\n self.timeMap = defaultdict(list)", "def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numerica for so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df", "def getPressures(self, flaggedmeteo, useWeatherStations=True, scaleHeight=500.):\n self._weather = self.asdm.weatherTable().get()\n self._station = self.asdm.stationTable().get()\n self._antenna = self.asdm.antennaTable().get()\n antennas = []\n wStationId = {}\n wStationName = {}\n wStationDistance = {}\n flagged_meteo = flaggedmeteo.split()\n count = {}\n self.meanDeltaPressure = {}\n \n centralStationId = Tag(0)\n #for r in self._station:\n # if str(r.name()) == \"MeteoCentral\":\n # centralStationId = r.stationId()\n for r in 
self._station:\n if str(r.name()) == \"MeteoTB2\":\n centralStationId = r.stationId()\n \n if centralStationId == Tag(0):\n print(\"== no central station\")\n return\n refPos = self.asdm.stationTable().getRowByKey(centralStationId).position()\n refVector = pl.array([refPos[0].get(),refPos[1].get(),refPos[2].get()])\n for row in self._antenna:\n ant = row.name()\n antennas.append(ant)\n count[ant] = 0\n self.meanDeltaPressure[ant] = 0\n if useWeatherStations:\n stationId = row.stationId()\n r0 = self.asdm.stationTable().getRowByKey(stationId)\n\n d2min = 1e12\n for r in self._station:\n if (str(r.type()) == 'WEATHER_STATION') and (str(r.name()) not in flagged_meteo):\n d2 = 0\n for i in range(3):\n d2 += (r0.position()[i].get()-r.position()[i].get())**2\n if d2 < d2min: \n rows = self.asdm.weatherTable().getByContext(r.stationId())\n # test th epressure\n if rows[0].pressure().get() > 1000:\n # \n wStationName[ant] = r.name()\n wStationId[ant] = r.stationId()\n wStationDistance[ant] = sqrt(d2)\n d2min = d2\n print('%s/%s : Weather station %15s distance %10.2f m' \\\n %(ant, r0.name(), wStationName[ant], wStationDistance[ant])) \n \n self.deltaPressures = {}\n self.centralPressure = {}\n self.centralWaterPressure = {}\n self.centralTemperature = {}\n self.minPressure = 1e10\n self.maxPressure = -1e10\n \n for row in self.asdm.calDataTable().get():\n if str(row.calType()) == \"CAL_WVR\":\n scan = row.scanSet()[0]\n if scan not in list(self.scanArrayTimes.keys()):\n start = row.startTimeObserved().get()\n end = row.endTimeObserved().get()\n\n self.deltaPressures[scan] = {}\n rows = self.asdm.weatherTable().getByContext(centralStationId)\n for r in rows:\n ttt = r.timeInterval().start().get()\n if (ttt > start) and (ttt < end):\n found = True\n self.centralPressure[scan] = r.pressure().get()\n self.centralTemperature[scan] = r.temperature().get()\n for wvrrow in self.asdm.calWVRTable().get():\n #print wvrrow.calDataId(), row.calDataId()\n if wvrrow.antennaName() == self.refAntenna:\n if wvrrow.calDataId() == row.calDataId():\n water = wvrrow.water().get() # meters\n break\n # assuming scale height of 1000m\n scaleHeight = 1000.\n self.centralWaterPressure[scan] = self.centralTemperature[scan]*water*1000./217.*100*(1000./scaleHeight) ## in pascals.\n print(\"=== scan %2s pres %7.3f mb temp %7.3f K w %6.3f mm ppH2O %7.3f mb\" %\\\n (scan, self.centralPressure[scan]/100., self.centralTemperature[scan], water*1000, self.centralWaterPressure[scan]/100.))\n self.minPressure = min(self.minPressure, self.centralPressure[scan])\n self.maxPressure = max(self.minPressure, self.centralPressure[scan])\n\n for ant in antennas:\n # print \"antenna \", ant \n water = 0\n for wvrrow in self.asdm.calWVRTable().get():\n if wvrrow.antennaName() == ant:\n if wvrrow.calDataId() == row.calDataId():\n water = wvrrow.water().get() # meters\n break\n temp = self.centralTemperature[scan]\n water_pressure = temp*water*1000./217.*100.*(1000./scaleHeight) # pascals\n self.deltaPressures[scan][ant] = \\\n - (water_pressure-self.centralWaterPressure[scan] ) \n if useWeatherStations:\n rows = self.asdm.weatherTable().getByContext(wStationId[ant])\n sRow = self.asdm.stationTable().getRowByKey(wStationId[ant])\n pos = sRow.position()\n padVector = pl.array([pos[0].get(),pos[1].get(),pos[2].get()]) \n diffVector = padVector - refVector\n diffHeight = sqrt(padVector[0]**2+padVector[1]**2+padVector[2]**2)\n diffHeight -= sqrt(refVector[0]**2+refVector[1]**2+refVector[2]**2)\n found = False\n pres = 0\n for r in rows:\n ttt = 
r.timeInterval().start().get()\n if (ttt > start) and (ttt < end):\n found = True\n pres = r.pressure().get()\n temp = r.temperature().get()\n if found:\n self.deltaPressures[scan][ant] += \\\n pres - self.centralPressure[scan]*(1.-6.5e-3/293.5*diffHeight)**5.26 \n # if scan>1:\n self.meanDeltaPressure[ant] += self.deltaPressures[scan][ant]\n count[ant] += 1\n\n for ant in list(count.keys()):\n self.meanDeltaPressure[ant] /= count[ant]", "def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... 
done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def retrieve_timeseries(start_time, end_time, channel_name, IFO, frame_type):\n\td = pylal.frutils.AutoqueryingFrameCache(frametype=frame_type, scratchdir=None)\n\tdata = d.fetch(channel_name, start_time, end_time)\n\t\n\ttime_series = {\n\t\t'waveform': data.A,\n\t\t'dt' : data.metadata.dt,\n\t\t'fs' : 1.0/data.metadata.dt,\n\t}\n\treturn time_series", "def get_spatial(date, spatial_index,dataStruct,interval):\n from lon_to_m import lon_to_m # Function to turn longitude degrees into metres\n from lat_to_m import lat_to_m # Function to turn latitude degrees into metres\n import numpy as np\n\n available_indices = [\"NDVI\", \"NDWI\",\"MNDWI_SW1\",\"MNDWI_SW2\"]\n lonData, latData, spatialData = [], [], []\n \n id = dataStruct['id']\n lon = dataStruct['longitude']\n lat = dataStruct['latitude']\n\n full_month = {'lonData':[],'latData':[],'spatialData':[]}\n\n if interval == \"daily\":\n for i in range(len(id)):\n if date == int(id[i][12:]):\n \n # Appending longitude and latitude data \n lonData.append(lon_to_m(lon[i]))\n latData.append(lat_to_m(lat[i]))\n \n # Finding appropriate index data to append\n spatialData.append(dataStruct[spatial_index][i])\n elif interval == \"monthly\":\n # Getting Spatial data\n monthly_ids = []\n \n for each_id in id:\n if str(date)[:6] == each_id[12:18]:\n monthly_ids.append(each_id)\n\n for this_month in monthly_ids:\n holding_spatialData = []\n for i in range(len(id)):\n if this_month[12:] == id[i][12]:\n holding_spatialData.append(dataStruct[spatial_index][i])\n full_month['spatialData'].append(holding_spatialData)\n\n\n for j in range(len(full_month['spatialData'][0])): # Iterating through each point within each month (~1560)\n averaging = []\n for jj in range(len(full_month['spatialData'])): # iterating through each stored month (~4)\n \n averaging.append(full_month['spatialData'][jj][j])\n spatialData.append(np.nanmean(averaging))\n\n # Getting lat/lon\n if date == int(id[i][12:]):\n \n # Appending longitude and latitude data \n lonData.append(lon_to_m(lon[i]))\n latData.append(lat_to_m(lat[i]))\n\n\n \n return np.array(lonData), np.array(latData), np.array(spatialData)", "def _SetFmapInfo(self):\n for epi in self.pfiles + self.epirt_paths:\n self.info[epi]['fmapname'] = None\n self.info[epi]['fmap_entry'] = None\n for entry in self.entry_map['fmap']:\n fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']\n if self.info[entry]['plane'] == self.info[epi]['plane']:\n# Use the fieldmap acquired at the same plane.\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n else:\n# for fmap in self.fmaps.keys():\n for entry in self.entry_map['fmap']:\n# No fmap at same orientation, look for fmaps in other planes.\n# There won't be more than one, so it isn't much of a choice.\n fmap_name = self.info[entry]['imgfile'] + \\\n self.info[entry]['suffix']\n if self.info[entry]['plane'] == 'sagittal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'axial':\n 
self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'coronal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'oblique':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n self.info[epi]['plane'] = 'oblique'\n break", "def phases(self, min, max):\n #{{{ function to return dictionary of arrivals\n if config.debug: print \"Events():phases(%s,%s) \"%(min,max)\n\n phases = defaultdict(lambda: defaultdict(dict))\n\n assoc = False\n arrival = False\n\n dbname = self.dbcentral(min)\n\n if config.debug: print \"Events():phases(%s,%s) db:(%s)\"%(min,max,dbname)\n\n if not dbname: return phases\n\n try: \n db = datascope.dbopen (dbname , 'r' )\n db.lookup( table='arrival' )\n db.join( 'assoc' )\n nrecs = db.query(datascope.dbRECORD_COUNT)\n\n except:\n try:\n db = datascope.dbopen (dbname , 'r' )\n db.lookup( table='arrival')\n nrecs = db.query(datascope.dbRECORD_COUNT)\n\n except Exception,e:\n print \"Events: Exception on phases(): %s\" % e,phases\n return phases\n\n if not nrecs:\n try:\n db.close()\n except:\n pass\n return dict(phases)\n\n try:\n db.subset(\"%s <= time && time <= %s\" % (float(min),float(max)) )\n nrecs = db.query(datascope.dbRECORD_COUNT)\n except:\n nrecs = 0\n\n if not nrecs:\n try:\n db.close()\n except:\n pass\n return dict(phases)\n\n for p in range(nrecs):\n\n db.record = p\n\n if assoc:\n\n Sta, Chan, ArrTime, Phase = db.getv('sta','chan','time','phase')\n StaChan = Sta + '_' + Chan\n phases[StaChan][ArrTime] = Phase\n\n else:\n\n Sta, Chan, ArrTime, Phase = db.getv('sta','chan','time','iphase')\n StaChan = Sta + '_' + Chan\n phases[StaChan][ArrTime] = '_' + Phase\n\n\n if config.debug: print \"Phases(%s):%s\" % (StaChan,Phase)\n try:\n db.close()\n except:\n pass\n\n if config.debug: print \"Events: phases(): t1=%s t2=%s [%s]\" % (min,max,phases)\n\n return dict(phases)", "def extract_ace_data(event, start, stop):\n#\n#--- year of starting time\n#\n atemp = re.split(':', start)\n syear = int(float(atemp[0]))\n atemp = re.split(':', stop)\n eyear = int(float(atemp[0]))\n#\n#--- convert time in Chandra Time\n#\n lstart = start\n start = time.strftime('%Y:%j:%H:%M:00', time.strptime(start, '%Y:%m:%d:%H:%M'))\n stop = time.strftime('%Y:%j:%H:%M:00', time.strptime(stop, '%Y:%m:%d:%H:%M'))\n start = int(Chandra.Time.DateTime(start).secs)\n stop = int(Chandra.Time.DateTime(stop).secs)\n#\n#--- set to data collecting period\n#\n pstart = start - 2 * aday\n period = int((stop - start) / (5 * aday)) + 1\n pstop = start + 5 * period * aday\n\n data = []\n for year in range(syear, eyear+1):\n ifile = data_dir + 'rad_data' + str(syear)\n tdata = mcf.read_data_file(ifile)\n data = data + tdata\n\n hline = 'Science Run Interruption: ' + lstart + '\\n'\n hline = hline + 'dofy electron38 electron175 protont47 proton112 '\n hline = hline + 'proton310 proton761 proton1060 aniso\\n'\n hline = hline + '-' * 100\n\n for ent in data:\n atemp = re.split('\\s+', ent)\n if atemp[0].isdigit():\n ltime = atemp[0] + ':' + atemp[1] + ':' + atemp[2] + ':' + atemp[3]\n ltime = time.strftime('%Y:%j:%H:%M:00', time.strptime(ltime, '%Y:%m:%d:%H%M'))\n stime = int(Chandra.Time.DateTime(ltime).secs)\n if (stime >= pstart) and (stime < pstop):\n hline = hline + '%3.4f\\t' % chandara_time_to_yday(stime, syear) \n hline = hline + atemp[7] + '\\t' + atemp[8] + '\\t'\n hline = hline + atemp[10] + '\\t' + atemp[11] + '\\t'\n hline = 
hline + atemp[12] + '\\t' + atemp[13] + '\\t'\n hline = hline + atemp[14] + '\\t' + atemp[15] + '\\n'\n\n#\n#--- print out the data\n#\n ofile = wdata_dir + event + '_dat.txt'\n\n with open(ofile, 'w') as fo:\n fo.write(hline)", "def get_data(station,starttime,endtime,activity=False,\n rep='/GNOMEDrive/gnome/serverdata/',resample=None):\n setname = \"MagneticFields\"\n dstr = ['%Y','%m','%d','%H','%M']\n dsplit = '-'.join(dstr[:starttime.count('-')+1])\n start = datetime.strptime(starttime,dsplit)\n starttime = construct_utc_from_metadata(start.strftime(\"%Y/%m/%d\"),\n start.strftime(\"%H:%M:%S.%d\"))\n dsplit = '-'.join(dstr[:endtime.count('-')+1])\n end = datetime.strptime(endtime,dsplit)\n endtime = construct_utc_from_metadata(end.strftime(\"%Y/%m/%d\"),\n end.strftime(\"%H:%M:%S.%d\"))\n dataset = []\n for date in numpy.arange(start,end,timedelta(minutes=1)):\n date = date.astype(datetime)\n path1 = rep+station+'/'+date.strftime(\"%Y/%m/%d/\")\n path2 = station+'_'+date.strftime(\"%Y%m%d_%H%M*.hdf5\")\n fullpath = os.path.join(path1,path2)\n dataset += glob.glob(fullpath)\n if len(dataset)==0:\n print \"ERROR: No data files were found...\"\n quit()\n file_order,data_order = {},{}\n for fname in dataset:\n hfile = h5py.File(fname, \"r\")\n segfile = file_to_segment(hfile,setname)\n file_order[segfile] = fname\n data_order[segfile] = hfile\n # Extract sample rate from metadata of last read data file\n sample_rate = hfile[setname].attrs[\"SamplingRate(Hz)\"]\n # Estimate full segment activity list\n activity = create_activity_list(station,data_order)\n # Generate an ASCII representation of the GPS timestamped\n # segments of time covered by the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Create list of time series from every segment\n ts_list = generate_timeseries(file_order,setname)\n # Retrieve channel data for all the segments\n full_data = numpy.hstack([retrieve_channel_data(data_order[seg],setname)\n for seg in seglist])\n new_sample_rate = sample_rate if resample==None else resample\n new_data_length = len(full_data)*new_sample_rate/float(sample_rate)\n full_data = scipy.signal.resample(full_data,int(new_data_length))\n # Models a time series consisting of uniformly sampled scalar values\n ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,\n epoch=seglist[0][0])\n for v in data_order.values():\n v.close()\n return ts_data,ts_list,activity,int(starttime),int(endtime)", "def main():\n # Pull variables from pf\n profileref = pfget('google_mapfeed.pf', profile)\n dbname = profileref['dbname']\n path = profileref['webbase']\n finalfile = '%s/%s' % (path, profileref['file'])\n bufferfile = '%s+' % finalfile\n max_nquakes = 600\n element_fields = ['lat', 'lon', 'depth', 'time', 'local_timestring', 'utc_timestring', 'magnitude', 'auth']\n\n if verbose:\n print \"Start: Creating main JSON file '%s' for all stations at %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n now = time.time()\n # Set time zone\n os.putenv('TZ','US/Pacific')\n time.tzset()\n if verbose:\n print \"The time zone is: %s\" % (time.tzname)[0]\n print \"The current time is: %s\" % now\n\n # Override defaults\n if override_number:\n if verbose:\n print \"Overriding default number of events (%d) with %d\" % (max_nquakes, override_number)\n nquakes = override_number\n else:\n nquakes = max_nquakes\n if override_timerange:\n if verbose:\n print \"Overiding default number of events (%d) with time range %d seconds\" % 
(max_nquakes, override_timerange)\n nquakes = False\n\n # Database processing\n if verbose:\n print \"Opening database\";\n print \"Number of events requested: %s\" % nquakes\n db = dbopen(dbname, 'r')\n\n '''\n Occasionally there is more than one magnitude for a single orid\n (such as provided by QED). We need the most recent magnitude for\n a given orid, so sort on orid and lddate, then group on orid,\n then get the most recent record number (greatest lddate) for each\n group. Add that to a dictionary we will use later.\n '''\n netmag_dict = {}\n db_netmag = dblookup(db, table='netmag')\n db_netmag.sort(['orid', 'lddate'])\n db_netmag_grp = dbgroup(db_netmag, 'orid')\n if verbose:\n print \"There are %s records\" % db_netmag_grp.query('dbRECORD_COUNT')\n for i in range(db_netmag_grp.query('dbRECORD_COUNT')):\n db_netmag_grp[3] = i\n orid, [dbptr, view, end_record, start_record] = db_netmag_grp.getv('orid', 'bundle')\n if verbose:\n print \"\\t- Iteration: %s: Orid: %s, Start record: %s, End record: %s\"% (i, orid, start_record, end_record)\n db_netmag[3] = end_record - 1\n if verbose:\n print \"\\t\\t- Magnitude: %s, Magtype: %s\" % (db_netmag.getv('magnitude')[0], db_netmag.getv('magtype')[0] )\n magnitude, magtype = db_netmag.getv('magnitude', 'magtype')\n netmag_dict[orid] = { 'rec':end_record, 'magnitude':magnitude, 'magtype':magtype }\n\n '''\n if verbose:\n for key in sorted(netmag_dict.iterkeys()):\n print \"%s: %s\" % (key, netmag_dict[key])\n '''\n\n '''\n Now get the event information\n '''\n db.lookup(table='origin')\n db.join('event')\n if verbose:\n print \"Number of joined records of event and origin tables: %s\" % db.query('dbRECORD_COUNT')\n if override_timerange:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - 'time >= %s'\" % (override_timerange, override_oldest)\n db.subset('time >= %d' % override_oldest)\n if verbose:\n print \"Subset on time. 
Number of records: %s\" % db.query('dbRECORD_COUNT')\n # Join views\n # db_joined = dbjoin(db, db_netmag)\n\n if verbose:\n print \"Subset orid == prefor\"\n db.subset('orid == prefor')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n print \"Subset for time != NULL\"\n db.subset('time != NULL')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n # We want the most recent first for the comparison with nquakes\n db.sort(['time'], reverse=True)\n if verbose:\n print \"Number of sorted records: %s\" % db.query('dbRECORD_COUNT')\n if nquakes:\n if db.query('dbRECORD_COUNT') > nquakes:\n db[3] = nquakes - 1\n min_time = db.getv('time')[0]\n db.subset(\"time >= %s\" % min_time)\n else:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - time > %s\" % (override_timerange, override_oldest)\n db.subset(\"time >= %s\" % override_oldest)\n # Sort in normal time - we want the most recent events plotted on top\n db.sort(('time'))\n if verbose:\n print \"Number of records without subset on time: %s\" % db.query('dbRECORD_COUNT')\n '''\n Build event dictionary\n '''\n event_dict = {'metadata':{},'events':{}}\n\n '''\n Build metadata dictionary\n '''\n if nquakes:\n event_dict['metadata']['max_nquakes'] = nquakes\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(min_time), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(min_time)\n event_dict['metadata']['type'] = 'event_limited'\n elif override_oldest:\n event_dict['metadata']['time_range'] = int(override_timerange)\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(override_oldest), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(override_oldest)\n event_dict['metadata']['type'] = 'time_limited'\n event_dict['metadata']['modification_time'] = int(time.time())\n event_dict['metadata']['modification_time_readable'] = epoch2str( int(time.time()), \"%H:%M UTC %A %B %o, %Y\" )\n\n '''\n Build event dictionary\n '''\n events = {}\n for i in range(db.query('dbRECORD_COUNT')):\n db[3] = i\n if verbose:\n epoch_time, orid = db.getv('time', 'orid')\n print \"\\tRecord number is: %s Orid is: %d Time is: %s\" % (db[3], orid, epoch2str(epoch_time, '%Y-%m-%d %H:%M:%S'))\n\n orid = db.getv('orid')[0]\n\n if orid in netmag_dict:\n events[i] = {}\n for ef in element_fields:\n # Parse values\n if ef is 'local_timestring' or ef is 'utc_timestring' or ef is 'time':\n value = dbgetv(db, 'time')[0]\n difference = float(now) - float(value)\n if difference < 6 * 3600:\n color = 'red'\n elif difference < 12 * 3600:\n color = 'orange'\n elif difference < 24 * 3600:\n color = 'yellow'\n elif difference < 72 * 3600:\n color = 'chartreuse'\n elif difference < 168 * 3600:\n color = 'blue'\n else:\n color = 'grey'\n events[i]['color'] = color\n elif ef is 'depth':\n value = dbgetv(db, 'depth')[0]\n elif ef is 'auth':\n value = dbgetv(db, 'auth')[0]\n elif ef is 'magnitude':\n # Magnitude\n # mlval, mbval, msval, magnitudeval, magtypeval = db.getv('ml', 'mb', 'ms', 'magnitude', 'magtype')\n # Null magnitude is -999.00\n magnitudeval = netmag_dict[orid]['magnitude']\n magtypeval = netmag_dict[orid]['magtype']\n if int(magnitudeval) > 0:\n scale = magtypeval\n value = '%.1f' % magnitudeval\n else:\n scale = ''\n value = 'N/A'\n events[i]['scale'] = scale\n else:\n value = dbgetv(db, ef)\n\n # Override formatting for specific fields\n if ef is 'lat' or 
ef is 'lon':\n value = '%.4f' % value\n elif ef is 'local_timestring':\n value = epoch2str( value, \"%H:%M:%S %Z %A %B %o, %Y\", \"US/Pacific\" )\n elif ef is 'utc_timestring':\n value = epoch2str( value, \"%H:%M:%S UTC %A %B %o, %Y\" )\n events[i][ef] = value\n\n full_lat, full_lon = db.getv('lat', 'lon')\n events[i]['grname'] = (grname(full_lat,full_lon)).title()\n events[i]['srname'] = (srname(full_lat,full_lon)).title()\n\n event_dict['events'] = events\n\n # Dump JSON file\n f = open(bufferfile, 'w') \n json.dump(event_dict, f, sort_keys=True, indent=2)\n f.flush()\n\n # Move the file to replace the older one\n try:\n os.rename(bufferfile, finalfile)\n except OSError:\n print \"Cannot rename JSON file from %s to %s\" % (bufferfile,finalfile)\n\n if verbose:\n print \"End: Creating main JSON file '%s' for all stations %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n db.close()\n return 0", "def GEEviLandsat(ptsFile,metric,timeStep,sensor,buf,poly,username,folderOut, scalePix = 30):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n #define dictionary for raster random names\n sensor_d = {}\n sensor_d['L4'] = 'LANDSAT/LT04/C01/T1_SR'\n sensor_d['L5'] = 'LANDSAT/LT05/C01/T1_SR'\n sensor_d['L7'] = 'LANDSAT/LE07/C01/T1_SR'\n sensor_d['L8'] = 'LANDSAT/LC08/C01/T1_SR'\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n \n \n #Computes the bits we need to extract.\n def getQABits(image, start, end, newName):\n pattern = 0\n listB = list(range(start, end+1))\n for one in listB:\n pattern += math.pow(2, one)\n pattern = int(pattern)\n \n return (image.select([0], [newName])\n .bitwiseAnd(pattern)\n .rightShift(start))\n \n for sen in sensor:\n LS = ee.ImageCollection(sensor_d[sen])\n #senL = [sen]\n \n def maskbyBits(img):\n QA = img.select('pixel_qa')\n QA1 = getQABits(QA, 3, 3, 'QA')\n QA2 = getQABits(QA, 5, 5, 'QA')\n\n mask = QA1.eq(0).And(QA2.eq(0))\n return img.updateMask(mask)\n \n LSm = LS.map(maskbyBits)\n \n lastImage = ee.Image(ee.ImageCollection(sensor_d[sen])\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = ee.Image(ee.ImageCollection(sensor_d[sen])\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n \n startYear = int(firstImageDate[(len(firstImageDate)-8):(len(firstImageDate)-4)])\n endYear = int(lastImageDate[(len(lastImageDate)-8):(len(lastImageDate)-4)])\n startMonth = int(firstImageDate[(len(firstImageDate)-4):(len(firstImageDate)-2)])\n endMonth = int(lastImageDate[(len(lastImageDate)-4):(len(lastImageDate)-2)])-1\n startYearAll = startYear + 1\n endYearAll = endYear - 1\n \n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n for met in metric:\n # metL = [met]\n\n if (sen == 'L8' and met == \"NDVI\"):\n bands = ['B5', 'B4']\n elif (sen != 'L8' and met == \"NDVI\"):\n bands = ['B4', 'B3']\n elif (sen == 'L8' and met == \"NDWI\"):\n bands = ['B5', 'B6']\n elif (sen != 'L8' and met == \"NDWI\"):\n bands = ['B4', 'B5']\n elif (sen == 'L8' and met == \"NBR\"):\n bands = ['B5', 'B7']\n elif (sen != 'L8' and met == 
\"NBR\"):\n bands = ['B4', 'B7']\n #else:\n #print(\"wrong metric specified\")\n \n def addVI(image):\n vi = (image.normalizedDifference(bands)\n .rename('VI'))\n return image.addBands(vi)\n\n withVI = LSm.map(addVI)\n\n VI_col = withVI.select('VI')\n\n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (VI_col\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (VI_col\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (VI_col\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (VI_col\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif timeStep == 'lowest':\n\n img_col = VI_col\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for Landsat: ' + sen + '_' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for Landsat: ' + sen + '_' + met)\n\n else:\n def table_m(image):\n table = (image\n .select('VI')\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_'+str(sen)+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 
'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for Landsat: ' + sen + '_' + met)", "def __init__(self):\r\n # DEFAULT SETTINGS\r\n self.ifph5 = False # PH5 data format from PASSCAL (Denali nodal data)\r\n self.client_name = 'IRIS' # IRIS, LLNL, NCEDC\r\n # idb = database index: OBSOLETE -- use client_name instead\r\n self.idb = None # =1-IRIS (default); =2-AEC; =3-LLNL; =4-Geoscope\r\n\r\n # event parameters\r\n self.use_catalog = 1 # =1: use an existing catalog (=1); =0: specify your own event parameters (see iex=9)\r\n self.sec_before_after_event = 10 # time window to search for a target event in a catalog\r\n self.tbefore_sec = 100\r\n self.tafter_sec = 300\r\n # These are used only if self.use_catalog = 0\r\n self.evname = None\r\n self.otime = None\r\n self.elat = None\r\n self.elon = None\r\n self.edep = None\r\n self.emag = None\r\n # Refernce origin (dummy values)\r\n self.rlat = None\r\n self.rlon = None\r\n self.rtime = None\r\n # event objects\r\n self.ev = Event()\r\n self.ref_time_place = Event()\r\n\r\n # station parameters\r\n self.network = '*' # all networks\r\n self.station = '*' # all stations\r\n #self.station = '*,-PURD,-NV33,-GPO' # all stations except -(these)\r\n self.channel = '*' # all channels \r\n self.location = '*' # all locations\r\n self.min_dist = 0 \r\n self.max_dist = 20000\r\n self.min_az = 0 \r\n self.max_az = 360\r\n self.min_lat = None\r\n self.max_lat = None\r\n self.min_lon = None\r\n self.max_lon = None\r\n self.overwrite_ddir = 1 # 1 = delete data directory if it already exists\r\n self.icreateNull = 0 # create Null traces so that rotation can work (obspy stream.rotate require 3 traces)\r\n # this might be helpful if missing a vertical component only\r\n self.phase_window = False # Grab waveforms using phases #WARNING this will cut the waveform to be near the phase arrival\r\n self.phases = [\"P\",\"P\"] # Phases to write to sac files or grab data from\r\n self.write_sac_phase = False # put phase information in sac files\r\n self.taupmodel= \"ak135\"\r\n # Filter parameters\r\n self.ifFilter = False \r\n #------Filter--------------\r\n # for 'bandpass' both f1 and f2 are used\r\n # for 'lowpass' only f2 is used\r\n # for 'highpass' only f1 is used\r\n #\r\n # EXAMPLES\r\n # ifFilter zerophase remove_response ipre_filt\r\n # A. CAP-ready waveforms [DEFAULT]: False NA True 1\r\n # B. plot-ready waveforms, acausal: True True True 2\r\n # C. plot-ready, causal waveforms: True False True 0\r\n # D. 
possible sensor issues: True False False NA\r\n #\r\n self.filter_type = 'bandpass'\r\n # f1 should consider the requested length of the time series\r\n # f2 should consider the sampling rate for the desired channels\r\n self.f1 = 1/40 # fmin - highpass will keep frequencies larger than fmin\r\n self.f2 = 1/10 # fmax - lowpass will keep frequencies lower than fmax\r\n self.zerophase = True # = False (causal/one-pass), = True (acausal/two-pass)\r\n # 4 pole filter is more sharper at the edges than 2 pole\r\n self.corners = 4 # Is corner in Obspy same as Pole in SAC?\r\n \r\n # Pre-filter parameters\r\n self.ipre_filt = 1 # =0 No pre_filter\r\n # =1 default pre_filter (see getwaveform_iris.py)\r\n # =2 user-defined pre_filter (use this if you are using bandpass filter)\r\n # For tapering down the pre-filter\r\n # Perhaps you want to set ipre_filt = 0 to prevent extra filtering\r\n # pre-filter for deconvolution\r\n # https://ds.iris.edu/files/sac-manual/commands/transfer.html\r\n # Pre-filter will not be applied if remove_response = False \r\n self.f0 = 0.5*self.f1\r\n self.f3 = 2.0*self.f2\r\n self.pre_filt=(self.f0, self.f1, self.f2, self.f3) # applies for ipre_filt = 2 only\r\n # self.pre_filt = (0.005, 0.006, 10.0, 15.0) # BH default\r\n self.water_level = 60\r\n\r\n # For CAP\r\n self.resample_TF = False # if False then resample_freq is taken from SAC files\r\n self.resample_freq = 50 # 0 causes errors. Use resample_TF instead\r\n self.scale_factor = 1 # for CAP use 10**2 (to convert m/s to cm/s)\r\n\r\n # Pre-processing (mainly for CAP)\r\n self.output_cap_weight_file = True# output cap weight files\r\n self.detrend = True # detrend waveforms\r\n self.demean = True # demean waveforms\r\n self.taper = False # this could also be a fraction between 0 and 1 (fraction to be tapered from both sides)\r\n self.output_event_info = True # output event info file\r\n self.outformat = 'VEL' # Instrument-response-removed waveforms saved as VEL, DISP, or ACC\r\n self.ifsave_sacpaz = False # save sac pole zero (needed as input for MouseTrap module)\r\n self.remove_response = True # remove instrument response \r\n self.iplot_response = False # plot response function\r\n self.ifplot_spectrogram = False # plot spectrograms \r\n self.ifsave_stationxml = True # save station xml file (for adjoint tomo)\r\n\r\n # options for rotation and for writing sac files\r\n self.rotateRTZ = True # Rotate and save the RTZ components\r\n self.rotateUVW = False # Rotate and save the UVW components\r\n self.isave_raw = False # save raw waveforms\r\n self.isave_raw_processed = True # save processed waveforms just before rotation to ENZ\r\n self.rotateENZ = True # rotate extracted waveforms to ENZ\r\n self.isave_ENZ = True # save ENZ\r\n\r\n # username and password for embargoed IRIS data\r\n # Register here: http://ds.iris.edu/ds/nodes/dmc/forms/restricted-data-registration/\r\n self.user = None\r\n self.password = None\r\n\r\n # To output lots of processing info\r\n self.ifverbose = True\r\n\r\n # save RTZ as asdf files\r\n self.ifsave_asdf = False\r\n\r\n # Use mass downloader instead\r\n self.ifmass_downloader = False\r\n\t\r\n self.remove_clipped = False\r\n if self.remove_clipped is True:\r\n self.isave_raw = True", "def GEEetMODIS(ptsFile,metric,timeStep,buf,poly,QC,username,folderOut, scalePix = 1000, startYear = None, endYear = None):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n 
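# A usage sketch for this exporter, assuming a point/polygon table already uploaded to the
# caller's Earth Engine assets; the asset name, username and Drive folder below are
# hypothetical placeholders, not taken from this file, and every feature is expected to
# carry the "geeID" property used as the key in the exported CSV:
#   GEEetMODIS('myPoints', ['ET'], 'month', buf=0, poly=0, QC='Op1',
#              username='myEEuser', folderOut='gee_exports')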
#load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n \n #Computes the bits we need to extract.\n def getQABits(image, start, end, newName):\n pattern = 0\n listB = list(range(start, end+1))\n for one in listB:\n pattern += math.pow(2, one)\n pattern = int(pattern)\n \n return (image.select([0], [newName])\n .bitwiseAnd(pattern)\n .rightShift(start))\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n \n for met in metric:\n modisET = ee.ImageCollection('MODIS/006/MOD16A2')\n metL = [met]\n \n def maskbyBits1(img):\n QA = img.select('ET_QC')\n QA1 = getQABits(QA, 0, 0, 'QA')\n QA2 = getQABits(QA, 2, 2, 'QA')\n QA3 = getQABits(QA, 3, 4, 'QA')\n QA4 = getQABits(QA, 5, 7, 'QA')\n mask = QA1.eq(0).And(QA2.eq(0)).And(QA3.eq(0)).And(QA4.lt(4))\n return img.updateMask(mask)\n\n if QC == 'None':\n modisETn = modisET\n elif QC == 'Op1':\n modisETn = modisET.map(maskbyBits1)\n #modify so that divT gets calculated as 8, if date < 12/26\n #and gets a value of either 5 or 6 accordingly if >\n #also update start and end year\n def scale1(img):\n \n daysT = ee.Number(ee.Date(img.date().get('year').format().cat('-12-31')).getRelative('day','year')).add(1)\n divT = daysT.subtract(img.date().getRelative('day','year')).min(8)\n \n return (img.select(metL[0])\n .float()\n .multiply(0.1)\n .divide(divT)\n .copyProperties(img,['system:time_start','system:time_end']))\n \n modisETm = modisETn.map(scale1)\n \n lastImage = ee.Image(ee.ImageCollection('MODIS/006/MOD16A2')\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = ee.Image(ee.ImageCollection('MODIS/006/MOD16A2')\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n\n if all([startYear is None,endYear is None]):\n startYear = int(firstImageDate[0:4])\n endYear = int(lastImageDate[0:4])\n startMonth = int(firstImageDate[5:7])\n endMonth = int(lastImageDate[5:7])-1\n startYearAll = startYear + 1\n endYearAll = endYear - 1\n\n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n elif all([startYear >= 0,endYear >= 0]):\n startYearReal = int(firstImageDate[0:4])\n endYearReal = int(lastImageDate[0:4]) \n \n years = list(range(max(startYearReal,startYear), (min(endYearReal,endYear) + 1)))\n \n if endYear >= endYearReal:\n endMonth = int(lastImageDate[5:7])-1\n endYearReal2 = endYearReal-1\n years2 = len(years)-1\n elif endYear < endYearReal:\n endMonth = 0\n endYearReal2 = endYearReal\n years2 = len(years)\n \n if startYear <= startYearReal:\n startMonth = int(firstImageDate[5:7])\n startYearReal2 = startYearReal+1\n elif startYear > startYearReal:\n startMonth = 0\n startYearReal2 = startYearReal\n \n monthsEE = ee.List(list(range(startMonth,(12*years2+endMonth))))\n yearsEE = ee.List(list(range(max(startYearReal2,startYear), (min(endYearReal2,endYear) + 1))))\n\n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (modisETm\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (modisETm\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = 
i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (modisETm\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (modisETm\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([timeStep == 'lowest',endYear is None, startYear is None]):\n\n img_col = modisETm\n \n elif all([timeStep == 'lowest',endYear > 0, startYear > 0]):\n\n img_col = modisETm.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n # print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD16A2_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for: ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD16A2_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for: ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD16A2_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for: ' + met)", "def get_data(self):\n# epoch_from = 1301641200\n# epoch_to = epoch_from+60*60*24\n \"\"\"\n letting runs finish for 2 more hours\n ideally, want to make this a function of time from schedule plus some\n variation, like 1 hour just in case\n \"\"\" \n# epoch_to_adjusted = epoch_to + 7200\n conn = self.connect_to_mongo()\n db = conn.muni\n \n# print \"==== Collecting starting runs from %s to %s ====\"\\\n# % (str(time.ctime(epoch_from)), str(time.ctime(epoch_to)))\n \"\"\"\n > 
db.location.find({loc:{$within:{$center:[[37.80241, -122.4364],\n 0.01]}}})\n > db.location.find({loc:{$within:{$center:[[37.76048, -122.38895],\n 0.002]}}})\n \"\"\"\n bus_ids = db.location.find({'route':self.route_name}).distinct(\"bus_id\")\n for bus_id in bus_ids:\n c_start = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.start_lat, self.start_lon],\n self.start_prec]}}\n }).sort(\"cur_time\", DESCENDING)\n self.massage_start_data(c_start)\n \"\"\"\n TODO: the end point seems to be too nice to Muni, need to tighten\n the circle a little\n \"\"\"\n c_end = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.end_lat, self.end_lon],\n self.end_prec]}}\n }).sort(\"cur_time\", ASCENDING)\n self.massage_end_data(c_end)\n if self.to_log:\n print self.start_bus_ids_to_times\n print self.end_bus_ids_to_times\n \n return self.start_bus_ids_to_times, self.end_bus_ids_to_times", "def __init__(self):\r\n self.label = \"Create Inflow File From ECMWF Runoff\"\r\n self.description = (\"Creates RAPID NetCDF input of water inflow \" +\r\n \"based on ECMWF runoff results and previously created weight table.\")\r\n self.canRunInBackground = False\r\n #CJB self.header_wt = ['StreamID', 'area_sqm', 'lon_index', 'lat_index', 'npoints']\r\n self.header_wt = ['rivid', 'area_sqm', 'lon_index', 'lat_index', 'npoints']\r\n #SDR added new structure to fit new ecmwf ##.runoff.nc file order\r\n #self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time']]\r\n self.dims_oi = [['lon', 'lat', 'time'], ['longitude', 'latitude', 'time'], ['time','lon','lat']] # Line Added/Modified CJB 20190108\r\n #self.vars_oi = [[\"lon\", \"lat\", \"time\", \"RO\"], ['longitude', 'latitude', 'time', 'ro']]\r\n self.vars_oi = [[\"lon\", \"lat\", \"time\", \"RO\"], ['longitude', 'latitude', 'time', 'ro'], [\"time\", \"lon\", \"lat\", \"RO\"]] # Line Added/Modified CJB 20190108\r\n self.length_time = {\"LowRes\": 61, \"Low3HrRes\": 40, \"LowResFull\": 85,\"HighRes\": 125, \"High3HrRes\":3} # *** MJS What is High3HrRes for? 
Doesn't seem to be used.\r\n #self.length_time = {\"LowResFull\": 85,\"HighRes\": 125}\r\n self.length_time_opt = {\"LowRes-6hr\": 60, \"LowRes-3hr\": 40,\r\n \"LowResFull-3hr-Sub\": 48, \"LowResFull-6hr-Sub\": 36,\r\n \"HighRes-1hr\": 90, \"HighRes-3hr\": 48, \"HighRes-6hr\": 40, # *** MJS HighRes-3hr was changed to 40 before; why?\r\n \"HighRes-3hr-Sub\": 18, \"HighRes-6hr-Sub\": 16}\r\n self.errorMessages = [\"Missing Variable 'time'\",\r\n \"Incorrect dimensions in the input ECMWF runoff file.\",\r\n \"Incorrect variables in the input ECMWF runoff file.\",\r\n \"Incorrect time variable in the input ECMWF runoff file\",\r\n \"Incorrect number of columns in the weight table\",\r\n \"No or incorrect header in the weight table\",\r\n \"Incorrect sequence of rows in the weight table\"]", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def dataframe():\n\t#allows function to access station, gmt, and miss_station functions\n global stations\n\tglobal gmt\n\tglobal miss_station\n\t\n\t#read predictor file\n\tcontrol = cfg.read_yaml('../registry/graphs.yaml')\n\tpred_ctrl = cfg.read_yaml(cfg.get_config_path(control.pred_file))\n\tpredd_ctrl = cfg.read_yaml(cfg.get_config_path(control.predd_file))\n\n\t#get file paths and update database\n\tpredictor_file_path = control.predictor_file_path\n\tpredictand_file_path = control.predictand_file_path\n\tpred_file_id = update(predictor_file_path)\n\tpredd_file_id = update(predictand_file_path)\n\t\n\t#store lead time and date range\n\tlead_time = control.lead_time\n\tdate_range = control.date_range\n\n\t#get info for fetch many dates\n\tstart,end,stride = read_pred.parse_range(date_range)\n\tfcst_ref_time = control.date_range[0].split('-')[0][-2:]\n\t\n\t#initialize list of predictors\n\tpred_list = pred_ctrl.predictors\n\tpredictor = []\n\n\t#loops through predictors to build camps data objects\n\tfor entry_dict in pred_list:\n\t\t#formats metadata\n\t\tpred 
= create.preprocess_entries(entry_dict, fcst_ref_time)\n\t\t\n\t\t#adds info to metadata that's not currently being stored\n\t\tpred.search_metadata['reserved2'] = lead_time*3600\n pred.search_metadata['file_id'] = pred_file_id\n\t\tpred.search_metadata['reserved1'] = 'vector'\n\n\t\t#build camps data objects for each day\n\t\tvariable = fetch_many_dates(predictor_file_path,start,end,stride,pred.search_metadata)\n\t\t\n\t\t#appends all data to single camps object\n\t\tif variable[0] is not None:\n\t\t\tvar = variable[0]\n\t\t\tarrs = []\n\t\t\tfor i in range(len(variable)):\n\t\t\t\tarrs.append(variable[i].data)\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictor.append(var)\n\n\t#initializes list of predictands\n\tpredd_list = predd_ctrl.predictands\n predictand = []\n\t\n\t#loops through predictands to build camps data objects\n for entry_dict in predd_list:\n\t\t#formats metadata\n \tvertical_coordinate = entry_dict.pop('Vertical_Coordinate')\n\t\tentry_dict['file_id'] = predd_file_id\n\n\t\t#build camps objects for each day\n variable = fetch_many_dates(predictand_file_path,start, end, stride, entry_dict)\n\n\t\t#append all data to single camps object\n var = variable[0]\n arrs = []\n for i in range(len(variable)):\n arrs.append(variable[i].data)\n try:\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictand.append(var)\n\t\texcept:\n\t\t\tprint(\"Can't read \" + variable.name)\n\n\t#getting predictor station and time data\n\tpredr = Dataset(predictor_file_path[0])\n\tpredr_stat = predr.variables['station'][:]\n\tif lead_time == 3:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant'][:]\n\telif lead_time == 6:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant1'][:]\n\telif lead_time == 12:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant2'][:]\n\tpredr.close()\n\n\t#reformatting predictor station and time data\n\tpredr_stations = stations(predr_stat)\n\tpredr_gmt = gmt(predr_time)\n\t\n\t#getting predictand station and time data\n\tpredd = Dataset(predictand_file_path[0])\n\tpredd_stat = predd.variables['station'][:]\n\tpredd_time = predd.variables['OM__resultTime'][:]\n\tpredd.close()\n\t\n\t#reformatting predictand station and time data\n\tpredd_stations = stations(predd_stat)\n\tpredd_gmt = gmt(predd_time)\n\n\t#choosing predictand observations that line up with predictor time\n\thour = (predictor[0].metadata['FcstTime_hour']/3600) + lead_time\n\tdays = len(predd_gmt)/24\n\tpredd_hours = [0]*days\n k=0\n for i in range(len(predd_gmt)):\n if i%24 == hour:\n\t\t\tpredd_hours[k]=predd_gmt[i]\n\t\t\tk+=1\n\t\n\t#catches when GFS data doesn't cover the last day of the month\n\tif len(predr_gmt) < len(predd_hours):\n\t\tpredd_hours = predd_hours[:-1]\t\n\t\n\t#find missing stations\n\tmiss_stations = miss_station(predr_stations,predd_stations)\n\tstations = predd_stations\n\t\n\t#station and time array\n\tinfo = [['',''] for k in range(len(predr_gmt)*len(stations))]\n\tfor i in range(len(predr_gmt)):\n\t\tfor j in range(len(stations)):\n\t\t\tk = i*len(stations)+j\n\t\t\tinfo[k][0]=predr_gmt[i]\n\t\t\tinfo[k][1]=stations[j]\n\n\t#create column names\n\tnames = ['']*(len(predictor)+len(predictand)+2)\n\tnames[0]='Time'\n\tnames[1]='Station'\n\n\t#creating array\n\tarr = np.zeros((len(stations)*len(predr_gmt),len(predictor)+len(predictand)))\n\t\n\t#adding predictor data\n\tfor i in range(len(predictor)):\n\t\t#remove lead time and forecast reference time from variable name\n\t\t#and add variable name to column list of final dataframe\n\t\tif lead_time == 
12:\n\t\t\tnames[i+2]='GFS_'+predictor[i].get_variable_name()[:-11]\n\t\telse:\n\t\t\t names[i+2]='GFS_'+predictor[i].get_variable_name()[:-10]\n\n\t\t#create pandas dataframe of data and sort alphabetically by station name\n\t\tpredictor[i].data = np.squeeze(predictor[i].data,axis=2)\n\t\tpredictor[i].data = pd.DataFrame(predictor[i].data,columns=predr_stations,index=predr_gmt)\n\t\tpredictor[i].data = predictor[i].data.reindex(sorted(predictor[i].data.columns),axis=1)\n\t\t\n\t\t#remove stations with no predictand data\n\t\tk=0\n\t\ta=miss_stations[:]\n\t\tfor j in predictor[i].data.columns:\n\t\t\tif not a:\n\t\t\t\tbreak\n\t\t\tif j==a[k]:\n\t\t\t\tpredictor[i].data=predictor[i].data.drop(j,axis=1)\n\t\t\t\tdel a[k]\n\t\t\n\t\t#add data to final dataframe\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tarr[k][i] = predictor[i].data.iloc[b][c]\n\n\t#add predictand data\n\tfor i in range(len(predictand)):\n\t\t#removing extra underscore, adding variable name to column names\n\t\tnames[len(predictor)+2+i]='METAR_'+predictand[i].get_variable_name()[:-1]\n\t\n\t\t#resize array and create pandas dataframe\n\t\tpredictand[i].data = np.squeeze(predictand[i].data,axis=2)\n\t\tpredictand[i].data = pd.DataFrame(predictand[i].data,columns=predd_stations,index=predd_hours)\n\t\tpredictand[i].data = predictand[i].data.reindex(sorted(predictand[i].data.columns),axis=1)\n\t\t\n\t\t#remove extra days of predictand data\n\t\tpredictand[i].data = predictand[i].data.iloc[0:len(predr_time),:]\n\t\t\t\n\t\t#add predictand data to array\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tval = predictand[i].data.iloc[b][c]\n\t\t\t\t\n\t\t\t\t#catch metar fill data\n\t\t\t\tif val == 9999: \n\t\t\t\t\tval = np.nan\n\t\t\t\tarr[k][len(predictor)+i]=val\n\t\n\t#add station and time data to array and save as csv\n\tdata = np.concatenate([info,arr],axis = 1)\n\tto_save = pd.DataFrame(data,columns=names)\n\tto_save.to_csv(str(start)+'_'+str(end)+'_'+str(lead_time)+'hrs.csv')", "def analyze(self, event):\n pfcands = Collection(event, \"FatJetPFCands\")\n jets = Collection(event, \"FatJet\")\n svs = Collection(event, \"SV\")\n taus = Collection(event, \"Tau\")\n met = Object(event, \"MET\")\n pupmet = Object(event, \"PuppiMET\")\n\n IN_hadhad_v4p1_old = np.full(1, -1., dtype=np.float32)\n GRU_hadel_v6p1_old = np.full(1, -1., dtype=np.float32)\n GRU_hadmu_v6p1_old = np.full(1, -1., dtype=np.float32)\n\n IN_hadhad_v4p1 = np.full(1, -1., dtype=np.float32)\n GRU_hadel_v6p1 = np.full(1, -1., dtype=np.float32)\n GRU_hadmu_v6p1 = np.full(1, -1., dtype=np.float32)\n\n IN_hadhad_v4p1_ohe = np.full(1, -1., dtype=np.float32)\n IN_hadel_v4p1_ohe = np.full(1, -1., dtype=np.float32)\n IN_hadmu_v4p1_ohe = np.full(1, -1., dtype=np.float32)\n\n PostTagger_hadhad_v1p1 = np.full(1, -1., dtype=np.float32)\n PostTagger_hadel_v1p1 = np.full(1, -1., dtype=np.float32)\n PostTagger_hadmu_v1p1 = np.full(1, -1., dtype=np.float32)\n\n Ztagger_Zee = np.full(1, -1., dtype=np.float32)\n Ztagger_Zmm = np.full(1, -1., dtype=np.float32)\n Ztagger_Zhh = np.full(1, -1., dtype=np.float32)\n Ztagger_Zhe = np.full(1, -1., dtype=np.float32)\n Ztagger_Zhm = np.full(1, -1., dtype=np.float32)\n\n MassReg_hadhad = np.full(1, -1., dtype=np.float32)\n MassReg_hadel = np.full(1, -1., dtype=np.float32)\n MassReg_hadmu = np.full(1, -1., dtype=np.float32)\n\n jet_idx = -1\n min_dphi = 999.\n for ij, jet in enumerate(jets):\n if (jet.pt < 
200.): continue\n this_dphi = abs(signedDeltaPhi(met.phi, jet.phi))\n if (this_dphi < min_dphi):\n min_dphi = this_dphi\n jet_idx = ij\n pf_idx = 0\n\n for ij, jet in enumerate(jets):\n\n # if jet.pt < 400 or jet.msoftdrop < 30 : continue\n if (ij < jet_idx):\n pf_idx = pf_idx + jet.nPFConstituents\n continue\n elif (ij > jet_idx):\n continue\n if jet.nPFConstituents < 1: continue\n ##Fill basic jet properties\n jpt = jet.pt\n jLSpt = jet.LSpt\n jeta = jet.eta\n jphi = jet.phi\n jmsd = jet.msoftdrop\n jLSmsd = jet.LSmsoftdrop\n jm = jet.mass\n jdRLep = jet.dRLep\n jlsf3 = jet.lsf3\n jn2b1 = jet.n2b1\n jLSn2b1 = jet.LSn2b1\n jdeepTagZqq = jet.deepTagZqq\n jdeepTagWqq = jet.deepTagWqq\n jn3b1 = jet.n3b1\n jLSn3b1 = jet.LSn3b1\n try:\n jtau21 = float(jet.tau2) / float(jet.tau1)\n except:\n jtau21 = 0.\n try:\n jtau32 = float(jet.tau3) / float(jet.tau2)\n except:\n jtau32 = 0.\n try:\n jtau43 = float(jet.tau4) / float(jet.tau3)\n except:\n jtau43 = 0.\n try:\n jLStau21 = float(jet.LStau2) / float(jet.LStau1)\n except:\n jLStau21 = 0.\n try:\n jLStau32 = float(jet.LStau3) / float(jet.LStau2)\n except:\n jLStau32 = 0.\n try:\n jLStau43 = float(jet.LStau4) / float(jet.LStau3)\n except:\n jLStau43 = 0.\n\n jetv = ROOT.TLorentzVector()\n jetv.SetPtEtaPhiM(jet.pt, jet.eta, jet.phi, jet.mass)\n\n ##Fill SV\n svpt = np.zeros(self.Nsvs, dtype=np.float16)\n svdlen = np.zeros(self.Nsvs, dtype=np.float16)\n svdlenSig = np.zeros(self.Nsvs, dtype=np.float16)\n svdxy = np.zeros(self.Nsvs, dtype=np.float16)\n svdxySig = np.zeros(self.Nsvs, dtype=np.float16)\n svchi2 = np.zeros(self.Nsvs, dtype=np.float16)\n svpAngle = np.zeros(self.Nsvs, dtype=np.float16)\n svx = np.zeros(self.Nsvs, dtype=np.float16)\n svy = np.zeros(self.Nsvs, dtype=np.float16)\n svz = np.zeros(self.Nsvs, dtype=np.float16)\n svmass = np.zeros(self.Nsvs, dtype=np.float16)\n svphi = np.zeros(self.Nsvs, dtype=np.float16)\n sveta = np.zeros(self.Nsvs, dtype=np.float16)\n svv = ROOT.TLorentzVector()\n arrIdx = 0\n for isv, sv in enumerate(svs):\n if arrIdx == self.Nsvs: break\n svv.SetPtEtaPhiM(sv.pt, sv.eta, sv.phi, sv.mass)\n if jetv.DeltaR(svv) < 0.8:\n svpt[arrIdx] = sv.pt / jpt\n svdlen[arrIdx] = sv.dlen\n svdlenSig[arrIdx] = sv.dlenSig\n svdxy[arrIdx] = sv.dxy\n svdxySig[arrIdx] = sv.dxySig\n svchi2[arrIdx] = sv.chi2\n svpAngle[arrIdx] = sv.pAngle\n svx[arrIdx] = sv.x\n svy[arrIdx] = sv.y\n svz[arrIdx] = sv.z\n sveta[arrIdx] = sv.eta - jeta\n svphi[arrIdx] = signedDeltaPhi(sv.phi, jphi)\n svmass[arrIdx] = sv.mass\n arrIdx += 1\n\n # Fill Taus\n tau_charge = np.zeros(self.Ntaus, dtype=np.float16)\n tau_chargedIso = np.zeros(self.Ntaus, dtype=np.float16)\n tau_dxy = np.zeros(self.Ntaus, dtype=np.float16)\n tau_dz = np.zeros(self.Ntaus, dtype=np.float16)\n tau_eta = np.zeros(self.Ntaus, dtype=np.float16)\n tau_leadTkDeltaEta = np.zeros(self.Ntaus, dtype=np.float16)\n tau_leadTkDeltaPhi = np.zeros(self.Ntaus, dtype=np.float16)\n tau_leadTkPtOverTauPt = np.zeros(self.Ntaus, dtype=np.float16)\n tau_mass = np.zeros(self.Ntaus, dtype=np.float16)\n tau_neutralIso = np.zeros(self.Ntaus, dtype=np.float16)\n tau_phi = np.zeros(self.Ntaus, dtype=np.float16)\n tau_photonsOutsideSignalCone = np.zeros(self.Ntaus, dtype=np.float16)\n tau_pt = np.zeros(self.Ntaus, dtype=np.float16)\n tau_rawAntiEle = np.zeros(self.Ntaus, dtype=np.float16)\n tau_rawIso = np.zeros(self.Ntaus, dtype=np.float16)\n tau_rawIsodR03 = np.zeros(self.Ntaus, dtype=np.float16)\n tau_rawMVAoldDM2017v2 = np.zeros(self.Ntaus, dtype=np.float16)\n tau_rawMVAoldDMdR032017v2 = 
np.zeros(self.Ntaus, dtype=np.float16)\n tauv = ROOT.TLorentzVector()\n tauIdx = 0\n for tau in taus:\n if tauIdx == self.Ntaus:\n break\n tauv.SetPtEtaPhiM(tau.pt, tau.eta, tau.phi, tau.mass)\n if jetv.DeltaR(tauv) < 0.8:\n tau_charge[tauIdx] = tau.charge\n tau_chargedIso[tauIdx] = tau.chargedIso / tau.pt\n tau_dxy[tauIdx] = tau.dxy\n tau_dz[tauIdx] = tau.dz\n tau_eta[tauIdx] = tau.eta - jeta\n tau_leadTkDeltaEta[tauIdx] = tau.leadTkDeltaEta\n tau_leadTkDeltaPhi[tauIdx] = tau.leadTkDeltaPhi\n tau_leadTkPtOverTauPt[tauIdx] = tau.leadTkPtOverTauPt\n tau_mass[tauIdx] = tau.mass\n tau_neutralIso[tauIdx] = tau.neutralIso / tau.pt\n tau_phi[tauIdx] = signedDeltaPhi(tau.phi, jphi)\n tau_photonsOutsideSignalCone[tauIdx] = tau.photonsOutsideSignalCone\n tau_pt[tauIdx] = tau.pt / jpt\n tau_rawAntiEle[tauIdx] = tau.rawAntiEle\n tau_rawIso[tauIdx] = tau.rawIso / tau.pt\n tau_rawIsodR03[tauIdx] = tau.rawIsodR03\n tau_rawMVAoldDM2017v2[tauIdx] = tau.rawMVAoldDM2017v2\n tau_rawMVAoldDMdR032017v2[tauIdx] = tau.rawMVAoldDMdR032017v2\n tauIdx += 1\n\n ##find candidates associated to jet\n candrange = range(pf_idx, pf_idx + jet.nPFConstituents)\n\n ##Fill PF candidates\n pfpt = np.zeros(self.Nparts, dtype=np.float16)\n pfeta = np.zeros(self.Nparts, dtype=np.float16)\n pfphi = np.zeros(self.Nparts, dtype=np.float16)\n pftrk = np.zeros(self.Nparts, dtype=np.float16)\n pfpup = np.zeros(self.Nparts, dtype=np.float16)\n pfpupnolep = np.zeros(self.Nparts, dtype=np.float16)\n pfq = np.zeros(self.Nparts, dtype=np.float16)\n pfid = np.zeros(self.Nparts, dtype=np.float16)\n pfdz = np.zeros(self.Nparts, dtype=np.float16)\n pfdxy = np.zeros(self.Nparts, dtype=np.float16)\n pfdxyerr = np.zeros(self.Nparts, dtype=np.float16)\n arrIdx = 0\n for ip, part in enumerate(pfcands):\n if ip not in candrange: continue\n if arrIdx == self.Nparts: break\n pfpt[arrIdx] = part.pt / jpt\n pfeta[arrIdx] = part.eta - jeta\n pfphi[arrIdx] = signedDeltaPhi(part.phi, jphi)\n pfpup[arrIdx] = part.puppiWeight\n pfpupnolep[arrIdx] = part.puppiWeightNoLep\n pfq[arrIdx] = part.charge\n pfid[arrIdx] = part.pdgId\n pfdz[arrIdx] = part.dz\n pfdxy[arrIdx] = part.d0\n pfdxyerr[arrIdx] = part.d0Err\n pftrk[arrIdx] = part.trkChi2\n arrIdx += 1\n\n # print(pfpt,pfeta,pfphi,pfdz,pfd0)\n ##define and reshape features\n pfData = np.vstack([pfpt, pfeta, pfphi, pfq, pfdz, pfdxy, pfdxyerr, pfpup, pfpupnolep, pfid])\n pfData = np.transpose(pfData)\n pfData = np.expand_dims(pfData,axis=0)\n svData = np.vstack([svdlen,svdlenSig, svdxy, svdxySig, svchi2, svpAngle, svx, svy, svz, svpt, svmass, sveta, svphi])\n svData = np.transpose(svData)\n svData = np.expand_dims(svData, axis=0)\n tauData = np.vstack([tau_charge, tau_chargedIso, tau_dxy, tau_dz, tau_eta, tau_leadTkDeltaEta, tau_leadTkDeltaPhi, tau_leadTkPtOverTauPt, tau_mass, tau_neutralIso, tau_phi, tau_photonsOutsideSignalCone, tau_pt, tau_rawAntiEle, tau_rawIso, tau_rawIsodR03, tau_rawMVAoldDM2017v2, tau_rawMVAoldDMdR032017v2])\n tauData = np.transpose(tauData)\n tauData = np.expand_dims(tauData, axis=0)\n #[\"MET_covXX\",\"MET_covXY\",\"MET_covYY\",\"MET_phi\",\"MET_pt\",\"MET_significance\",\"PuppiMET_pt\",\"PuppiMET_phi\",\"fj_eta\",\"fj_phi\",\"fj_msd\",\"fj_pt\"]\n #evtData = np.array([met.covXX,met.covXY,met.covYY,met.phi,met.pt,met.significance,pupmet.pt,pupmet.phi,jeta,jphi,jmsd,jpt])\n evtData = np.array([met.covXX,met.covXY,met.covYY,signedDeltaPhi(met.phi,jphi),met.pt,met.significance,pupmet.pt,signedDeltaPhi(pupmet.phi,jphi),jeta,jphi,jmsd,jpt])\n evtData = np.expand_dims(evtData,axis=0)\n\n 
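# Feature tensors fed to the taggers below, one jet per batch, as constructed above:
#   pfData  -> (1, self.Nparts, 10)  PF-candidate kinematics/IDs, pt and angles taken relative to the jet
#   svData  -> (1, self.Nsvs, 13)    secondary-vertex features
#   tauData -> (1, self.Ntaus, 18)   HPS tau features (consumed only by the post-taggers)
#   evtData -> (1, 12)               MET/PuppiMET and jet-level inputs (used only by the mass regression)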
IN_hadhad_v4p1_old[0] = float(self.model4p1_hadhad_old.predict([pfData, svData]))\n GRU_hadel_v6p1_old[0] = float(self.model6p1_hadel_old.predict([pfData, svData]))\n GRU_hadmu_v6p1_old[0] = float(self.model6p1_hadmu_old.predict([pfData, svData]))\n\n idconv = {211.:1, 13.:2, 22.:3, 11.:4, 130.:5, 1.:6, 2.:7, 3.:8, 4.:9,\n 5.:10, -211.:1, -13.:2,\n -11.:4, -1.:-6, -2.:7, -3.:8, -4.:9, -5.:10, 0.:0}\n pfData[:,:,-1] = np.vectorize(idconv.__getitem__)(pfData[:,:,-1])\n\n IN_hadhad_v4p1[0] = float(self.model4p1_hadhad.predict([pfData, svData]))\n GRU_hadel_v6p1[0] = float(self.model6p1_hadel.predict([pfData, svData]))\n GRU_hadmu_v6p1[0] = float(self.model6p1_hadmu.predict([pfData, svData]))\n\n # Need to add in one hot encoding for particle data here! The order is just 0 through 10 consecutively\n\n IN_hadhad_v4p1_ohe[0] = float(self.model4p1_hadhad_ohe.predict([pfData, svData]))\n IN_hadel_v4p1_ohe[0] = float(self.model4p1_hadel_ohe.predict([pfData, svData]))\n IN_hadmu_v4p1_ohe[0] = float(self.model4p1_hadmu_ohe.predict([pfData, svData]))\n\n PostTagger_hadhad_v1p1[0] = float(self.postTagger1p1_hadhad.predict([tauData, IN_hadhad_v4p1_ohe[0]]))\n PostTagger_hadel_v1p1[0] = float(self.postTagger1p1_hadel.predict([tauData, IN_hadel_v4p1_ohe[0]]))\n PostTagger_hadmu_v1p1[0] = float(self.postTagger1p1_hadmu.predict([tauData, IN_hadmu_v4p1_ohe[0]]))\n\n Ztagger_pred = self.Ztagger.predict([pfData, svData])\n Ztagger_Zee[0] = float(Ztagger_pred[0][0])\n Ztagger_Zmm[0] = float(Ztagger_pred[0][1])\n Ztagger_Zhh[0] = float(Ztagger_pred[0][2])\n Ztagger_Zhe[0] = float(Ztagger_pred[0][3])\n Ztagger_Zhm[0] = float(Ztagger_pred[0][4])\n\n MassReg_hadhad[0] = float(self.massreg_hadhad.predict([pfData, svData, evtData]))\n MassReg_hadel[0] = float(self.massreg_hadel.predict([pfData, svData, evtData]))\n MassReg_hadmu[0] = float(self.massreg_hadmu.predict([pfData, svData, evtData]))\n\n #self.log_pf.append(pfData)\n #self.log_sv.append(svData)\n #self.log_evt.append(evtData)\n #self.log_mreg.append(np.array([MassReg_hadhad[0], MassReg_hadel[0], MassReg_hadmu[0]]))\n\n #with open('test.npy', 'wb') as f:\n # np.save(f, np.vstack(self.log_pf))\n # np.save(f, np.vstack(self.log_sv))\n # np.save(f, np.vstack(self.log_evt))\n # np.save(f, np.vstack(self.log_mreg))\n #np.save(f, pfData)\n #np.save(f, svData)\n #np.save(f, evtData)\n #np.save(f, np.array([MassReg_hadhad[0], MassReg_hadel[0], MassReg_hadmu[0]]))\n #np.save(f, self.massreg_hadhad.get_weights())\n #np.save(f, self.massreg_hadel.get_weights())\n #np.save(f, self.massreg_hadmu.get_weights())\n\n # assert abs( 1 - float(self.model.predict(X)[0,1]) - float(self.model.predict(X)[0,0])) < 0.02\n # print(X,IN_hadhad_v4p1[0], GRU_hadel_v6p1[0])\n self.out.fillBranch(\"IN_hadhad_v4p1_old\", IN_hadhad_v4p1_old)\n self.out.fillBranch(\"GRU_hadel_v6p1_old\", GRU_hadel_v6p1_old)\n self.out.fillBranch(\"GRU_hadmu_v6p1_old\", GRU_hadmu_v6p1_old)\n\n self.out.fillBranch(\"IN_hadhad_v4p1\", IN_hadhad_v4p1)\n self.out.fillBranch(\"GRU_hadel_v6p1\", GRU_hadel_v6p1)\n self.out.fillBranch(\"GRU_hadmu_v6p1\", GRU_hadmu_v6p1)\n\n self.out.fillBranch(\"IN_hadhad_v4p1_ohe\", IN_hadhad_v4p1_ohe)\n self.out.fillBranch(\"IN_hadel_v4p1_ohe\", IN_hadel_v4p1_ohe)\n self.out.fillBranch(\"IN_hadmy_v4p1_ohe\", IN_hadmu_v4p1_ohe)\n\n self.out.fillBranch(\"PostTagger_hadhad_v1p1\", PostTagger_hadhad_v1p1)\n self.out.fillBranch(\"PostTagger_hadel_v1p1\", PostTagger_hadel_v1p1)\n self.out.fillBranch(\"PostTagger_hadmu_v1p1\", PostTagger_hadmu_v1p1)\n\n 
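# NB: the one-hot-encoded hadmu score is filled above into a branch spelled "IN_hadmy_v4p1_ohe"
# (note the 'hadmy'); the matching branch declaration and any downstream reader of the output
# tree need that exact spelling.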
self.out.fillBranch(\"Ztagger_Zee\", Ztagger_Zee)\n self.out.fillBranch(\"Ztagger_Zmm\", Ztagger_Zmm)\n self.out.fillBranch(\"Ztagger_Zhh\", Ztagger_Zhh)\n self.out.fillBranch(\"Ztagger_Zhe\", Ztagger_Zhe)\n self.out.fillBranch(\"Ztagger_Zhm\", Ztagger_Zhm)\n\n self.out.fillBranch(\"MassReg_hadhad\", MassReg_hadhad)\n self.out.fillBranch(\"MassReg_hadel\", MassReg_hadel)\n self.out.fillBranch(\"MassReg_hadmu\", MassReg_hadmu)\n return True", "def make_times(night, runs, observatory, times, full, instrument, okwrite):\n\n # use this to check times are vaguely right. time of runs\n # must lie between 06.00 local time on date corresponding to\n # start of night date and 1.5 days later. Has picked up a\n # few erroneously dated nights on the TNT.\n mjd_ref = Time(night).mjd - observatory.lon.degree/360 + 0.25\n\n tdata = {}\n with open(times if okwrite else os.devnull,'w') as tout:\n for run in runs:\n if full:\n print(f'Analysing times for run {run}')\n dfile = os.path.join(night, run)\n try:\n ntotal = 0\n if instrument == 'HiPERCAM':\n rtime = hcam.hcam.Rtime(dfile)\n else:\n rtime = hcam.ucam.Rtime(dfile)\n\n # Find first good time, has to roughly match the start\n # date of the night because some times can just be\n # junk\n not_alerted = True\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n expose = 1000000\n for tmid,texp,tiflag in tinfo:\n expose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n expose = round(time.expose,3)\n\n if instrument == 'HiPERCAM' or tflag:\n mjd_start = time.mjd\n tdelta = mjd_start-mjd_ref\n if tdelta > 0 and tdelta < 1.5:\n ts = Time(mjd_start, format=\"mjd\", precision=2)\n ut_start = ts.hms_custom\n n_start = n+1\n if expose >= 0 and expose < 2000:\n break\n elif not_alerted and (tdelta < 0 or tdelta > 1.5):\n # maximum one warning per run\n not_alerted = False\n print(f' Bad time: tdelta = {tdelta} < 0 or > 1.5 on time {n} of {dfile}')\n else:\n ntotal = 0\n raise hcam.HipercamError(f'No good times found in {dfile}')\n\n # Find last good time. First we just go for times near the\n # end of the run. Failing that, we try again from the start,\n # to account for runs with time stamp issues.\n if instrument == 'HiPERCAM':\n nback = 4\n elif rtime.header['MODE'] == 'DRIFT':\n # ultracam or hipercam\n win = rtime.win[0]\n nyu = win.ny*rtime.ybin\n nback = int((1033/nyu + 1) / 2) + 3\n elif rtime.header['MODE'] == 'UDRIFT':\n # ultraspec\n win = rtime.win[0]\n nyu = win.ny*rtime.ybin\n nback = int((1037/nyu + 1) / 2) + 3\n else:\n # non drift mode\n nback = 4\n\n if instrument == 'HiPERCAM':\n ntotal = rtime.ntotal()\n else:\n nbytes = os.stat(dfile + '.dat').st_size\n ntotal = nbytes // rtime.framesize\n\n if instrument != 'HiPERCAM' and ntotal > 20000:\n # this is a risk-reducing strategy in case the end\n # of a long ultracam or ultraspec run is\n # corrupt. 
Better to look at more than the\n # necessary number of frames if it prevents us\n # from having to wind through the whole lot.\n nback = max(nback, 500)\n\n # next statement basically resets the frame\n # we are on\n nreset = max(1, ntotal - nback)\n rtime.set(nreset)\n\n flast = False\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n nexpose = 1000000\n for tmid,texp,tiflag in tinfo:\n nexpose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n nexpose = round(time.expose,3)\n\n if instrument == 'HiPERCAM' or tflag:\n mjd = time.mjd\n if mjd >= mjd_start and mjd < mjd_start + 0.4:\n mjd_end = mjd\n ts = Time(mjd_end, format=\"mjd\", precision=2)\n ut_end = ts.hms_custom\n n_end = nreset + n\n if nexpose < 2000:\n expose = max(expose, nexpose)\n flast = True\n\n if not flast:\n # no good time found near end. There must be\n # one or we wouldn't get to this point, so\n # grind it out the hard way by going through\n # the whole run, which can be slow.\n rtime.set()\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n nexpose = 1000000\n for tmid,texp,tiflag in tinfo:\n nexpose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n nexpose = round(time.expose,3)\n\n if tflag:\n mjd = time.mjd\n if mjd >= mjd_start and mjd < mjd_start + 0.4:\n mjd_end = mjd\n ts = Time(mjd_end, format=\"mjd\", precision=2)\n ut_end = ts.hms_custom\n n_end = n + 1\n if nexpose < 2000:\n expose = max(expose, nexpose)\n\n nok = n_end-n_start+1\n if n_end > n_start:\n cadence = round(86400*(mjd_end-mjd_start)/(n_end-n_start),3)\n tdata[run] = [ut_start,mjd_start,ut_end,mjd_end,cadence,expose,nok,ntotal]\n else:\n cadence = 'UNDEF'\n tdata[run] = [ut_start,mjd_start,ut_end,mjd_end,'',expose,nok,ntotal]\n tout.write(f'{run} {ut_start} {mjd_start} {ut_end} {mjd_end} {cadence} {expose} {nok} {ntotal}\\n')\n\n except hcam.ucam.PowerOnOffError:\n # Power on/off\n tdata[run] = ['power-on-off',]\n tout.write(f'{run} power-on-off\\n')\n if full: print(f'{run} was a power-on or -off')\n\n except hcam.HipercamError:\n # No good times\n tdata[run] = ['','','','','','',0,ntotal]\n tout.write(f'{run} UNDEF UNDEF UNDEF UNDEF UNDEF UNDEF 0 {ntotal}\\n')\n if full:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(f'No good times found for {run}; ntotal = {ntotal}')\n\n except:\n # some other failure\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(\"Problem on run = \", dfile)\n\n # Load of undefined\n tdata[run] = 8*['']\n tout.write(f'{run} {\" \".join(8*[\"UNDEF\"])}\\n')\n\n if okwrite:\n print('Written timing data to',times)\n\n return tdata", "def PMTandPiezoPlot(datadir,run,event,gain): \n en = event\n mu = gain\n e = sbc.DataHandling.GetSBCEvent.GetEvent(datadir+'/'+run,en)\n print(e[\"fastDAQ\"].keys())\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n dcam = np.diff(cgate)\n \n p0=e[\"fastDAQ\"][\"Piezo1\"]\n p1 = e[\"fastDAQ\"][\"Piezo2\"]\n fdt = e[\"fastDAQ\"][\"time\"]\n runreconpath = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/%s/\"%run\n pmtdiffs = []\n diffs = []\n \n camOnTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] < -0.5]\n camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]\n print(len(camOnTimes))\n print(len(camOffTimes))\n \n acousticfilename = 
runreconpath+\"AcousticAnalysis_%s.bin\"%run\n a = sbc.DataHandling.ReadBinary.ReadBlock(acousticfilename)\n bubt0 = a[\"bubble_t0\"]\n \n pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n tracetimes = pmttracetime - pmtalign\n at0 = bubt0[en,0]\n at0_1 = bubt0[en,1]\n \n allxyzfname = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/SimpleXYZ_all.bin\"\n xyzf = sbc.DataHandling.ReadBinary.ReadBlock(allxyzfname)\n indices = [i for i,x in enumerate(xyzf[\"runid\"]) if str(x[0])+\"_\"+str(x[1]) == run]\n xyz_reconstructed = True\n if len(indices) > 0:\n runposreco = {\"ev\":[xyzf[\"ev\"][indices]],\"x\":[xyzf[\"bubX\"][indices]],\n \"y\":[xyzf[\"bubY\"][indices]],\"z\":[xyzf[\"bubZ\"][indices]]}\n z = runposreco[\"z\"][0][int(int(en))]\n else:\n print(\"no handscan?\")\n z = 1.5\n xyz_reconstructed = False\n lag_expected = (-23.387649*z - 261.020495)*1e-6 # fit from other analysis\n t0_expected_p0 = at0 + lag_expected\n t0_expected_p1 = at0_1 + lag_expected\n \n i=0\n candidates = []\n candidate_times=[]\n for t in (tracetimes-at0):\n \n if t<0.2 and t>-0.2:\n lastCamOff = 0\n for k in range(len(camOffTimes)):\n if t+at0 > camOffTimes[k]:\n lastCamOff = camOffTimes[k]\n elif t+at0 < camOffTimes[k]:\n break\n if t+at0-lastCamOff > 25e-6:\n \n pmtdiffs.append(t)\n trace = np.fabs(e[\"PMTtraces\"][\"traces\"][i][0])\n if max(trace) == 128:\n trace = pi.stitchTraces(trace,np.fabs(e[\"PMTtraces\"][\"traces\"][i][1]))\n dt = e[\"PMTtraces\"][\"dt\"][i][0]\n #baseline = np.mean(trace[0:50])\n #trace = trace - baseline\n [phe,n,totInt,pktimes] = pi.SBC_pulse_integrator_bressler(trace,dt)\n \n if phe != None:\n phe /= mu\n candidates.append(phe)\n candidate_times.append(t)\n i+=1\n candidate_phe = 0\n the_index = 0\n i=0\n near_trace_indices = []\n for t in candidate_times:\n if t > -500e-6 and t <0:\n near_trace_indices.append(list(tracetimes-at0).index(t))\n if candidates[i]>candidate_phe:\n candidate_phe = candidates[i]\n the_index = i\n i+=1\n \n if len(candidates) != 0:\n if max(candidates)>0:\n diffs.append(candidate_times[candidates.index(max(candidates))])\n fig,ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.plot(fdt,p0,'b',alpha=0.6, label = 'piezo 0')\n ax1.plot(fdt,p1,'k',alpha=0.2, label= 'piezo 1')\n for i in range(len(candidates)):\n if i == the_index:\n ax2.plot([candidate_times[i]+at0,candidate_times[i]+at0],[0,candidates[i]],'r',lw=4)\n else:\n ax2.plot([candidate_times[i]+at0,candidate_times[i]+at0],[0,candidates[i]],'y',lw=4)\n #ax2.plot([min(candidate_times),max(candidate_times)],[0,0],linewidth=2)\n ax1.plot([at0,at0],[-0.5,0.5],'b',linewidth=2, label = 'acoustic t0, p0')\n ax1.plot([at0_1,at0_1],[-0.5,0.5],'k',linewidth=2, label = 'acoustic t0, p1')\n \"\"\"\n if xyz_reconstructed:\n ax1.plot([t0_expected_p0,t0_expected_p0],[-0.5,0.5],'b:',linewidth=2, label = 'expected PMT t0, p0')\n ax1.plot([t0_expected_p1,t0_expected_p1],[-0.5,0.5],'k:',linewidth=2, label = 'expected PMT t0, p1')\n else:\n ax1.plot([t0_expected_p0,t0_expected_p0],[-0.5,0.5],'b:',linewidth=2, label = 'expected PMT t0, p0, center of chamber')\n ax1.plot([t0_expected_p1,t0_expected_p1],[-0.5,0.5],'k:',linewidth=2, label = 'expected PMT t0, p1, center of chamber')\n \"\"\"\n ax1.plot(fdt,cgate,'c')\n ax1.plot(fdt[:-1],dcam,'m')\n ax2.set_ylabel('pmt signal (phe)',fontsize=20)\n ax1.set_xlabel('time (s)',fontsize=20)\n ax1.set_ylabel('Acoustic 
signa(V)',fontsize=20)\n ax1.set_ylim([min(p1),max(p1)])\n ax2.set_xlim([-0.1,0.1])\n #ax2.set_ylim([0,5])\n ax1.legend()\n plt.show\n \n for j in near_trace_indices:\n trace = e[\"PMTtraces\"][\"traces\"][j][0]\n dt = e[\"PMTtraces\"][\"dt\"]\n dt_tr = dt[j][0]\n tPMT = np.arange(len(trace))*dt_tr\n plt.figure()\n plt.plot(tPMT,trace)\n plt.xlabel(\"t (s)\")\n plt.ylabel(\"PMT signal\")\n plt.show\n \n plt.figure()\n plt.plot(e[\"fastDAQ\"][\"time\"],e[\"fastDAQ\"][\"VetoCoinc\"])\n plt.ylabel(\"Veto Coincidence signal\",fontsize=18)\n plt.xlabel(\"time (s)\")\n plt.show", "def readProcessedFCD():\n procFcdDict = {}\n pqDateDict = {} # each date is a period / quota tupel assigned\n simDate = '2007-07-18 '\n day = 0\n # create keys for the procFcdDict\n for p in period:\n for q in quota:\n day += 86400\n date, time = calcTime.getDateFromDepart(day).split(\" \")\n pqDateDict.setdefault(date, (p, q))\n procFcdDict.setdefault((p, q), {})\n # print date,p,q\n\n inputFile = open(path.FQprocessedFCD, 'r')\n for line in inputFile:\n timestamp, edge, speed, cover, id = line.split('\\t')\n date, time = calcTime.getNiceTimeLabel(timestamp).split(\" \")\n # add values to actual Dict\n timestep = calcTime.getTimeInSecs(simDate + time)\n procFcdDict[pqDateDict[date]].setdefault(\n timestep, []).append((id, edge, float(speed) / 3.6))\n inputFile.close()\n\n return procFcdDict", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n 
c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # 
register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subsdivisions for the calculation\n else:\n Nseg = 10 # default number of subsdivisions for the calculation\n\n for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 
'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final", "def format_coregister():\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n\r\n for study in study_list:\r\n\r\n df_meta = retrieve_meta(study)\r\n recordNames = list(df_meta['recordName'])\r\n\r\n # name the wearable used for each record\r\n for record in recordNames:\r\n\r\n i = df_meta[ df_meta['recordName']== record].index.values[0]\r\n print('i = ' + str(i))\r\n coregisterBegin = df_meta.loc[i, 'coregisterBegin' ]\r\n coregisterEnd = df_meta.loc[i, 'coregisterEnd' ]\r\n coregisterRecords = df_meta.loc[i, 'coregisterRecords' ]\r\n\r\n for sensor in sensor_list:\r\n\r\n df_coregister = pd.DataFrame()\r\n\r\n if len(coregisterRecords) == len(record):\r\n coregisterRecords = list([coregisterRecords])\r\n\r\n elif len(coregisterRecords) > len(record):\r\n coregisterRecords = coregisterRecords.split(' ')\r\n\r\n print('coregisterRecords = ')\r\n print(coregisterRecords)\r\n\r\n for item in coregisterRecords:\r\n\r\n format_type, segment, recordRef = 'truncate', 'All', item\r\n source = os.path.join('studies', study, 'formatted', format_type, recordRef, segment, sensor + '.csv')\r\n df = pd.read_csv(source)\r\n\r\n assert coregisterEnd > coregisterBegin + 100, 'during coregister format, coregisterBegin >= coregisterEnd'\r\n 
assert coregisterEnd < max(list(df['timeUnix'])), 'possible error with time'\r\n\r\n print('coregisterEnd = ' + str(coregisterEnd) + ' timeUnixEnd = ' + str(max(list(df['timeUnix']))))\r\n print('timeUnixEnd - coregisterEnd = ' + str((max(list(df['timeUnix'])) - coregisterEnd ) / 60))\r\n print('coregisterEnd - timeUnixBegin = ' + str((coregisterEnd - min(list(df['timeUnix']))) / 60))\r\n\r\n assert coregisterEnd > min(list(df['timeUnix']))\r\n\r\n df = df[df['timeUnix'] > coregisterBegin]\r\n df = df[df['timeUnix'] < coregisterEnd]\r\n\r\n assert len(list(df['timeUnix'])) > 0, 'coregistered df removed'\r\n\r\n recordSplit = item.split('_')\r\n wearableName = recordSplit[1]\r\n\r\n df_coregister['timeUnix'] = list(df['timeUnix'])\r\n df_coregister['timeMinutes'] = list(df['timeMinutes'])\r\n\r\n colName = str(wearableName + '_' + 'measurement')\r\n print('colName = ' + colName)\r\n df_coregister[colName] = list(df['measurement'])\r\n\r\n\r\n path = ['studies', study, 'formatted', 'coregister', record, segment]\r\n path = build_path(path)\r\n file = os.path.join(path, sensor + \".csv\")\r\n df_coregister.to_csv(file)\r\n print('formatted coregister file = ' + str(file))", "def buildExposureTable(exposures, fields, instruments):\n name = []\n ra = []\n dec= []\n field= []\n inst = []\n airmass = []\n mjd = []\n exptime = []\n epoch = []\n apcorr = []\n index = 0\n for k,e in exposures.items():\n name.append(e.name)\n ra.append(getDegree(e.coords.ra))\n dec.append(getDegree(e.coords.dec))\n field.append(fields[e.field].index)\n if e.instrument in specialInstruments:\n inst.append(specialInstruments[e.instrument])\n else:\n inst.append(instruments[e.instrument].index)\n e.index = index\n index += 1\n\n airmass.append(e.airmass)\n mjd.append(e.mjd)\n exptime.append(e.exptime)\n epoch.append(e.epoch)\n apcorr.append(e.apcorr)\n hdu = pf.BinTableHDU.from_columns(\\\n pf.ColDefs( [pf.Column(name='NAME',format=py_to_fits(name),array=name),\n pf.Column(name='RA',format=py_to_fits(ra),array=ra),\n pf.Column(name='DEC',format=py_to_fits(dec),array=dec),\n pf.Column(name='FIELDNUMBER',format=py_to_fits(field),array=field),\n pf.Column(name='INSTRUMENTNUMBER',format=py_to_fits(inst),\\\n array=inst),\n pf.Column(name=\"MJD\",format=py_to_fits(mjd),array=mjd),\n pf.Column(name=\"AIRMASS\",format=py_to_fits(airmass),array=airmass),\n pf.Column(name=\"EXPTIME\",format=py_to_fits(exptime),array=exptime),\n pf.Column(name=\"EPOCH\",format=py_to_fits(epoch),array=epoch),\n pf.Column(name=\"APCORR\",format=py_to_fits(apcorr),array=apcorr)] ),\n name = 'Exposures')\n # hdu.header['EXTNAME'] = 'Exposures'\n return hdu", "def GEEmacaGCMs(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,models,\n username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n MACA = (ee.ImageCollection('IDAHO_EPSCOR/MACAv2_METDATA_MONTHLY')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n yearsEE = ee.List(years)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax'),\n (met == 
'huss'),(met == 'rsds'),\n (met == 'was')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif (timeStep == 'month'):\n \n img_col = MACA.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (MACA\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_MACA_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for MACA: ' + met + ' ' + scenario + ' ' + model)", "def build(self):\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n 
#print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n self.node = np.zeros(self.ntotal, dtype='int32')\n #oxx, oyy, ozz, txy, pressure\n self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')\n self.location = np.empty(self.ntotal, dtype='U8')\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n\n self._times = np.zeros(self.ntimes, dtype=dtype)", "def generate_timeseries(data_list, setname=\"MagneticFields\"):\n full_data = TimeSeriesList()\n for seg in sorted(data_list):\n hfile = h5py.File(data_list[seg], \"r\")\n full_data.append(retrieve_data_timeseries(hfile, \"MagneticFields\"))\n hfile.close()\n return full_data", "def infotodict(seqinfo):\n\n last_run = len(seqinfo)\n\n info = {\n t1w: [], t2w: [], epi_fmap_AP: [], epi_fmap_PA: [],\n\n rest_ap_run1: [], rest_pa_run2: [],\n rest_ap_run3: [], rest_pa_run4: [],\n rest_ap_run1_sbref: [], rest_pa_run2_sbref: [],\n rest_ap_run3_sbref: [], rest_pa_run4_sbref: [],\n\n dwi_ap_run1: [], dwi_pa_run2: [],\n dwi_ap_run3: [], dwi_pa_run4: [],\n dwi_ap_run1_sbref: [], dwi_pa_run2_sbref: [],\n dwi_ap_run3_sbref: [], dwi_pa_run4_sbref: []\n }\n\n def get_latest_series(key, s):\n # if len(info[key]) == 0:\n info[key].append(s.series_id)\n # else:\n # info[key] = [s.series_id]\n\n for s in seqinfo:\n if \"abort\" in s.protocol_name.lower():\n continue\n\n if s.protocol_name == 'SpinEchoFieldMap_AP':\n get_latest_series(epi_fmap_AP, s)\n\n elif s.protocol_name == 'SpinEchoFieldMap_PA':\n get_latest_series(epi_fmap_PA, s)\n\n elif s.protocol_name == 'rfMRI_REST_AP_Run1':\n if s.dim3 > 1:\n get_latest_series(rest_ap_run1, s)\n else:\n get_latest_series(rest_ap_run1_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_PA_Run2':\n if s.dim3 > 1:\n get_latest_series(rest_pa_run2, s)\n else:\n get_latest_series(rest_pa_run2_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_AP_Run3':\n if s.dim3 > 1:\n get_latest_series(rest_ap_run3, s)\n else:\n get_latest_series(rest_ap_run3_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_PA_Run4':\n if s.dim3 > 1:\n get_latest_series(rest_pa_run4, s)\n else:\n get_latest_series(rest_pa_run4_sbref, s)\n\n # dMRI naming conventions switch half-way through. 
Some end with _RunX\n elif s.protocol_name.startswith('dMRI_dir98_AP'):\n if s.dim3 > 1:\n get_latest_series(dwi_ap_run1, s)\n else:\n get_latest_series(dwi_ap_run1_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir98_PA'):\n if s.dim3 > 1:\n get_latest_series(dwi_pa_run2, s)\n else:\n get_latest_series(dwi_pa_run2_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir99_AP'):\n if s.dim3 > 1:\n get_latest_series(dwi_ap_run3, s)\n else:\n get_latest_series(dwi_ap_run3_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir99_PA'):\n if s.dim3 > 1:\n get_latest_series(dwi_pa_run4, s)\n else:\n get_latest_series(dwi_pa_run4_sbref, s)\n\n elif s.protocol_name == 'T1w_MPR':\n get_latest_series(t1w, s)\n\n elif s.protocol_name == 'T2w_SPC':\n get_latest_series(t2w, s)\n\n else:\n print(\"Series not recognized!: \", s.protocol_name, s.dcm_dir_name)\n return info", "def find_inflections(study, record, sensor, segment, range):\n\n # check if the inflections have already been found\n path = [study, 'analyzed', 'inflections', 'all_times', str(range), record, segment]\n pathJoined = os.path.join(*path)\n file = os.path.join(pathJoined, sensor + \".csv\")\n\n if os.path.isfile(file):\n print('file found, not recalculated.')\n return\n\n print('finding inflections to build : ' + file)\n\n # retrieve the timestamped measurements for the study - record - sensor - segment\n format_type = 'truncate'\n source = os.path.join(study, 'formatted', format_type, record, segment, sensor + '.csv')\n print('source = ' + source)\n df = pd.read_csv(source)\n\n for colName in df.columns:\n\n # remove extra columns because the dataframe will be saved\n if 'Unnamed' in str(colName):\n del df[colName]\n\n # save the timestamps as a list\n elif 'Minutes' in str(colName):\n timeMinutes = list(df[colName])\n\n # find the measurement\n elif 'meas' in colName:\n\n # add new columns to the dataframe to save the new variables\n newColNames = ['inflectionDecision', 'inflectionLocation', 'polyfitCoefficients', 'polyfitEquation', 'polyfitSolution', 'derivativeEquation', 'derivativeSolution']\n colNameSplit = colName.split('_')\n print('colNameSplit[0] = ' + colNameSplit[0])\n\n for suffix in newColNames:\n label = str(colNameSplit[0] + '_' + suffix)\n print('label = ' + label)\n if label not in df.columns:\n df[label] = [None]*len((list(df['timeMinutes'])))\n\n df['timeBegin'] = [None]*len((list(df['timeMinutes'])))\n df['timeEnd'] = [None]*len((list(df['timeMinutes'])))\n\n for timeMinute in timeMinutes:\n\n i = df[ df['timeMinutes']== timeMinute].index.values[0]\n\n timeDif = (float(df.loc[2,'timeMinutes']) - float(df.loc[1,'timeMinutes']))\n timeTolerance = timeDif/2\n iRange = int(range/60*1/(timeDif))\n # print('iRange = ' + str(iRange))\n\n if len(list(df['timeMinutes'])) - i <= iRange+2:\n continue\n\n timeMedian = df.loc[int(i+iRange/2), 'timeMinutes']\n timeBegin = df.loc[int(i), 'timeMinutes']\n timeEnd = df.loc[int(i+iRange), 'timeMinutes']\n\n # print('timeMedian = ' + str(timeMedian) + ' timeBegin = ' + str(timeBegin) + ' timeEnd = ' + str(timeEnd))\n # print('range = ' + str(range/60) + ' timeEnd-timeBegin = ' + str(timeEnd-timeBegin) + ' % = ' + str(range/60/(timeEnd-timeBegin)))\n\n df_truncate = df[df['timeMinutes'] >= timeMinute]\n df_truncate = df_truncate[df_truncate['timeMinutes'] <= timeMinute + range/60]\n # df_truncate = df[df['timeMinutes'] >= timeMinute & df_truncate['timeMinutes'] <= timeMinute + range/60]\n\n timeTruncate = list(df_truncate['timeMinutes'])\n df.loc[int(i+iRange/2), 'timeBegin'] = 
min(timeTruncate)\n df.loc[int(i+iRange/2), 'timeEnd'] = max(timeTruncate)\n\n measTruncate = list(df_truncate[colName])\n\n coef = np.polyfit(timeTruncate, measTruncate, 2)\n # coef = [float(x) for x in coef]\n\n x = sym.Symbol('x')\n\n f = coef[0]*x*x+coef[1]*x+coef[2]\n # print('f = ')\n # print(f)\n\n dff = sym.diff(f,x)\n # print('dff = ')\n # print(dff)\n\n solf = sym.solve(f)\n soldf = sym.solve(dff)\n soldf = soldf[0]\n\n\n label = str(colNameSplit[0] + '_' + 'inflectionDecision')\n df.loc[int(i+iRange/2), label] = 'No'\n\n label = str(colNameSplit[0] + '_' + 'inflectionLocation')\n df.loc[int(i+iRange/2), label] = timeMinute\n\n label = str(colNameSplit[0] + '_' + 'polyfitCoefficients')\n df.loc[int(i+iRange/2), label] = str(''.join([str(x) for x in coef]))\n\n label = str(colNameSplit[0] + '_' + 'polyfitEquation')\n df.loc[int(i+iRange/2), label] = str(f)\n\n label = str(colNameSplit[0] + '_' + 'polyfitSolution')\n df.loc[int(i+iRange/2), label] = str(''.join([str(x) for x in solf]))\n\n label = str(colNameSplit[0] + '_' + 'derivativeEquation')\n df.loc[int(i+iRange/2), label] = str(dff)\n\n label = str(colNameSplit[0] + '_' + 'derivativeSolution')\n df.loc[int(i+iRange/2), label] = str(soldf)\n\n if soldf < timeMedian + timeTolerance:\n\n if soldf > timeMedian - timeTolerance:\n\n print('inflection found at time = ' + str(soldf))\n label = str(colNameSplit[0] + '_' + 'inflectionDecision')\n df.loc[int(i+iRange/2), label] = 'Yes'\n\n path = build_path(path)\n file = os.path.join(path, sensor + \".csv\")\n df.to_csv(file)\n print('inflection list saved : ' + file)\n return(file)", "def investigate4DRepeatability():\n parentdir = '/home/rallured/Dropbox/Interferometer/SolarBFlat/Repeatability/'\n avgs = [1,2,4,8,16,32]\n\n #Temporal with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTiltTemporal*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptemptilt = d[-1]-d[-2]\n figtemptilt = d[-1]\n\n #Dynamic with fringes tilted\n fn = glob.glob(parentdir+'Tilt/17*RepeatabilityTilt_*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = [met.readFlatScript(fi.split('.')[0])[0] for fi in fn]\n #Make progressive averaging plot\n plt.figure('DynamicTiltedFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Tilted')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdyntilt = d[-1]-d[-2]\n figdyntilt = d[-1]\n \n #Temporal with fringes nulled\n fn = glob.glob(parentdir+'Nulled/17*.bin')\n fn.sort()\n dx = met.readFlatScript(fn[0].split('.')[0])[1]\n d = np.array([met.readFlatScript(fi.split('.')[0])[0] for fi in fn])\n #Make progressive averaging plot\n plt.figure('TemporalNulledFigure')\n for i in np.arange(6)*2:\n f,p = fourier.meanPSD(d[i],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n 
plt.loglog(f,p/f[0],label=str(avgs[i/2]))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Temporal,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n reptempnull = d[-1]-d[-2]\n figtempnull = d[-1]\n \n #Dynamic with fringes nulled\n d = pyfits.getdata('/home/rallured/Dropbox/Interferometer/'\n 'SolarBFlat/Repeatability/'\n 'Nulled/170103_Processed.fits')\n rep = np.array([d[i,0]-d[i,1] for i in range(32)])\n #Make progressive averaging plot\n plt.figure('DynamicNulledFigure')\n for i in [0,1,3,7,15,31]:\n f,p = fourier.meanPSD(d[i,0],win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label=str(i+1))\n plt.legend(loc='lower left')\n plt.title('Solar B PSD - Dynamic,Nulled')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n #Get repeatability\n repdynnull = d[-1][0]-d[-1][1]\n figdynnull = d[-1][0]\n\n #Make comparative repeatability plots with 32 averages\n plt.figure('CompareRepeatability')\n f,p = fourier.meanPSD(repdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(repdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(reptemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(reptempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Repeatability - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make comparative figure plots with 32 averages\n plt.figure('CompareFigure')\n f,p = fourier.meanPSD(figdynnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Nulled')\n f,p = fourier.meanPSD(figdyntilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Dynamic,Tilted')\n f,p = fourier.meanPSD(figtemptilt,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Tilted')\n f,p = fourier.meanPSD(figtempnull,win=np.hanning,dx=dx,irregular=True,\\\n minpx=200)\n plt.loglog(f,p/f[0],label='Temporal,Nulled')\n plt.legend(loc='lower left')\n plt.title('Solar B Figure - 32 Averages')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n\n #Make parroting repeatability plots\n fig = plt.figure('Parroting')\n fig.add_subplot(2,2,1)\n plt.imshow(repdyntilt)\n plt.title('Dynamic Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,2)\n plt.imshow(reptemptilt)\n plt.title('Temporal Repeatability')\n plt.colorbar()\n fig.add_subplot(2,2,3)\n res = legendre2d(repdyntilt,xo=3,yo=3)[0]\n plt.imshow(repdyntilt-res)\n plt.title('Dynamic Repeatability Filtered')\n plt.colorbar()\n fig.add_subplot(2,2,4)\n res = legendre2d(reptemptilt,xo=3,yo=3)[0]\n plt.imshow(reptemptilt-res)\n plt.title('Temporal Repeatability Filtered')\n plt.colorbar()", "def spotmap(week, bam, pt):\n print('Mapping MCAP data to Metrics... 
',end=''), \n bam['Spotfire Data'] = bam.index.to_series().map(pt.fillna(0)['len',week])\n print('Done')\n return bam", "def _map_timestep2timeind(self, timestep):\n if not self.simulation and timestep not in self.timemap:\n # for steady state computation include year 0 or first 12 months\n if self.md.litter_mode=='monthly':\n incl = range(1, 13)\n infall = self.md.monthly_litter\n elif self.md.litter_mode=='yearly':\n incl = [0]\n infall = self.md.yearly_litter\n for ind in range(len(infall)):\n if infall[ind].timestep in incl:\n self.timemap[timestep].append(ind)\n if timestep not in self.timemap and self.md.litter_mode=='yearly':\n # if no year 0 specification, use the one for year 1\n for ind in range(len(infall)):\n if infall[ind].timestep==1:\n self.timemap[timestep].append(ind)\n if self.simulation and timestep not in self.timemap:\n # now for the simulation run\n now, end = self._get_now_and_end(timestep)\n if self.md.duration_unit=='month':\n dur = relativedelta(months=self.timestep_length)\n elif self.md.duration_unit=='year':\n dur = relativedelta(years=self.timestep_length)\n end = now + dur - relativedelta(days=1)\n if self.md.litter_mode=='monthly':\n inputdur = relativedelta(months=1)\n infall = self.md.monthly_litter\n elif self.md.litter_mode=='yearly':\n inputdur = relativedelta(years=1)\n infall = self.md.yearly_litter\n # the first mont/year will have index number 1, hence deduce 1 m/y\n start = STARTDATE - inputdur\n for ind in range(len(infall)):\n incl = self._test4inclusion(ind, infall, now, start, end)\n if incl:\n self.timemap[timestep].append(ind)\n # check for possible area reductions to be mapped\n areachange = self.md.area_change\n for ind in range(len(areachange)):\n incl = self._test4inclusion(ind, areachange, now, start, end)\n if incl:\n self.area_timemap[timestep].append(ind)\n if timestep not in self.timemap:\n self.timemap[timestep] = []\n if timestep not in self.area_timemap:\n self.area_timemap[timestep] = []\n return self.timemap[timestep]", "def __init__(self):\r\n \r\n # Define temporal range\r\n self.timelimit = np.zeros(2,dtype=datetime.datetime)\r\n self.timelimit[0] = datetime.datetime(1990,1,1)\r\n self.timelimit[1] = datetime.datetime(2050,1,1)\r\n \r\n ### emissions \r\n \r\n # Load in historical emissions by sector.\r\n self.year_E_hist_ccc, self.total_E_hist_ccc, self.sector_E_hist_ccc, self.sector_name_E, self.num_years_E_hist = load_historical_emissions()\r\n # Load in government emissions forecast.\r\n self.year_E_fore_gov, self.elms_E_fore_gov, self.total_E_fore_gov, self.total_E_fore_gov_95CI, sector_E_fore_gov, sector_name_E_fore_gov, self.num_years_E_fore = load_precovid_emission_gov_estimate()\r\n \r\n # Define Targets\r\n self.paris_wb_2 = (1-0.8) * self.total_E_hist_ccc[0]\r\n self.net_zero = 0 \r\n \r\n # Reorder to be consistent \r\n self.num_E_sectors = len(self.sector_name_E) \r\n org_hist_gov = np.zeros(self.num_E_sectors,dtype=int) \r\n \r\n for sec1 in range(0,self.num_E_sectors):\r\n \r\n for sec2 in range(0,self.num_E_sectors):\r\n \r\n if self.sector_name_E[sec1] == sector_name_E_fore_gov[sec2]:\r\n \r\n org_hist_gov[sec1] = sec2\r\n \r\n break\r\n \r\n elif (self.sector_name_E[sec1] == 'Land use, land use change and forestry' and sector_name_E_fore_gov[sec2] == 'LULUCF'):\r\n \r\n org_hist_gov[sec1] = sec2\r\n \r\n break\r\n \r\n \r\n # sector name short form (for plotting)\r\n \r\n self.sector_name_E_sf = np.array(['EneS','Bus','Tran','Pub','Res','Agr','IndP','LULUCF','WM'])\r\n self.sector_E_fore_gov = 
sector_E_fore_gov[org_hist_gov,:]\r\n\r\n ### economy\r\n \r\n # Load in historical economic data\r\n self.year_gdp_hist_ons, self.gdp_hist_ons, self.num_years_gdp_hist = load_annual_GDP_ons()\r\n self.month_gdp_hist_ons, self.gdp_hist_ons_mnth, self.num_months_gdp_hist = load_covid_GDP_ons() # By month\r\n \r\n # Load in projection of GDP\r\n self.year_gdp_fore_gov, self.elms_gdp_fore_gov, self.frac_change_gdp_fore_gove, self.num_years_gdp_fore = load_precovid_annual_GDP_estimate()\r\n \r\n # Find forecasted absolute values of GDP\r\n self.gdp_fore_gov = np.zeros(self.num_years_E_fore)\r\n \r\n for yr1 in range(0,self.num_years_E_fore):\r\n \r\n # Match Absolute Historical value to GDP before forecast year otherwise use percent change\r\n if self.elms_E_fore_gov[yr1] == False:\r\n \r\n for yr2 in range(0,self.num_years_gdp_hist):\r\n \r\n if self.year_gdp_hist_ons[yr1] == self.year_gdp_hist_ons[yr2]:\r\n \r\n self.gdp_fore_gov[yr1] = self.gdp_hist_ons[yr2]\r\n \r\n break\r\n \r\n else:\r\n \r\n break\r\n \r\n self.gdp_fore_gov[self.elms_E_fore_gov] = self.gdp_fore_gov[self.elms_E_fore_gov==False][-1] * (1+np.cumsum(self.frac_change_gdp_fore_gove[self.elms_gdp_fore_gov]))\r\n \r\n ### carbon intensity (eta)\r\n self.eta_gdp_hist = np.zeros(self.num_years_E_hist)\r\n\r\n # Historical \r\n for yr1 in range(0,self.num_years_gdp_hist):\r\n \r\n for yr2 in range(0,self.num_years_E_hist):\r\n \r\n if self.year_gdp_hist_ons[yr1] == self.year_E_hist_ccc[yr2]:\r\n \r\n self.eta_gdp_hist[yr1] = self.total_E_hist_ccc[yr2]/self.gdp_hist_ons[yr1]\r\n \r\n break\r\n # Forecast\r\n self.eta_gdp_fore = self.total_E_fore_gov/self.gdp_fore_gov\r\n \r\n ### COVID 19 Deaths\r\n self.day_mort_gov, self.mort_per_day_gov, self.R_per_day_gov = load_COVID19_mort()\r\n \r\n ### Google mobility\r\n self.day_m_google, self.m_google = load_google_mobility()\r\n \r\n self.tau_us, self.tau_as, self.Y_run, self.eta_run, self.E_run, self.R_run, self.m_run, self.mu_run = run_simulations()\r\n \r\n \r\n self.num_sec = len(self.tau_us)\r\n self.startdate_run = datetime.datetime(2020,1,31)\r\n self.endate_run = datetime.datetime(2050,1,1)\r\n self.num_days_run = len(self.Y_run[:,0])\r\n self.date_run = np.zeros(self.num_days_run,dtype=datetime.datetime)\r\n self.date_run[0] = self.startdate_run\r\n for day in range(1,self.num_days_run):\r\n \r\n self.date_run[day] = self.date_run[day-1] + datetime.timedelta(days=1)\r\n \r\n return", "def create_time_references(self, docFeatList, timexList): \n timeReferences = {} \n \n confidence = 1 \n ##: confidence = 1: input exposure and onset date; \n ##: = 0.9: with tags of interest;\n ##: = 0.8: obtained from extracted vaccines\n ##: = 0.7: obtained from extracted drugs\n ##: = 0.6: date of drug or vaccine is obtained from time impact zone\n if self.exposureDate: ##: input exposure date is available\n self.exposureDateConfidence = 1 \n timeReferences[('Vaccination', None, None, 0, None, None, confidence)] = self.exposureDate\n timeReferences[('Injection', None, None, 0, None, None, confidence)] = self.exposureDate\n if self.onsetDate: ##: input onset date is available\n self.onsetDateConfidence = 1\n timeReferences[('Onset', None, None, 0, None, None, confidence)] = self.exposureDate\n \n if self.receiveDate:\n timeReferences[('Administration', None, None, None, None, None, 1)] = self.receiveDate\n \n coordFeatTypes = set(['VACCINE', 'DRUG']) \n ##: add tags in features into coordinates\n for feature in docFeatList:\n \n if not feature.getType() in coordFeatTypes:\n continue\n \n if 
feature.inClause(): continue\n \n sentnumber = feature.getSentNum()\n \n if feature.getType()=='VACCINE':\n coordType = 'Vaccine'\n confidence = 0.8\n else: # DRUG\n coordType = 'Drug'\n confidence = 0.7\n \n tlink = feature.getTlink()\n if tlink:\n ##: Handle features with does number\n counts = []\n if 'DoseIndicator' in [tg[1] for tg in self.sentence_full_tags[sentnumber]]:\n counts = [tg[0] for tg in self.sentence_full_tags[sentnumber] if tg[1]=='Count']\n \n timexes = [t for t in tlink.getTimexes() if t.getDateTime() and t.getRole()!='IGNORE']\n \n if self.get_drug_dose_number(feature) and len(counts) == len(timexes):\n for i, t in enumerate(timexes):\n val = util.text2num.convertOrdinal(counts[i])\n timeReferences[(coordType, feature.getString(), val, sentnumber, feature.getStartPos(), t.getStartPos(), confidence)] = t.getDateTime()\n else:\n for t in timexes:\n timeReferences[(coordType, feature.getString(), 0, sentnumber, feature.getStartPos(), t.getStartPos(), confidence)] = t.getDateTime() \n \n exposureSet = ['Vaccination', 'Injection']\n anchorSet = ['Hospitalization', 'Administration']\n for sentnum, sentence in enumerate(self.sentences):\n tags = set([tg[1] for tg in self.taggedSentences[sentnum]])\n timexes = [t for t in timexList if t.getDateTime() and t.getSentNum()==sentnum and t.getRole()!='IGNORE']\n if timexes:\n sent_start = self.sentence_startPos[sentnum]\n intersect = tags.intersection(anchorSet)\n for st in intersect:\n words = [tg[0] for tg in self.taggedSentences[sentnum] if tg[1]==st]\n wordPos = [sentence.lower().find(word) for word in words]\n validWords = [pos for pos in wordPos if not self.is_in_clause(pos+sent_start, sentnum)]\n if not validWords:\n continue \n coord = (st, '', None, sentnum, None, None, 0.9)\n if not coord in timeReferences:\n timeReferences[coord] = timexes[0].getDateTime()\n \n ref =[]\n if tags.intersection(exposureSet):\n tgs = [tg for tg in self.taggedSentences[sentnum] if tg[1] in exposureSet]\n ref = tgs[0]\n \n if tags.intersection(['Treatment']):\n tokens = set([tg[0].lower() for tg in self.sentence_full_tags[sentnum]])\n intst = tokens.intersection(['started', 'starts', 'begins', 'began'])\n if intst:\n ref = (list(intst)[0], 'Injection') \n \n if ref:\n word = ref[0].lower()\n wpos = sentence.lower().find(word) + sent_start\n if self.is_in_clause(wpos, sentnum):\n continue\n leftTimexes = [t for t in timexes if t.getStartPos() <= wpos]\n rightTimexes = [t for t in timexes if t.getStartPos() >= wpos]\n if not leftTimexes:\n dt = rightTimexes[0].getDateTime()\n elif not rightTimexes:\n dt = leftTimexes[-1].getDateTime()\n else:\n leftSeg = self.text[leftTimexes[-1].getEndPos():wpos]\n rightSeg = self.text[wpos+len(word):rightTimexes[0].getStartPos()]\n \n if self.is_next_separated(leftSeg, rightSeg):\n dt = leftTimexes[-1].getDateTime()\n else:\n dt = rightTimexes[0].getDateTime() \n timeReferences[(ref[1], word, None, sentnum, wpos, wpos+len(word), 0.9)] = dt\n \n return timeReferences", "def get_event_uid_and_station_data_MTFIT_FORMAT_from_nonlinloc_hyp_file(nlloc_hyp_filename):\n # Array shape is (num_stations, 4) in the order: station name, azimuth, takeoff angle, polarity\n \n # Get event UID:\n # Get event origin times:\n # Get event time from NLLoc file for basal icequake:\n os.system(\"grep 'GEOGRAPHIC' \"+nlloc_hyp_filename+\" > ./tmp_event_GEO_line.txt\")\n GEO_line = np.loadtxt(\"./tmp_event_GEO_line.txt\", dtype=str)\n event_origin_time = 
UTCDateTime(GEO_line[2]+GEO_line[3]+GEO_line[4]+GEO_line[5]+GEO_line[6]+GEO_line[7])\n # And remove temp files:\n os.system(\"rm ./tmp_*GEO_line.txt\")\n uid = event_origin_time.strftime(\"%Y%m%d%H%M%S%f\")\n \n # And get station arrival times and azimuth+takeoff angles for each phase, for event:\n os.system(\"awk '/PHASE ID/{f=1;next} /END_PHASE/{f=0} f' \"+nlloc_hyp_filename+\" > ./tmp_event_PHASE_lines.txt\") # Get phase info and write to tmp file\n PHASE_lines = np.loadtxt(\"./tmp_event_PHASE_lines.txt\", dtype=str) # And import phase lines as np str array\n arrival_times_dict = {} # Create empty dictionary to store data (with keys: event_origin_time, station_arrivals {station {station_P_arrival, station_S_arrival}}})\n arrival_times_dict['event_origin_time'] = event_origin_time\n arrival_times_dict['station_arrival_times'] = {}\n arrival_times_dict['azi_takeoff_angles'] = {}\n # Loop over stations:\n for i in range(len(PHASE_lines[:,0])):\n station = PHASE_lines[i, 0]\n station_current_phase_arrival = UTCDateTime(PHASE_lines[i,6]+PHASE_lines[i,7]+PHASE_lines[i,8])\n station_current_azimuth_event_to_sta = float(PHASE_lines[i,22])\n station_current_toa_event_to_sta = float(PHASE_lines[i,24])\n station_current_toa_sta_inclination = 180. - station_current_toa_event_to_sta\n # See if station entry exists, and if does, write arrival to array, otherwise, create entry and write data to file:\n # For station arrival times:\n try:\n arrival_times_dict['station_arrival_times'][station]\n except KeyError:\n # If entry didnt exist, create it and fill:\n if PHASE_lines[i, 4] == \"P\":\n arrival_times_dict['station_arrival_times'][station] = {}\n arrival_times_dict['station_arrival_times'][station][\"P\"] = station_current_phase_arrival\n elif PHASE_lines[i, 4] == \"S\":\n arrival_times_dict['station_arrival_times'][station] = {}\n arrival_times_dict['station_arrival_times'][station][\"S\"] = station_current_phase_arrival\n # And if entry did exist:\n else:\n if PHASE_lines[i, 4] == \"P\":\n arrival_times_dict['station_arrival_times'][station][\"P\"] = station_current_phase_arrival\n elif PHASE_lines[i, 4] == \"S\":\n arrival_times_dict['station_arrival_times'][station][\"S\"] = station_current_phase_arrival\n # And for azimuth and takeoff angle:\n try:\n arrival_times_dict['azi_takeoff_angles'][station]\n except KeyError:\n # If entry didnt exist, create it and fill:\n if PHASE_lines[i, 4] == \"P\":\n arrival_times_dict['azi_takeoff_angles'][station] = {}\n arrival_times_dict['azi_takeoff_angles'][station][\"P_azimuth_sta_to_event\"] = station_current_azimuth_event_to_sta\n arrival_times_dict['azi_takeoff_angles'][station][\"P_toa_sta_inclination\"] = station_current_toa_sta_inclination\n # And if entry did exist:\n else:\n if PHASE_lines[i, 4] == \"P\":\n arrival_times_dict['azi_takeoff_angles'][station][\"P_azimuth_sta_to_event\"] = station_current_azimuth_event_to_sta\n arrival_times_dict['azi_takeoff_angles'][station][\"P_toa_sta_inclination\"] = station_current_toa_sta_inclination \n \n # And clean up:\n os.system(\"rm ./tmp*PHASE_lines.txt\")\n \n # And create stations array:\n stations = []\n for i in range(len(arrival_times_dict['azi_takeoff_angles'])):\n station = list(arrival_times_dict['azi_takeoff_angles'].keys())[i]\n azi = arrival_times_dict['azi_takeoff_angles'][station][\"P_azimuth_sta_to_event\"]\n toa = arrival_times_dict['azi_takeoff_angles'][station][\"P_toa_sta_inclination\"]\n pol = 0 # Assign zero polarity, as not needed for full waveform\n 
stations.append([np.array([station], dtype=str), np.array([[azi]], dtype=float), np.array([[toa]], dtype=float), np.array([[pol]], dtype=int)])\n #stations = np.array(stations) # HERE!!! (need to find out what type of object stations is!)\n \n return uid, stations", "def process(date, lat_oi, lon_oi, shared_args, verbose=False):\n \n filename = download(date, shared_args)\n\n atmo_data = data.open_netcdf4(filename)\n\n # choose points\n lat = atmo_data.variables['lat'][:]\n lon = atmo_data.variables['lon'][:]\n lat = numpy.stack([lat]*lon.shape[0], axis=0)\n lon = numpy.stack([lon]*lat.shape[1], axis=1)\n chosen_idxs, data_coor = funcs.choose_points(lat, lon, lat_oi, lon_oi)\n\n latidx = tuple(chosen_idxs[0])\n lonidx = tuple(chosen_idxs[1])\n \n t1, t2 = data.closest_hours(atmo_data.variables['time'][:].data,\n atmo_data.variables['time'].units, date)\n t1_dt = num2date(atmo_data.variables['time'][t1], atmo_data.variables['time'].units)\n t2_dt = num2date(atmo_data.variables['time'][t2], atmo_data.variables['time'].units)\n\n index1 = (t1, slice(None), latidx, lonidx)\n index2 = (t2, slice(None), latidx, lonidx)\n\n press = numpy.array(atmo_data.variables['lev'][:])\n\n temp1 = numpy.empty\n temp2 = numpy.empty\n \n temp1 = numpy.diagonal(atmo_data.variables['T'][index1], axis1=1, axis2=2).T\n temp2 = numpy.diagonal(atmo_data.variables['T'][index2], axis1=1, axis2=2).T\n\n rhum1 = numpy.diagonal(atmo_data.variables['RH'][index1], axis1=1, axis2=2).T # relative humidity\n rhum2 = numpy.diagonal(atmo_data.variables['RH'][index2], axis1=1, axis2=2).T\n\n height1 = numpy.diagonal(atmo_data.variables['H'][index1], axis1=1, axis2=2).T / 1000.0 # height\n height2 = numpy.diagonal(atmo_data.variables['H'][index2], axis1=1, axis2=2).T / 1000.0\n\n # interpolate in time, now they are shape (4, N)\n t = interp.interp_time(date, temp1, temp2, t1_dt, t2_dt)\n h = interp.interp_time(date, height1, height2, t1_dt, t2_dt)\n rh = interp.interp_time(date, rhum1, rhum2, t1_dt, t2_dt)\n \n # interpolate in space, now they are shape (1, N)\n height = interp.idw(h, data_coor, [lat_oi, lon_oi])\n temp = interp.idw(t, data_coor, [lat_oi, lon_oi])\n relhum = interp.idw(rh, data_coor, [lat_oi, lon_oi])\n \n # calculate the number of nan and zero values in the array and remove them, reducing the size of the array accordingly\n nr_of_nans1 = numpy.sum(temp1[0].mask)\n nr_of_nans2 = numpy.sum(temp2[0].mask)\n nr_of_nans = max([nr_of_nans1,nr_of_nans2])\n \n height = height[nr_of_nans:]\n temp = temp[nr_of_nans:]\n relhum = relhum[nr_of_nans:]\n press = press[nr_of_nans:]\n\n # load standard atmosphere for mid-lat summer\n # TODO evaluate standard atmo validity, add different ones for different TOY?\n stan_atmo = numpy.loadtxt(settings.STAN_ATMO, unpack=True)\n stan_height, stan_press, stan_temp, stan_relhum = stan_atmo\n # add standard atmo above cutoff index\n \n cutoff_idx = numpy.abs(stan_press - press[-1]).argmin()\n height = numpy.append(height, stan_height[cutoff_idx:])\n press = numpy.append(press, stan_press[cutoff_idx:])\n temp = numpy.append(temp, stan_temp[cutoff_idx:])\n relhum = numpy.append(relhum, stan_relhum[cutoff_idx:])\n \n # Convert relative humidity to percentage for modtran\n relhum = relhum * 100\n\n # TODO add buoy stuff to bottom of atmosphere\n\n if verbose:\n # send out plots and stuff\n stuff = numpy.asarray([height, press, temp, relhum]).T\n h = 'Height [km], Pressure[kPa], Temperature[k], Relative_Humidity[0-100]' + '\\nCoordinates: {0} Buoy:{1}'.format(data_coor, buoy)\n \n 
numpy.savetxt('atmosphere_{0}_{1}_{2}.txt'.format('merra', date.strftime('%Y%m%d'), buoy.id), stuff, fmt='%7.2f, %7.2f, %7.2f, %7.2f', header=h)\n\n return height, press, temp, relhum", "def read_FMI_weatherdata(forcfile, fyear,lyear, asdict=False):\n \n #OmaTunniste;OmaItä;OmaPohjoinen;Kunta;siteid;vuosi;kk;paiva;longitude;latitude;t_mean;t_max;t_min;\n #rainfall;radiation;hpa;lamposumma_v;rainfall_v;lamposumma;lamposumma_cum\n #-site number\n #-date (yyyy mm dd)\n #-latitude (in KKJ coordinates, metres)\n #-longitude (in KKJ coordinates, metres)\n #-T_mean (degrees celcius)\n #-T_max (degrees celcius)\n #-T_min (degrees celcius)\n #-rainfall (mm)\n #-global radiation (per day in kJ/m2)\n #-H2O partial pressure (hPa)\n\n from datetime import datetime\n #forcfile='c:\\\\pyspace\\\\DATAT\\\\Topmodel_calibr\\\\FMI_saa_Porkkavaara.csv'\n\n #import forcing data\n dat=np.genfromtxt(forcfile,dtype=float,delimiter=';', usecols=(5,6,7,10,11,12,13,14,15,16))\n\n fi=np.where(dat[:,0]>=fyear); li=np.where(dat[:,0]<=lyear)\n ix=np.intersect1d(fi,li); #del fi, li\n #print min(ix), max(ix), np.shape(ix)\n tvec=dat[ix,0:3] #YYYY MM DD\n\n dat=dat[ix, 3:] \n\n time=[]; doy=[]\n for k in range(0,len(tvec)):\n time.append(datetime( int(tvec[k,0]), int(tvec[k,1]), int(tvec[k,2]), 0, 0) )\n doy.append(time[k].timetuple().tm_yday)\n \n time=np.array(time)\n doy=np.array(doy)\n \n Ta=dat[:,0];Tmax=dat[:,1]; Tmin=dat[:,2]; Prec=dat[:,3]; Rg=1e3*dat[:,4]/86400.0; Par=Rg*0.5 #from kJ/m2/d-1 to Wm-2 \n e=1e-1*dat[:,5]; #hPa-->kPa\n dds=dat[:,6] #temperature sum\n\n #saturated vapor pressure \n esa=0.6112*np.exp((17.67*Ta)/ (Ta +273.16 -29.66)) #kPa\n vpd=esa - e; #kPa \n vpd[vpd<0]=0.0\n rh=100.0*e/esa;\n rh[rh<0]=0.0; rh[rh>100]=100.0\n \n F={'Ta':Ta, 'Tmin':Tmin, 'Tmax':Tmax, 'Prec':Prec, 'Rg':Rg, 'Par': Par, 'VPD':vpd, 'RH':rh, 'esa':esa, 'h2o':e, 'dds':dds}\n\n F['time']=time\n F['doy']=doy\n \n ix=np.where(np.isnan(F['Prec'])); \n F['Prec'][ix]=0.0\n #del dat, fields, n, k, time\n \n if asdict is not True:\n #return pandas dataframe\n F=pd.DataFrame(F)\n cols=['time', 'doy', 'Ta', 'Tmin','Tmax', 'Prec', 'Rg', 'Par', 'VPD', 'RH', 'esa', 'h2o', 'dds']\n F=F[cols]\n return F", "def tangent_map(field, pfl, rtol=1e-6, atol=1e-9):\n\n periods = pfl.periods # shorthand\n \n def tangent_map_integrand_wrapper(t, y):\n return tangent_map_integrand(t, y, field)\n\n single_period_tangent_maps = []\n R_end = np.roll(pfl.R_k, -1)\n Z_end = np.roll(pfl.Z_k, -1)\n print('R_k: ', pfl.R_k)\n print('R_end:', R_end)\n print('Z_k: ', pfl.Z_k)\n print('Z_end:', Z_end)\n for j in range(periods):\n print('---- Computing tangent map for period {} ----'.format(j))\n phimax = 2 * np.pi / field.nfp\n t_span = (0, phimax)\n # Initialize using (R,Z) from the spectral approach, since it\n # may be more accurate than the initial-value calculation\n # here.\n R0 = pfl.R_k[j]\n Z0 = pfl.Z_k[j]\n \n # The state vector has 6 unknowns: R, Z, and the 4 elements of the U matrix.\n x0 = [R0, Z0, 1, 0, 0, 1]\n\n soln = solve_ivp(tangent_map_integrand_wrapper, t_span, x0, rtol=rtol, atol=atol)\n\n print('# of function evals: ', soln.nfev)\n\n # Make sure we got to the end:\n assert np.abs(soln.t[-1] - phimax) < 1e-13\n\n R = soln.y[0, :]\n Z = soln.y[1, :]\n # Make sure field line is close to the result from periodic_field_line:\n print('R(end) - R0(k+1): ', R[-1] - R_end[j])\n print('Z(end) - Z0(k+1): ', Z[-1] - Z_end[j])\n\n tol = 1e-5\n if np.abs(R[-1] - R_end[j]) > tol or np.abs(Z[-1] - Z_end[j]) > tol:\n raise RuntimeError('Field line is not 
closed. Values of R0 and Z0 provided must have been incorrect')\n\n # Form the single-period tangent map:\n S = np.array([[soln.y[2, -1], soln.y[3, -1]],\n [soln.y[4, -1], soln.y[5, -1]]])\n \n print('S: ', S)\n det = np.linalg.det(S)\n print('determinant of S: ', det)\n\n single_period_tangent_maps.append(S)\n\n full_orbit_tangent_maps = []\n eigvals = []\n eigvects = []\n iotas = []\n iotas_per_period = []\n residues = []\n W_eigvals = []\n W_eigvects = []\n epars = []\n eperps = []\n sigma = np.array([[0, 1], [-1, 0]])\n for j in range(periods):\n print('---- Period {} ----'.format(j))\n # Multiple all the single-period tangent maps together to get the full-orbit tangent map:\n M = single_period_tangent_maps[j]\n for k in range(1, periods):\n M = np.matmul(single_period_tangent_maps[np.mod(j + k, periods)], M)\n #M = np.matmul(M, single_period_tangent_maps[np.mod(j + k, periods)])\n\n print(\"M:\", M)\n det = np.linalg.det(M)\n print('determinant of M: ', det)\n if np.abs(det - 1) > 1e-4:\n raise RuntimeError('Determinant of tangent map is not close to 1!')\n\n eigvals_j, eigvects_j = np.linalg.eig(M)\n print('eigvals: ', eigvals_j)\n print('eigvects: ', eigvects_j)\n\n iota_per_period = np.angle(eigvals_j[0]) / (2 * np.pi)\n iota = iota_per_period * field.nfp\n residue = 0.25 * (2 - np.trace(M))\n print('iota per period: {}, total iota: {}, residue: {}'.format(iota_per_period, iota, residue))\n\n tempmat = np.matmul(sigma, M)\n W = 0.5 * (tempmat + tempmat.transpose())\n W_eigvals_j, W_eigvects_j = np.linalg.eig(W)\n print('W_eigvals: ', W_eigvals_j)\n print('W_eigvects: ', W_eigvects_j)\n eig_ratio = np.max(np.abs(W_eigvals_j)) / np.min(np.abs(W_eigvals_j))\n print('Ratio of W eigvals: ', eig_ratio)\n # The larger eigenvalue corresponds to eperp\n if np.abs(W_eigvals_j[0]) > np.abs(W_eigvals_j[1]):\n eperp = W_eigvects_j[:,0]\n epar = W_eigvects_j[:,1]\n else:\n eperp = W_eigvects_j[:,1]\n epar = W_eigvects_j[:,0]\n\n \"\"\"\n if eig_ratio < 5:\n print('W eigenvalue ratio is close to 1, so using alternative method to pick epar and eperp.')\n Ravg = np.mean(pfl.R_k)\n Zavg = np.mean(pfl.Z_k)\n print('Ravg: {}, Zavg: {}'.format(Ravg, Zavg))\n dR = pfl.R_k[j] - Ravg\n dZ = pfl.Z_k[j] - Zavg\n prod0 = np.abs(dR * W_eigvects_j[0, 0] + dZ * W_eigvects_j[1, 0])\n prod1 = np.abs(dR * W_eigvects_j[0, 1] + dZ * W_eigvects_j[1, 1])\n if prod0 > prod1:\n eperp = W_eigvects_j[:,0]\n epar = W_eigvects_j[:,1]\n else:\n eperp = W_eigvects_j[:,1]\n epar = W_eigvects_j[:,0]\n \"\"\"\n \n # Alessandro uses the convention that epar * M * eperp is >0.\n sign_fac = np.dot(epar, np.dot(M, eperp))\n print('sign_fac: ', sign_fac)\n if sign_fac < 0:\n epar = -epar\n\n # Alessandro's second sign convention: epar(q) * S^q * eperp(0) is > 0\n # First form S^q\n if j == 1:\n Sq = single_period_tangent_maps[0]\n elif j > 1:\n Sq = np.matmul(single_period_tangent_maps[j - 1], Sq)\n # Now that we have Sq, flip the signs if needed:\n if j > 0 and np.dot(epar, np.dot(Sq, eperps[0])) < 0:\n epar = -epar\n eperp = -eperp\n \n full_orbit_tangent_maps.append(M)\n eigvals.append(eigvals_j)\n eigvects.append(eigvects_j)\n iotas_per_period.append(iota_per_period)\n iotas.append(iota)\n residues.append(residue)\n W_eigvals.append(W_eigvals_j)\n W_eigvects.append(W_eigvects_j)\n epars.append(epar)\n eperps.append(eperp)\n \n results = Struct()\n results.single_period_tangent_maps = single_period_tangent_maps\n results.full_orbit_tangent_maps = full_orbit_tangent_maps\n results.eigvals = eigvals\n results.eigvects = eigvects\n 
results.iota_per_period = iota_per_period\n results.iota = iota\n results.residue = residue\n results.epars = epars\n results.eperps = eperps\n \n return results", "def pulsEphem(self):\n\n hduMain = fits.open(self.ft1)\n\n # --------------------------------------------------------------------------------------------- #\n # Split the FT1 file every 4000 events\n noEv = 0\n deltEv = 5000\n count = 0\n wfil = open(os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'), 'w')\n while noEv <= self.nevents:\n hduCols = []\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:noEv+deltEv], format=form, unit=uni) )\n # Updte the tstart and tstop in the header in order for tempo2 to work...\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header) \n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n noEv += deltEv\n count += 1\n if noEv != self.nevents:\n hduCols = []\n noEv -= deltEv\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:self.nevents], format=form, unit=uni) )\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header)\n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n wfil.close()\n\n hduMain.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Run tempo2 for each piece of the FT1\n rfil = open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r')\n percent = 0\n nbFiles = sum(1 for line in open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r'))\n count = 0\n for tmpFil in rfil:\n # Print a progression bar every 5%\n if ( count / np.floor(nbFiles) * 100 ) >= percent:\n self._progressBar(percent, printEvery=5)\n percent += 5\n with open(os.devnull, 'wb') as devnull:\n subprocess.check_call(['/dsm/fermi/fermifast/glast/tempo2-2013.9.1/tempo2',\n '-gr', 'fermi', '-ft1', tmpFil[:-1], '-ft2', self.ft2, '-f', self.ephem,\n '-phase'], stdout=devnull, stderr=subprocess.STDOUT)\n count += 1\n # Replace the old ft1 by the new one with the PULSE_PHASE column\n #os.remove()\n self._gtSelect(data = os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'))\n\n\n\n\n #self.nevents\n #J2032+4127_54683_57791_chol_pos.par\n #os.popen(\"tempo2 -gr fermi -ft1 {} -ft2 {} -f {} -phase\".format(self.ft1, self.ft2, self.ephem))", "def generate_sources(exp_time, fov, sky_center, area=40000.0, prng=None):\r\n prng = parse_prng(prng)\r\n\r\n exp_time = parse_value(exp_time, \"s\")\r\n fov = parse_value(fov, \"arcmin\")\r\n 
area = parse_value(area, \"cm**2\")\r\n\r\n agn_fluxes, gal_fluxes = generate_fluxes(exp_time, area, fov, prng)\r\n\r\n fluxes = np.concatenate([agn_fluxes, gal_fluxes])\r\n\r\n ind = np.concatenate([get_agn_index(np.log10(agn_fluxes)),\r\n gal_index * np.ones(gal_fluxes.size)])\r\n\r\n dec_scal = np.fabs(np.cos(sky_center[1] * np.pi / 180))\r\n ra_min = sky_center[0] - fov / (2.0 * 60.0 * dec_scal)\r\n dec_min = sky_center[1] - fov / (2.0 * 60.0)\r\n\r\n ra0 = prng.uniform(size=fluxes.size) * fov / (60.0 * dec_scal) + ra_min\r\n dec0 = prng.uniform(size=fluxes.size) * fov / 60.0 + dec_min\r\n\r\n return ra0, dec0, fluxes, ind", "def getHFtableData(self, ep=None):\n HFdict = {}\n if self.hfMode == 'limiter':\n HFdict['Heat Flux Mode'] = 'Limiter'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Near Mode\"] = 'Eich Regression #15'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Near Mode\"] = 'User Defined'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqCN\n if self.lqCFmode == 'horacek':\n HFdict[\"\\u03BB Far Mode\"] = 'Horacek Figure 6a'\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n else:\n HFdict[\"\\u03BB Far Mode\"] = 'User Defined'\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n\n HFdict[\"Common Region Near Power Fraction\"] = self.fracCN\n HFdict[\"Common Region Far Power Fraction\"] = self.fracCF\n\n elif self.hfMode == 'multiExp':\n HFdict['Heat Flux Mode'] = 'Multiple (4) Exponentials'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Near Mode\"] = 'Eich Regression #15'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Near Mode\"] = 'User Defined'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqCN\n\n if self.lqCFmode == 'horacek':\n HFdict[\"\\u03BB Far Mode\"] = 'Horacek Figure 6a'\n else:\n HFdict[\"\\u03BB Far Mode\"] = 'User Defined'\n\n\n\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n HFdict[\"Private Region Near Heat Flux Width (\\u03BBq PN) [mm]\"] = self.lqPN\n HFdict[\"Private Region Far Heat Flux Width (\\u03BBq PF) [mm]\"] = self.lqPF\n HFdict[\"Common Region Near Power Fraction\"] = self.fracCN\n HFdict[\"Common Region Far Power Fraction\"] = self.fracCF\n HFdict[\"Private Region Near Power Fraction\"] = self.fracPN\n HFdict[\"Private Region Far Power Fraction\"] = self.fracPF\n\n elif self.hfMode == 'qFile':\n HFdict[\"Heat Flux Mode\"] = 'Read HF from qFile'\n HFdict['qFilePath'] = self.qFilePath\n HFdict['qFileTag'] = self.qFileTag\n\n elif self.hfMode == 'eich':\n HFdict['Heat Flux Mode'] = 'Gaussian Spreading'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Mode\"] = 'Eich Regression #15'\n HFdict[\"Heat Flux Width (\\u03BBq) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Mode\"] = 'User Defined'\n HFdict[\"Heat Flux Width (\\u03BBq) [mm]\"] = self.lqCN\n\n if self.SMode == 'makowski':\n HFdict['Greenwald Density Fraction'] = self.fG\n HFdict['Spreading (S) Mode'] = 'Makowski Figure 6'\n else:\n HFdict['Spreading (S) Mode'] = 'User Defined'\n HFdict['Greenwald Density Fraction'] = 'Only used for Makowski S Mode'\n HFdict['S [mm]'] = self.S\n HFdict['Background Heat Flux'] = self.qBG\n\n if self.hfMode != 'qFile':\n HFdict[\"Power Injected (Pinj) [MW]\"] = self.Pinj\n HFdict[\"Radiated Fraction of Injected Power\"] = self.coreRadFrac\n HFdict[\"Power Crossing Separatrix (Psol) 
[MW]\"] = self.Psol\n HFdict[\"Upper Inner Divertor Power Fraction\"] = self.fracUI\n HFdict[\"Upper Outer Divertor Power Fraction\"] = self.fracUO\n HFdict[\"Lower Inner Divertor Power Fraction\"] = self.fracLI\n HFdict[\"Lower Outer Divertor Power Fraction\"] = self.fracLO\n\n return HFdict", "def F_interp_geos_mat(sounding_lon,sounding_lat,sounding_datenum,\\\n geos_dir='/mnt/Data2/GEOS/s5p_interp/',\\\n interp_fields=['TROPPT']):\n from scipy.io import loadmat\n from scipy.interpolate import RegularGridInterpolator\n \n start_datenum = np.amin(sounding_datenum)\n end_datenum = np.amax(sounding_datenum)\n start_datetime = datedev_py(start_datenum)\n start_year = start_datetime.year\n start_month = start_datetime.month\n start_day = start_datetime.day\n start_hour = start_datetime.hour\n \n end_datetime = datedev_py(end_datenum)\n end_year = end_datetime.year\n end_month = end_datetime.month\n end_day = end_datetime.day\n end_hour = end_datetime.hour\n end_minute = end_datetime.minute\n end_second = end_datetime.second\n \n step_hour = 3 # geos fp data are 3-hourly\n \n geos_start_hour = start_hour-start_hour%step_hour\n geos_start_datetime = datetime.datetime(year=start_year,month=start_month,day=start_day,hour=geos_start_hour)\n if end_hour > 24-step_hour or (end_hour == 24-step_hour and (end_minute > 0 or end_second > 0)):\n geos_end_hour = 0\n geos_end_datetime = datetime.datetime(year=end_year,month=end_month,day=end_day,hour=geos_end_hour) +datetime.timedelta(days=1)\n elif end_hour%step_hour == 0 and end_minute == 0 and end_second == 0:\n geos_end_hour = end_hour\n geos_end_datetime = datetime.datetime(year=end_year,month=end_month,day=end_day,hour=geos_end_hour)\n else:\n geos_end_hour = (step_hour-(end_hour+1)%step_hour)%step_hour+end_hour+1\n geos_end_datetime = datetime.datetime(year=end_year,month=end_month,day=end_day,hour=geos_end_hour)\n \n nstep = (geos_end_datetime-geos_start_datetime).total_seconds()/3600/step_hour+1\n nstep = int(nstep)\n \n geos_data = {}\n # load narr data\n for istep in range(nstep):\n file_datetime = geos_start_datetime+datetime.timedelta(hours=step_hour*istep)\n file_dir = os.path.join(geos_dir,file_datetime.strftime('Y%Y'),\\\n file_datetime.strftime('M%m'),\\\n file_datetime.strftime('D%d'))\n file_path = os.path.join(file_dir,'subset_'+file_datetime.strftime('%Y%m%d_%H')+'.mat')\n if not geos_data:\n mat_data = loadmat(file_path,variable_names=np.concatenate((['lat','lon'],interp_fields)))\n geos_data['lon'] = mat_data['lon'].flatten()\n geos_data['lat'] = mat_data['lat'].flatten()\n geos_data['datenum'] = np.zeros((nstep),dtype=np.float64)\n for fn in interp_fields:\n geos_data[fn] = np.zeros((len(geos_data['lon']),len(geos_data['lat']),nstep))\n geos_data[fn][...,istep] = mat_data[fn]\n else:\n mat_data = loadmat(file_path,variable_names=interp_fields)\n for fn in interp_fields:\n geos_data[fn][...,istep] = mat_data[fn]\n \n geos_data['datenum'][istep] = (file_datetime.toordinal()\\\n +file_datetime.hour/24.\\\n +file_datetime.minute/1440.\\\n +file_datetime.second/86400.+366.)\n # interpolate\n sounding_interp = {}\n for fn in interp_fields:\n my_interpolating_function = \\\n RegularGridInterpolator((geos_data['lon'],geos_data['lat'],geos_data['datenum']),\\\n geos_data[fn],bounds_error=False,fill_value=np.nan)\n sounding_interp[fn] = my_interpolating_function((sounding_lon,sounding_lat,sounding_datenum))\n return sounding_interp", "def __init__(self, fvcom):\n\n # Prepare this object with all the objects we'll need later on (data, dims, 
time, grid, atts).\n self._prep()\n\n self.obj_iter = lambda x: [a for a in dir(x) if not a.startswith('__')]\n\n grid_names = ('lon', 'lat', 'lonc', 'latc', 'nv',\n 'h', 'h_center',\n 'nbe', 'ntsn', 'nbsn', 'ntve', 'nbve',\n 'art1', 'art2', 'a1u', 'a2u',\n 'siglay', 'siglev')\n time_names = ('time', 'Times', 'datetime', 'Itime', 'Itime2')\n\n for key in fvcom:\n if key in grid_names:\n setattr(self.grid, key, fvcom[key])\n elif key in time_names:\n setattr(self.time, key, fvcom[key])\n else: # assume data.\n setattr(self.data, key, fvcom[key])\n # Make some dimensions\n self.dims.three = 3\n self.dims.four = 4\n self.dims.maxnode = 11\n self.dims.maxelem = 9\n # This is a little repetitive (each dimension can be set multiple times), but it has simplicity to its\n # advantage.\n for obj in self.obj_iter(self.data):\n if obj in ('ua', 'va'):\n try:\n self.dims.time, self.dims.nele = getattr(self.data, obj).shape\n except ValueError:\n # Assume we've got a single position.\n self.dims.time = getattr(self.data, obj).shape[0]\n self.dims.nele = 1\n elif obj in ('temp', 'salinity'):\n try:\n self.dims.time, self.dims.siglay, self.dims.node = getattr(self.data, obj).shape\n except ValueError:\n # Assume we've got a single position\n self.dims.time, self.dims.siglay = getattr(self.data, obj).shape[:2]\n self.dims.node = 1\n self.dims.siglev = self.dims.siglay + 1\n elif obj in ['zeta']:\n try:\n self.dims.time, self.dims.node = getattr(self.data, obj).shape\n except ValueError:\n # Assume we've got a single position\n self.dims.time = getattr(self.data, obj).shape[0]\n self.dims.node = 1\n elif obj in ('Times'):\n self.dims.time, self.dims.DateStrLen = getattr(self.time, obj).shape\n elif obj in ('time', 'Itime', 'Itime2', 'datetime'):\n self.dims.time = getattr(self.time, obj).shape", "def calculation_time_analysis():\n\tfrom . import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\t\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)", "def entries_from_goes_ts_file2(file, default_waveunit=None):\n\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 
'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n observation_time_start = start_time\n observation_time_end = end_time\n\n wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n entry = DatabaseEntry(observation_time_start=start_time,\n observation_time_end = end_time,\n instrument='EIT',\n wavemin=wavemin,\n wavemax=wavemax,\n metadata=metadata,\n size=size)\n\n return entry", "def compliance_time_series(county, core_path , patterns_path, backfill = False, GEOID_type = 'CBG'):\n #Load data about places and patterns\n county_places = pd.read_csv(core_path+'places-'+ county +'.csv', index_col='safegraph_place_id') \n \n pattern_dates = [x[5:9] for x in sorted(os.listdir(patterns_path+'main-file-'+ county +'/'))]\n w = 0\n next_date = pattern_dates[w]\n #Create visitor table\n place_cts = pd.DataFrame() #First two months don't have weekly patterns, use empty data.frame\n\n #List files for social distancing metrics in that county\n months_path = '../social_distancing/social_dist_'+ county +'/'\n month_list = sorted(os.listdir(months_path))\n os.makedirs( '../stats/time_series/', exist_ok = True)\n #metrics dictionary to be filled looping through every day\n if not os.path.isfile('../stats/time_series/metrics_{}_CT.csv'.format(county)) or backfill:\n metrics = pd.DataFrame()\n existing_dates = []\n else:\n metrics = pd.read_csv('../stats/time_series/metrics_{}_CT.csv'.format(county), dtype = {'origin_census_tract':str})\n #dates already processed\n existing_dates = metrics['date'].unique() #series of unique dates\n\n #Initialize columns of new data frame\n if GEOID_type == 'CBG':\n columns= {'date':[], 'origin_census_block_group':[], 'low_device_count':[], 'pct_at_home':[], 'pct_within_neighborhood':[], 'median_distance_traveled':[],\n 'median_percentage_time_home':[], 'total_visits_to_places':[], 
'normalized_visits_to_places':[], 'total_expected_contacts':[], 'places_visited':[], 'cbgs_visited':[]}\n if GEOID_type == 'CT':\n columns= {'date':[], 'origin_census_tract':[], 'low_device_count':[], 'pct_at_home':[], 'pct_within_neighborhood':[], 'median_distance_traveled':[],\n 'median_percentage_time_home':[], 'total_visits_to_places':[], 'normalized_visits_to_places':[], 'places_visited':[]} \n changed = False\n for month in month_list:\n #Loop through every day\n day_list = sorted(os.listdir(months_path + month))\n for day in day_list: \n date_name = month + '-' + day\n print(date_name)\n if month+'-'+day == next_date:\n w = w+1\n if w < len(pattern_dates):\n next_date = pattern_dates[w]\n if date_name in existing_dates:\n continue\n print(\"--changing to next patterns file\")\n county_patterns = pd.read_csv(patterns_path + 'main-file-{}/2020-{}-weekly-patterns.csv.gz'.format(county, date_name),\n index_col='safegraph_place_id')\n norm_factors = pd.read_csv(\n '../social_distancing/normalization/'+'normalization_{}.csv'.format(county),\n dtype={'origin_census_block_group':str})\n norm_factors.set_index('origin_census_block_group', drop =True, inplace=True)\n\n\n #Establish prior for hourly distribution of visits at the top_category level\n county_patterns= county_patterns.join(county_places[['top_category','sub_category']], how='inner')\n restaurants = county_patterns['top_category'] == 'Restaurants and Other Eating Places'\n county_patterns.loc[restaurants, 'top_category'] = county_patterns.loc[restaurants, 'sub_category']\n norm_factor = norm_factors.loc[norm_factors.date == date_name].norm_factor\n\n prior_dict = {}\n for category in county_patterns.top_category.value_counts().index:\n places_in_cat = county_patterns['top_category'] == category\n dirich_samples = [np.array(json.loads(x)) for x in county_patterns.loc[places_in_cat, 'visits_by_each_hour'] ] \n prior_dict[category] = dirichlet.getInitAlphas(dirich_samples)\n if GEOID_type == 'CBG':\n place_cbgs = ccc.place_cbg_contacts_table(\n county_patterns,\n prior_dict,\n norm_factor,\n GEOID_type)\n place_cbgs = place_cbgs.loc[place_cbgs['expected_contacts']>1]\n place_cbgs = place_cbgs.join(county_places[['location_name','latitude','longitude']], how='inner')\n place_cbgs.reset_index(inplace=True, drop=False)\n place_cbgs.set_index('origin_census_block_group', inplace=True, drop=True)\n if GEOID_type == 'CT':\n place_cts = ccc.place_cbg_contacts_table(\n county_patterns,\n prior_dict,\n norm_factor,\n GEOID_type)\n place_cts = place_cts.join(county_places[['location_name','latitude','longitude']], how='inner')\n place_cts.reset_index(inplace=True, drop=False)\n place_cts.set_index('origin_census_tract', inplace=True, drop=True)\n print('--computed bipartite contact network')\n\n if date_name in existing_dates:\n continue\n\n\n file_name = os.listdir(months_path+ '/' + month+'/'+day)[0]\n data_soc_dist = pd.read_csv(months_path+ '/' + month+'/'+day+'/'+file_name, dtype={'origin_census_block_group':str})\n data_soc_dist['median_distance_traveled_from_home'] = [0 if math.isnan(x) else x for x in data_soc_dist['distance_traveled_from_home']] #NOT NEEDED?\n \n if GEOID_type == 'CBG':\n [update_metrics_columns(row, date_name, place_cbgs, county_places, columns) for i, row in data_soc_dist.iterrows()]\n else:\n ####CREATE FUNCTION THAT AGGREGATES TO CENSUS TRACT\n data_soc_dist['origin_census_tract'] = [x[:-1] for x in data_soc_dist['origin_census_block_group']]\n ct_data_soc_dist = 
data_soc_dist.groupby('origin_census_tract').apply(ddd.aggregate_to_ct).reset_index()\n [update_metrics_columns_CT(row, date_name, place_cts, county_places, columns) for i, row in ct_data_soc_dist.iterrows()]\n changed = True\n\n print(\"--merging rows from new dates\")\n new_metrics = pd.DataFrame.from_dict(columns, orient='index').transpose()\n\n metrics = pd.concat([metrics, new_metrics], ignore_index=True)\n if changed:\n if GEOID_type == 'CT':\n metrics.to_csv('../stats/time_series/metrics_{}_CT.csv'.format(county),index=False)\n else:\n metrics.to_csv('../stats/time_series/metrics_{}_CBG.csv'.format(county),index=False)\n print(\"--finished updating time series for {}\".format(county))\n return(0)", "def create_assumption_map(columns, df):\n assumption_map = pd.DataFrame(columns=columns)\n\n for fuel in fuel_types:\n for value in values:\n if fuel == \"coal\" and value == \"delta_capex\":\n retrofit = True\n else:\n retrofit = False\n array = values_array(df, fuel, value, retrofit=retrofit)\n mean, trim_mean_v, std, top_p, bot_p = value_stats(array)\n\n if value == \"delta_capex\":\n units = \"2019€_KW\"\n elif value == \"delta_om\":\n units = \"2019€_KWh\"\n elif value == \"delta_heatrate\":\n units = \"KW_KWh\"\n assumption_map = assumption_map.append({\"fuel_type\": fuel, \"value\": value, \"range_low\": bot_p,\n \"range_high\": top_p, \"reference_value\": mean, \"units\": units},\n ignore_index=True)\n return assumption_map", "def identify_peaks_amld_aeris(xCar, xDate, xDir, xFilename, outDir, processedFileLoc, Engineering, threshold='.1',\n rthresh = '.7',\n xTimeThreshold='5.0', minElevated='2', xB='102', basePerc='50',aeris=True):\n import csv, numpy\n import shutil\n from shapely.geometry import Point\n import pandas as pd\n import geopandas as gpd\n\n try:\n #amld = True\n baseCalc = float(basePerc)\n xABThreshold = float(threshold)\n minElevated = float(minElevated)\n rMin = float(rthresh)\n xDistThreshold = 160.0 # find the maximum CH4 reading of observations within street segments of this grouping distance in meters\n xSDF = 4 # multiplier times standard deviation for floating baseline added to mean\n\n xB = int(xB)\n xTimeThreshold = float(xTimeThreshold)\n fn = xDir + xFilename # set processed csv file to read in\n fnOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".csv\"\n fnShape = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".shp\"\n fnLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\", \"\") + \".log\"\n pkLog = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \"_info.csv\"\n jsonOut = outDir + \"Peaks\" + \"_\" + xCar + \"_\" + xDate.replace(\"-\",\"\") + \".geojson\"\n infOut = processedFileLoc + xCar + \"_\" + xDate.replace(\"-\", \"\") + \"_info.csv\"\n\n ### TEST THING\n fn = xDir + xFilename # set raw text file to read in\n filenames = nameFiles(outDir,processedFileLoc,xCar,xDate,True)\n fnOut = filenames['fnOut']\n fnShape = filenames['fnShape']\n fnLog = filenames['fnLog']\n pkLog = filenames['pkLog']\n jsonOut = filenames['jsonOut']\n infOut = filenames['infOut']\n\n print(f\"{outDir}Peaks_{xCar}_{xDate}_info.csv\")\n fLog = open(fnLog, 'w')\n shutil.copy(infOut, pkLog)\n\n # convert lists to numpy arrays\n tempFile = pd.read_csv(fn)\n tempFile['ttot'] = tempFile['ttot'].astype(float)\n tempFile['ttot'] = tempFile['ttot'].astype(str)\n\n #tempFile.sort_values(by='ttot', ascending=True).reset_index(drop=True).to_csv(\n # 
'/Users/emilywilliams/Desktop/arg.csv')\n\n colnames = tempFile.columns\n\n aEpochTime = numpy.array(tempFile.iloc[:,colnames.get_loc('nearest10hz')])\n aDateTime = numpy.array(tempFile.apply(lambda x: x.DATE.replace('-','') + x.TIME.replace(':',''),axis=1))\n aLat = numpy.array(tempFile.iloc[:,colnames.get_loc('LAT')])\n aLon = numpy.array(tempFile.iloc[:,colnames.get_loc('LONG')])\n aCH4 = numpy.array(tempFile.iloc[:,colnames.get_loc('CH4')])\n aTCH4 = numpy.array(tempFile.iloc[:,colnames.get_loc('CH4')])\n aMean = numpy.zeros(len(aEpochTime))\n aCH4Mean_true = numpy.zeros(len(aEpochTime))\n aCH4STD= numpy.zeros(len(aEpochTime))\n aCH4Max= numpy.zeros(len(aEpochTime))\n aCH4Min= numpy.zeros(len(aEpochTime))\n aCH4Median= numpy.zeros(len(aEpochTime))\n\n aMeanC2H6 = numpy.zeros(len(aEpochTime))\n aThreshold = numpy.zeros(len(aEpochTime))\n aOdom = numpy.array(tempFile.apply(lambda x: x.VELOCITY*.1,axis=1).cumsum())\n aC2H6 = numpy.array(tempFile.iloc[:,colnames.get_loc('C2H6')])\n aC2C1 = numpy.array(tempFile.iloc[:,colnames.get_loc('C1C2')])\n aR = numpy.array(tempFile.iloc[:,colnames.get_loc('R')])\n if aeris:\n aBearingCCWE = numpy.array(tempFile.iloc[:, colnames.get_loc('bearing')])\n aBearingCWN = numpy.array(tempFile.iloc[:, colnames.get_loc('bearing')])\n aWS_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('r_avg')])\n aWD_CCWE_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('theta_avg')])\n aWD_CWN_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('theta_avg')])\n\n if not aeris:\n aBearingCCWE = numpy.array(tempFile.iloc[:,colnames.get_loc('Bearing_ccwe')])\n aBearingCWN = numpy.array(tempFile.iloc[:,colnames.get_loc('Bearing_cwn')])\n aWS_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('airmar_ws')])\n aWD_CCWE_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('airmar_wd_cor_ccwe')])\n aWD_CWN_cor = numpy.array(tempFile.iloc[:, colnames.get_loc('airmar_wd_cor_cwn')])\n\n arolling8= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_8')])\n arolling15= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_15')])\n arolling30= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_30')])\n arolling45= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_45')])\n arolling60= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingR_60')])\n\n arollingc2h6_15= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingc2h6_15')])\n arollingc2h6_30= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingc2h6_30')])\n arollingc2h6_45= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingc2h6_45')])\n\n arollingch4_60= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_60')])\n arollingch4_45= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_45')])\n arollingch4_30= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_30')])\n arollingch4_15= numpy.array(tempFile.iloc[:,colnames.get_loc('rollingch4_15')])\n\n\n\n xLatMean = numpy.mean(aLat)\n xLonMean = numpy.mean(aLon)\n #xCH4Mean = numpy.mean(aCH4)\n #xC2H6Mean = numpy.mean(aC2H6)\n #xC2C1Mean = numpy.mean(aC2C1)\n\n fLog.write(\"Day CH4_mean = \" + str(numpy.mean(aCH4)) +\n \", Day CH4 SD = \" + str(numpy.std(aCH4)) + \"\\n\")\n fLog.write(\"Day C2H6 Mean = \" + str(numpy.mean(aC2H6)) +\n \", Day C2H6 SD = \" + str(numpy.std(aC2H6)) + \"\\n\")\n fLog.write(\"Center lon/lat = \" + str(xLonMean) + \", \" + str(xLatMean) + \"\\n\")\n\n lstCH4_AB = []\n count = tempFile.shape[0]\n # generate list of the index for observations that were above the threshold\n for i in range(0, count - 2):\n if ((count - 2) > 
xB):\n topBound = min((i + xB), (count - 2))\n botBound = max((i - xB), 0)\n\n for t in range(min((i + xB), (count - 2)), i, -1):\n if aEpochTime[t] < (aEpochTime[i] + (xB / 2)):\n topBound = t\n break\n for b in range(max((i - xB), 0), i):\n if aEpochTime[b] > (aEpochTime[i] - (xB / 2)):\n botBound = b\n break\n\n xCH4Mean = numpy.percentile(aCH4[botBound:topBound], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[botBound:topBound], baseCalc)\n xCH4Mean_true = numpy.mean(aCH4[botBound:topBound])\n xCH4STD = numpy.std(aCH4[botBound:topBound])\n xCH4Min = numpy.min(aCH4[botBound:topBound])\n xCH4Max = numpy.max(aCH4[botBound:topBound])\n xCH4Median = numpy.percentile(aCH4[botBound:topBound],50)\n\n\n # xCH4SD = numpy.std(aCH4[botBound:topBound])\n else:\n xCH4Mean = numpy.percentile(aCH4[0:(count - 2)], baseCalc)\n xC2H6Mean = numpy.percentile(aC2H6[0:(count - 2)], baseCalc)\n xCH4Mean_true = numpy.mean(aCH4[0:(count - 2)])\n xCH4STD = numpy.std(aCH4[0:(count - 2)])\n xCH4Min = numpy.min(aCH4[0:(count - 2)])\n xCH4Max = numpy.max(aCH4[0:(count - 2)])\n xCH4Median = numpy.percentile(aCH4[0:(count - 2)],50)\n\n\n # xCH4SD = numpy.std(aCH4[0:(count-2)])\n xThreshold = xCH4Mean + (xCH4Mean * xABThreshold)\n xThreshold_c2h6 = xC2H6Mean + (xC2H6Mean * xABThreshold)\n\n if (aCH4[i] > xThreshold and aR[i]>rMin):\n #if (aCH4[i] > xThreshold):\n lstCH4_AB.append(i)\n aMean[i] = xCH4Mean\n aMeanC2H6[i] = xC2H6Mean\n aThreshold[i] = xThreshold\n aCH4STD[i] = xCH4STD\n aCH4Max[i] = xCH4Max\n aCH4Min[i] = xCH4Min\n aCH4Mean_true[i] = xCH4Mean_true\n aCH4Median[i] = xCH4Median\n\n # now group the above baseline threshold observations into groups based on distance threshold\n lstCH4_ABP = []; xDistPeak = 0.0; xCH4Peak = 0.0;\n xTime = 0.0; cntPeak = 0; cnt = 0; sID = \"\"; sPeriod5Min = \"\"; prevIndex = 0;\n\n for i in lstCH4_AB:\n if (cnt == 0):\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n else:\n # calculate distance between points\n xDist = haversine(xLat1, xLon1, aLat[i], aLon[i])\n xDistPeak += xDist\n xCH4Peak += (xDist * (aCH4[i] - aMean[i]))\n xLon1 = aLon[i]\n xLat1 = aLat[i]\n xOdom = aOdom[i]\n if (sID == \"\"):\n xTime = aEpochTime[i]\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n if ((aEpochTime[i] - aEpochTime[prevIndex]) > xTimeThreshold): # initial start of a observed peak\n cntPeak += 1\n xTime = aEpochTime[i]\n xDistPeak = 0.0\n xCH4Peak = 0.0\n sID = str(xCar) + \"_\" + str(xTime)\n sPeriod5Min = str(int((aEpochTime[i] - 1350000000) / (30 * 1))) # 30 sec\n # print str(i) +\", \" + str(xDist) + \",\" + str(cntPeak) +\",\" + str(xDistPeak)\n #lstCH4_ABP.append(\n # [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i], aThreshold[i],\n # xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n # aUavg[i],aVavg[i],aWavg[i],aRavg[i],aThavg[i]])\n lstCH4_ABP.append(\n [sID, xTime, aEpochTime[i], aDateTime[i], aCH4[i], aLon[i], aLat[i], aMean[i],aCH4Mean_true[i],aCH4STD[i],\n aCH4Max[i],aCH4Min[i],aCH4Median[i], aThreshold[i],\n xDistPeak, xCH4Peak, aTCH4[i],aC2H6[i],aC2C1[i],aR[i],aMeanC2H6[i], sPeriod5Min, xOdom,\n aWD_CCWE_cor[i],aWD_CWN_cor[i],aWS_cor[i],aBearingCCWE[i],aBearingCWN[i],arolling8[i],\n arolling15[i],arolling30[i],arolling60[i],arollingc2h6_15[i],arollingc2h6_30[i],arollingc2h6_45[i],\n arollingch4_15[i],arollingch4_30[i],arollingch4_45[i],arollingch4_60[i]\n ])\n\n cnt += 1\n prevIndex = i\n\n # Finding peak_id larger than 160.0 m\n 
tmpsidlist = []\n for r in lstCH4_ABP:\n if (float(r[9]) > 160.0) and (r[0] not in tmpsidlist):\n tmpsidlist.append(r[0])\n cntPeak -= len(tmpsidlist)\n\n fLog.write(\"Number of peaks found: \" + str(cntPeak) + \"\\n\")\n print(f\"{xCar} \\t {xDate} \\t {xFilename} \\t {count} \\t {len(lstCH4_ABP)}\")\n\n # write out the observed peaks to a csv to be read into a GIS\n fOut = open(fnOut, 'w')\n # s = \"PEAK_NUM,EPOCHSTART,EPOCH,DATETIME,CH4,LON,LAT,CH4_BASELINE,CH4_THRESHOLD,PEAK_DIST_M,PEAK_CH4,TCH4,PERIOD5MIN\\n\"\n s = \"OP_NUM,OP_EPOCHSTART,OB_EPOCH,OB_DATETIME,OB_CH4,OB_LON,OB_LAT,OB_CH4_BASELINE,OB_CH4_MEAN,OB_CH4_STD,OB_CH4_MAX,OB_CH4_MIN,OB_CH4_MED,\" \\\n \"OB_CH4_THRESHOLD,OP_PEAK_DIST_M,OP_PEAK_CH4,OB_TCH4,OB_C2H6,\" \\\n \"OB_C2C1,OB_R,OB_C2H6_BASELINE,OB_PERIOD5MIN,ODOMETER,OB_WD_CCWE,OB_WD_CWN,OB_WS,\" \\\n \"OB_BEARING_CCWE,OB_BEARING_CWN,OB_R_8,OB_R_15,OB_R_30,OB_R_60,OB_C2H6_15,OB_C2H6_30,OB_C2H6_45,\" \\\n \"OB_CH4_15,OB_CH4_30,OB_CH4_45,OB_CH4_60\\n\"\n\n\n fOut.write(s)\n\n truecount = 0\n for r in lstCH4_ABP:\n if r[0] not in tmpsidlist:\n s = ''\n for rr in r:\n s += str(rr) + ','\n s = s[:-1]\n s += '\\n'\n fOut.write(s)\n truecount += 1\n fOut.close()\n fLog.close()\n\n openFile = pd.read_csv(fnOut)\n if openFile.shape[0] != 0:\n pkDistDf = openFile.copy().groupby('OP_NUM', as_index=False).apply(\n lambda x: max(x.ODOMETER) - min(x.ODOMETER))\n pkDistDf.columns = ['OP_NUM', 'OP_DISTANCE']\n openFile = pd.merge(openFile.copy(), pkDistDf)\n tempCount = openFile.groupby('OP_NUM', as_index=False).OP_EPOCHSTART.count().rename(\n columns={'OP_EPOCHSTART': 'Frequency'})\n tempCount = tempCount.loc[tempCount.Frequency >= minElevated, :]\n if tempCount.shape[0] == 0:\n print(f\"No Observed Peaks with enough Elevated Readings Found in the file: {xFilename}\")\n tempCount.to_csv(fnOut) ## added to deal with issue where it wasn't being filtered out\n elif tempCount.shape[0] != 0:\n oFile = pd.merge(openFile, tempCount, on=['OP_NUM'])\n openFile = oFile.copy()\n del (oFile)\n openFile[\"minElevated\"] = openFile.apply(lambda x: int(minElevated), axis=1)\n openFile['OB_CH4_AB'] = openFile.loc[:, 'OB_CH4'].sub(openFile.loc[:, 'OB_CH4_BASELINE'], axis=0)\n openFile['OB_C2H6_AB'] = openFile.loc[:, 'OB_C2H6'].sub(openFile.loc[:, 'OB_C2H6_BASELINE'],axis=0)\n openFile.to_csv(fnOut, index=False)\n\n\n fileWt = weighted_loc(openFile, 'OB_LAT', 'OB_LON', 'OP_NUM', 'OB_CH4_AB',).loc[:, :].rename(\n columns={'OB_LAT': 'pk_LAT', 'OB_LON': 'pk_LON'}).reset_index(drop=True)\n geometry_temp = [Point(lon, lat) for lon, lat in zip(fileWt['pk_LON'], fileWt['pk_LAT'])]\n crs = 'EPSG:4326'\n # geometry is the point of the lat/lon\n # gdf_buff = gpd.GeoDataFrame(datFram, crs=crs, geometry=geometry_temp)\n\n ## BUFFER AROUND EACH 'OP_NUM' WITH BUFFER DISTANCE\n gdf_buff = gpd.GeoDataFrame(fileWt, crs=crs, geometry=geometry_temp)\n # gdf_buff = makeGPD(datFram,'LON','LAT')\n\n ##maybe this is the issue?\n #gdf_buff = gdf_buff.to_crs(epsg=32610)\n #gdf_buff['geometry'] = gdf_buff.loc[:, 'geometry'].buffer(30)\n try:\n gdf_buff.to_file(jsonOut, driver=\"GeoJSON\")\n #gdf_buff.to_file('testthing.geojson', driver=\"GeoJSON\")\n except:\n print(\"Error Saving JSON File\")\n elif openFile.shape[0] == 0:\n print(f\"No Observed Peaks Found in the file:{xFilename}\")\n except ValueError:\n print(\"Error in Identify Peaks\")\n return False", "def __init__(self):\n self.datasets = [\"ISCCP\",\"ISCCP_raw\",\"PATMOSX\",\"PATMOSX_raw\"]\n f = cdms.open(\"OBS/clt_ISCCP_corrected_198301-200912.nc\")\n fp = 
cdms.open(\"OBS/clt_PATMOSX_corrected_198301-200912.nc\")\n \n f_old = cdms.open(\"OBS/clt_ISCCP_198307-200806.nc\")\n fp_old = cdms.open(\"OBS/clt_PATMOSX_198200-200912.nc\")\n\n fgpcp = cdms.open(\"OBS/GPCP.precip.mon.mean.nc\")\n fcmap = cdms.open(\"OBS/CMAP.std.precip.mon.mean.nc\")\n \n \n self.ISCCP = f(\"clt\",time=('1984-1-1','2009-12-31'))\n self.ISCCP = MV.masked_where(np.isnan(self.ISCCP),self.ISCCP)\n cdutil.setTimeBoundsMonthly(self.ISCCP)\n\n self.PATMOSX = fp(\"clt\",time=('1984-1-1','2009-12-31'))\n self.PATMOSX = MV.masked_where(np.isnan(self.PATMOSX),self.PATMOSX)\n cdutil.setTimeBoundsMonthly(self.PATMOSX)\n\n self.ISCCP_raw = f_old(\"clt\",time=('1984-1-1','2008-6-31'))\n self.ISCCP_raw = MV.masked_where(np.isnan(self.ISCCP_raw),self.ISCCP_raw)\n cdutil.setTimeBoundsMonthly(self.ISCCP_raw)\n\n self.PATMOSX_raw = fp_old(\"clt\",time=('1982-1-1','2009-12-31'))\n self.PATMOSX_raw = MV.masked_where(np.isnan(self.PATMOSX_raw),self.PATMOSX_raw)\n cdutil.setTimeBoundsMonthly(self.PATMOSX_raw)\n\n self.GPCP = cdutil.averager(fgpcp(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n cdutil.setTimeBoundsMonthly(self.GPCP)\n self.CMAP = cdutil.averager(fcmap(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n self.CMAP.setAxis(0,self.GPCP.getTime())\n cdutil.setTimeBoundsMonthly(self.CMAP)", "def test_time_series_from_file():\r\n\r\n TR = 1.35\r\n ts_ff = io.time_series_from_file\r\n\r\n #File names:\r\n fmri_file1 = os.path.join(data_path,'fmri1.nii.gz')\r\n fmri_file2 = os.path.join(data_path,'fmri2.nii.gz')\r\n\r\n #Spatial coordinates into the volumes:\r\n coords1 = np.array([[5,5,5,5],[5,5,5,5],[1,2,3,4]])\r\n coords2 = np.array([[6,6,6,6],[6,6,6,6],[3,4,5,6]])\r\n\r\n #No averaging, no normalization:\r\n t1 = ts_ff([fmri_file1,fmri_file2],[coords1,coords2],TR)\r\n\r\n npt.assert_equal(t1[0].shape,(4,80)) # 4 coordinates, 80 time-points\r\n\r\n t2 = ts_ff([fmri_file1,fmri_file2],[coords1,coords2],TR,average=True)\r\n\r\n npt.assert_equal(t2[0].shape,(80,)) # collapse coordinates,80 time-points\r\n\r\n t3 = ts_ff(fmri_file1,coords1,TR,normalize='zscore')\r\n\r\n #The mean of each channel should be almost equal to 0:\r\n npt.assert_almost_equal(t3.data[0].mean(),0)\r\n #And the standard deviation should be almost equal to 1:\r\n npt.assert_almost_equal(t3.data[0].std(),1)\r\n\r\n t4 = ts_ff(fmri_file1,coords1,TR,normalize='percent')\r\n\r\n #In this case, the average is almost equal to 0, but no constraint on the\r\n #std:\r\n npt.assert_almost_equal(t4.data[0].mean(),0)\r\n\r\n #Make sure that we didn't mess up the sampling interval:\r\n npt.assert_equal(t4.sampling_interval,nitime.TimeArray(1.35))\r\n\r\n # Test the default behavior:\r\n data = io.load(fmri_file1).get_data()\r\n t5 = ts_ff(fmri_file1)\r\n npt.assert_equal(t5.shape, data.shape)\r\n npt.assert_equal(t5.sampling_interval, ts.TimeArray(1, time_unit='s'))\r\n\r\n # Test initializing TR with a TimeArray:\r\n t6= ts_ff(fmri_file1, TR=ts.TimeArray(1350, time_unit='ms'))\r\n npt.assert_equal(t4.sampling_interval, t6.sampling_interval)\r\n\r\n # Check the concatenation dimensions:\r\n t7 = ts_ff([fmri_file1, fmri_file2])\r\n npt.assert_equal([t7.shape[:3], t7.shape[-1]], [data.shape[:3], data.shape[-1]*2])\r\n\r\n t8 = ts_ff([fmri_file1, fmri_file2], average=True)\r\n npt.assert_equal(t8.shape[0], data.shape[-1]*2)\r\n\r\n t9 = ts_ff([fmri_file1, fmri_file2], average=True, normalize='zscore')\r\n npt.assert_almost_equal(t9.data.mean(), 0)", "def _make_meta(self):\n 
available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n charts_after_dataset = list()\n \n \n # dataset 
table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)", "def __init__(self, x=0, y=0, flux=None, time=None, wcs=None, quality=None, mask=None, exposure=1800, sector=0,\n size=150,\n camera=1, ccd=1, cadence=None):\n super(Source, self).__init__()\n if cadence is None:\n cadence = []\n if quality is None:\n quality = []\n if wcs is None:\n wcs = []\n if time is None:\n time = []\n if flux is None:\n flux = []\n\n self.size = size\n self.sector = 
sector\n self.camera = camera\n self.ccd = ccd\n self.cadence = cadence\n self.quality = quality\n self.exposure = exposure\n self.wcs = wcs\n co1 = 38.5\n co2 = 116.5\n catalog_1 = self.search_gaia(x, y, co1, co1)\n catalog_2 = self.search_gaia(x, y, co1, co2)\n catalog_3 = self.search_gaia(x, y, co2, co1)\n catalog_4 = self.search_gaia(x, y, co2, co2)\n catalogdata = vstack([catalog_1, catalog_2, catalog_3, catalog_4], join_type='exact')\n catalogdata = unique(catalogdata, keys='DESIGNATION')\n coord = wcs.pixel_to_world([x + (size - 1) / 2 + 44], [y + (size - 1) / 2])[0].to_string()\n ra = float(coord.split()[0])\n dec = float(coord.split()[1])\n catalogdata_tic = tic_advanced_search_position_rows(ra=ra, dec=dec, radius=(self.size + 2) * 21 * 0.707 / 3600)\n # print(f'no_of_stars={len(catalogdata_tic)}, camera={camera}, ccd={ccd}: ra={ra}, dec={dec}, radius={(self.size + 2) * 21 * 0.707 / 3600}')\n self.tic = convert_gaia_id(catalogdata_tic)\n self.flux = flux[:, y:y + size, x:x + size]\n self.mask = mask[y:y + size, x:x + size]\n self.time = np.array(time)\n median_time = np.median(self.time)\n interval = (median_time - 388.5) / 365.25\n\n num_gaia = len(catalogdata)\n tic_id = np.zeros(num_gaia)\n x_gaia = np.zeros(num_gaia)\n y_gaia = np.zeros(num_gaia)\n tess_mag = np.zeros(num_gaia)\n in_frame = [True] * num_gaia\n for i, designation in enumerate(catalogdata['DESIGNATION']):\n ra = catalogdata['ra'][i]\n dec = catalogdata['dec'][i]\n if not np.isnan(catalogdata['pmra'].mask[i]): # masked?\n ra += catalogdata['pmra'][i] * np.cos(np.deg2rad(dec)) * interval / 1000 / 3600\n if not np.isnan(catalogdata['pmdec'].mask[i]):\n dec += catalogdata['pmdec'][i] * interval / 1000 / 3600\n pixel = self.wcs.all_world2pix(\n np.array([catalogdata['ra'][i], catalogdata['dec'][i]]).reshape((1, 2)), 0, quiet=True)\n x_gaia[i] = pixel[0][0] - x - 44\n y_gaia[i] = pixel[0][1] - y\n try:\n tic_id[i] = catalogdata_tic['ID'][np.where(catalogdata_tic['GAIA'] == designation.split()[2])[0][0]]\n except:\n tic_id[i] = np.nan\n if np.isnan(catalogdata['phot_g_mean_mag'][i]):\n in_frame[i] = False\n elif catalogdata['phot_g_mean_mag'][i] >= 25:\n in_frame[i] = False\n elif -4 < x_gaia[i] < self.size + 3 and -4 < y_gaia[i] < self.size + 3:\n dif = catalogdata['phot_bp_mean_mag'][i] - catalogdata['phot_rp_mean_mag'][i]\n tess_mag[i] = catalogdata['phot_g_mean_mag'][\n i] - 0.00522555 * dif ** 3 + 0.0891337 * dif ** 2 - 0.633923 * dif + 0.0324473\n if np.isnan(tess_mag[i]):\n tess_mag[i] = catalogdata['phot_g_mean_mag'][i] - 0.430\n if np.isnan(tess_mag[i]):\n in_frame[i] = False\n else:\n in_frame[i] = False\n\n tess_flux = 10 ** (- tess_mag / 2.5)\n t = Table()\n t[f'tess_mag'] = tess_mag[in_frame]\n t[f'tess_flux'] = tess_flux[in_frame]\n t[f'tess_flux_ratio'] = tess_flux[in_frame] / np.nanmax(tess_flux[in_frame])\n t[f'sector_{self.sector}_x'] = x_gaia[in_frame]\n t[f'sector_{self.sector}_y'] = y_gaia[in_frame]\n catalogdata = hstack([catalogdata[in_frame], t]) # TODO: sorting not sorting all columns\n catalogdata.sort('tess_mag')\n self.gaia = catalogdata", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var 
= os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def run(ts):\n nc = netCDF4.Dataset(('/mesonet/data/iemre/%s_mw_mrms_daily.nc'\n '') % (ts.year,), 'a')\n offset = iemre.daily_offset(ts)\n ncprecip = nc.variables['p01d']\n\n # We want this mrms variable to replicate the netcdf file, so the\n # origin is the southwestern corner\n ts += datetime.timedelta(hours=24)\n gmtts = ts.astimezone(pytz.timezone(\"UTC\"))\n\n gribfn = gmtts.strftime((\"/mnt/a4/data/%Y/%m/%d/mrms/ncep/\"\n \"RadarOnly_QPE_24H/\"\n \"RadarOnly_QPE_24H_00.00_%Y%m%d-%H%M00.grib2.gz\"))\n if not os.path.isfile(gribfn):\n print(\"merge_mrms_q3.py MISSING %s\" % (gribfn,))\n return\n\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n lats, _ = grb.latlons()\n os.unlink(tmpfn)\n\n val = grb['values']\n # Anything less than zero, we set to zero\n val = np.where(val < 0, 0, val)\n\n # CAREFUL HERE! 
The MRMS grid is North to South\n # set top (smallest y)\n y0 = int((lats[0, 0] - iemre.NORTH) * 100.0)\n y1 = int((lats[0, 0] - iemre.SOUTH) * 100.0)\n x0 = int((iemre.WEST - mrms.WEST) * 100.0)\n x1 = int((iemre.EAST - mrms.WEST) * 100.0)\n # print 'y0:%s y1:%s x0:%s x1:%s' % (y0, y1, x0, x1)\n ncprecip[offset, :, :] = np.flipud(val[y0:y1, x0:x1])\n # m = MapPlot(sector='midwest')\n # x, y = np.meshgrid(nc.variables['lon'][:], nc.variables['lat'][:])\n # m.pcolormesh(x, y, ncprecip[offset,:,:], range(10), latlon=True)\n # m.postprocess(filename='test.png')\n # (fig, ax) = plt.subplots()\n # ax.imshow(mrms)\n # fig.savefig('test.png')\n # (fig, ax) = plt.subplots()\n # ax.imshow(mrms[y0:y1,x0:x1])\n # fig.savefig('test2.png')\n nc.close()", "def __init__(self):\n\n self.Cp_air0 = config_earth.earth_properties['Cp_air0']\n self.Rsp_air = config_earth.earth_properties['Rsp_air']\n\n self.d = config_earth.balloon_properties['d']\n self.vol = math.pi*4/3*pow((self.d/2),3) #volume m^3\n self.surfArea = math.pi*self.d*self.d #m^2\n self.cs_area = math.pi*self.d*self.d/4.0 #m^2\n\n #self.emissEnv = config_earth.balloon_properties['emissEnv']\n self.areaDensityEnv = config_earth.balloon_properties['areaDensityEnv']\n self.mp = config_earth.balloon_properties['mp']\n self.mdot = 0\n self.massEnv = config_earth.balloon_properties['mEnv']\n self.Upsilon = config_earth.balloon_properties['Upsilon']\n\n self.vent = config_earth.simulation['vent']\n self.coord = config_earth.simulation['start_coord']\n self.t = config_earth.simulation['start_time']\n self.lat = math.radians(self.coord['lat'])\n self.Ls = self.t.timetuple().tm_yday\n self.min_alt = config_earth.simulation['min_alt']\n\n self.vm_coeff = .1 #virtual mass coefficient\n self.k = self.massEnv*config_earth.balloon_properties['cp'] #thermal mass coefficient\n\n self.dt = config_earth.dt", "def get_ships_analysis(self):\n \n # Get SHIPS times\n times = self.search_ships()\n if len(times) <= 1:\n raise RuntimeError('SHIPS data is unavailable for the requested storm.')\n \n # Declare dict\n new_dict = {\n 'time': [],\n 'mslp': [],\n 'type': [],\n 'vmax': [],\n 'wmo_basin': [],\n }\n for attr in ['name', 'id', 'operational_id', 'year', 'season', 'basin', 'realtime']:\n new_dict[attr] = self[attr]\n new_dict['ace'] = 0.0\n \n # Construct data\n for time in times:\n ships = self.get_ships(time)\n if ships is None: continue\n if np.isnan(ships.lat[0]) or np.isnan(ships.lon[0]): continue\n\n # Add relevant variables\n new_dict['time'].append(time)\n new_dict['mslp'].append(np.nan)\n for key in ships.dict.keys():\n if key in ['fhr', 'vmax_noland_kt', 'vmax_lgem_kt']: continue\n\n # Special handling for storm type\n if key == 'storm_type':\n subtropical_flag = False\n derived_type = 'EX'\n try:\n if ships.dict['storm_type'][0] == 'SUBT':\n subtropical_flag = True\n derived_type = get_storm_type(ships.dict['vmax_land_kt'][0], subtropical_flag)\n if ships.dict['storm_type'][0] not in ['TROP', 'SUBT']:\n derived_type = 'EX'\n except:\n pass\n new_dict['type'].append(derived_type)\n\n # vmax handling\n elif key == 'vmax_land_kt':\n new_dict['vmax'].append(ships.dict[key][0])\n\n # Normal handling\n elif key in new_dict:\n new_dict[key].append(ships.dict[key][0])\n else:\n new_dict[key] = [ships.dict[key][0]]\n \n # Derive ACE\n if not np.isnan(new_dict['vmax'][-1]):\n new_dict['ace'] += accumulated_cyclone_energy(new_dict['vmax'][-1])\n\n # Derive basin\n new_dict['wmo_basin'].append(get_basin(new_dict['lat'][-1],\n new_dict['lon'][-1],\n self.basin))\n\n # 
Add other attributes\n new_dict['source_info'] = 'SHIPS Analysis'\n new_dict['source_method'] = 'UCAR SHIPS Archive'\n new_dict['source_url'] = 'https://hurricanes.ral.ucar.edu/'\n new_dict['invest'] = False\n new_dict['source'] = 'ships'\n new_dict['prob_2day'] = 'N/A'\n new_dict['prob_7day'] = 'N/A'\n new_dict['risk_2day'] = 'N/A'\n new_dict['risk_7day'] = 'N/A'\n \n return Storm(new_dict)", "def get_iPTF16asu():\n z = 0.187\n ebv = 0.0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')\n tb = tb.to_pandas()\n tb = tb[tb[\"col4\"].values!=\">\"]\n \n tb = tb.rename(columns={'col1' : 'mjd',\n 'col2': 'tmax_rf',\n 'col3': 'filter',\n \"col4\": 'mag',\n 'col5': 'emag',\n 'col6': 'instrument'})\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb[\"mag\"] = np.array(tb[\"mag\"].values, dtype = np.float)\n #tb[\"emag\"] = np.array(tb[\"emag\"].values, dtype = np.float)\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb[tb.wave!=0]\n return tb", "def GEEterraClimatePtsAvgMonth(ptsFile,metric,startYear,endYear,buf,poly,username,folderOut, scalePix = 4000):\n \n # load required libraries\n import ee\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n years = list(range(startYear, endYear + 1))\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n ID_field = \"geeID\"\n\n scale_d = {}\n scale_d['aet'] = 0.1\n scale_d['def'] = 0.1\n scale_d['pdsi'] = 0.01\n scale_d['pet'] = 0.1\n scale_d['soil'] = 0.1\n scale_d['srad'] = 0.1\n scale_d['tmmn'] = 0.1\n scale_d['tmmx'] = 0.1\n scale_d['vap'] = 0.001\n scale_d['vpd'] = 0.01\n scale_d['vs'] = 0.01\n \n for met in metric:\n metL = [met]\n Gridmet_pr = ee.ImageCollection('IDAHO_EPSCOR/TERRACLIMATE').select(met)\n \n img_col0 = Gridmet_pr.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n if any([(met == 'pr'),(met == 'ro'),(met == 'swe')]):\n\n img_col = img_col0\n \n else:\n\n def Scale1(img):\n return (img.float()\n .multiply(scale_d[metL[0]])\n .copyProperties(img,['system:time_start','system:time_end']))\n\n img_col = img_col0.map(Scale1)\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'tcy'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print ('buffered pts by:' + str(buf) + ' for ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def 
table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'tcy'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 'tcy'+'_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for ' + met)", "def tomoScan(description, inBeamPosition, outOfBeamPosition, exposureTime=1., start=0., stop=180., step=0.1, darkFieldInterval=0, flatFieldInterval=0,\n imagesPerDark=10, imagesPerFlat=10, optimizeBeamInterval=0, pattern=\"default\", tomoRotationAxis=0, addNXEntry=True, autoAnalyse=True, additionalScannables=[]):\n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n try:\n darkFieldInterval = int(darkFieldInterval)\n flatFieldInterval = int(flatFieldInterval)\n optimizeBeamInterval = int(optimizeBeamInterval)\n \n jns = beamline_parameters.JythonNameSpaceMapping(InterfaceProvider.getJythonNamespace())\n tomography_theta = jns.tomography_theta\n if tomography_theta is None:\n raise NameError(\"tomography_theta is not defined in Jython namespace\")\n tomography_shutter = jns.tomography_shutter\n if tomography_shutter is None:\n raise NameError(\"tomography_shutter is not defined in Jython namespace\")\n tomography_translation = jns.tomography_translation\n if tomography_translation is None:\n raise NameError(\"tomography_translation is not defined in Jython namespace\")\n \n tomography_detector = jns.tomography_detector\n if tomography_detector is None:\n raise NameError(\"tomography_detector is not defined in Jython namespace\")\n\n tomography_optimizer = jns.tomography_optimizer\n if tomography_optimizer is None:\n raise NameError(\"tomography_optimizer is not defined in Jython namespace\")\n\n tomography_time = jns.tomography_time\n if tomography_time is None:\n raise NameError(\"tomography_time is not defined in Jython namespace\")\n \n tomography_beammonitor = jns.tomography_beammonitor\n if tomography_beammonitor is None:\n raise NameError(\"tomography_beammonitor is not defined in Jython namespace\")\n \n tomography_camera_stage = jns.tomography_camera_stage\n if tomography_camera_stage is None:\n raise NameError(\"tomography_camera_stage is not defined in Jython namespace\")\n \n tomography_sample_stage = jns.tomography_sample_stage\n if tomography_sample_stage is None:\n raise NameError(\"tomography_sample_stage is not defined in Jython namespace\")\n \n tomo_additional_scannables = jns.tomography_additional_scannables\n if tomo_additional_scannables is None:\n raise NameError(\"tomo_additional_scannables is not defined in Jython namespace\")\n \n index = 
SimpleScannable()\n index.setCurrentPosition(0.0)\n index.setInputNames([\"imageNumber\"])\n index.setName(\"imageNumber\")\n index.configure()\n \n image_key = SimpleScannable()\n image_key.setCurrentPosition(0.0)\n image_key.setInputNames([\"image_key\"])\n image_key.setName(\"image_key\")\n image_key.configure()\n\n tomoScanDevice = make_tomoScanDevice(tomography_theta, tomography_shutter,\n tomography_translation, tomography_optimizer, image_key, index)\n\n# return tomoScanDevice\n #generate list of positions\n numberSteps = ScannableUtils.getNumberSteps(tomography_theta, start, stop, step)\n theta_points = []\n theta_points.append(start)\n previousPoint = start\n for i in range(numberSteps):\n nextPoint = ScannableUtils.calculateNextPoint(previousPoint, step);\n theta_points.append(nextPoint)\n previousPoint = nextPoint\n \n #generateScanPoints\n optimizeBeamNo = 0\n optimizeBeamYes = 1\n shutterOpen = 1\n shutterClosed = 0\n shutterNoChange = 2\n scan_points = []\n theta_pos = theta_points[0]\n index = 0\n #Added shutterNoChange state for the shutter. The scan points are added using the (pseudo) ternary operator, \n #if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1\n \n for i in range(imagesPerFlat): \n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1 \n scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project, index)) #first\n index = index + 1 \n imageSinceDark = 1\n imageSinceFlat = 1\n optimizeBeam = 0\n for i in range(numberSteps):\n theta_pos = theta_points[i + 1]\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project, index))#main image\n index = index + 1 \n \n imageSinceFlat = imageSinceFlat + 1\n if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))\n index = index + 1 \n imageSinceFlat = 0\n \n imageSinceDark = imageSinceDark + 1\n if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))\n index = index + 1 \n imageSinceDark = 0\n\n optimizeBeam = optimizeBeam + 1\n if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))\n index = index + 1 \n optimizeBeam = 0\n \n #add dark and flat only if not done in last steps\n if imageSinceFlat != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1\n if imageSinceDark != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1 \n scan_points1 = generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, 
darkFieldInterval, flatFieldInterval,\n imagesPerDark, imagesPerFlat, optimizeBeamInterval, pattern=pattern)\n if pattern == 'default' or pattern == 'DFPFD':\n i = 0\n for pt1 in scan_points1:\n pt = scan_points[i]\n if pt1 != pt:\n print \"Mismatch - please tell Kaz about your scan and its arguments!\"\n print \"i = \", i\n print \"pt = \", pt\n print \"pt1 = \", pt1\n i += 1\n #return None\n positionProvider = tomoScan_positions(start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat, \\\n inBeamPosition, outOfBeamPosition, optimizeBeamInterval, scan_points) \n scan_args = [tomoScanDevice, positionProvider, tomography_time, tomography_beammonitor, tomography_detector, exposureTime, tomography_camera_stage, tomography_sample_stage]\n #scan_args.append(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(\"RotationCoord_as_list\", [tomoRotationAxis])\n meta_add(\"approxCOR\", tomoRotationAxis)\n for scannable in additionalScannables:\n scan_args.append(scannable)\n for scannable in tomo_additional_scannables:\n scan_args.append(scannable)\n ''' setting the description provided as the title'''\n if not description == None: \n setTitle(description)\n else :\n setTitle(\"undefined\")\n \n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n if not dataFormat == \"NexusDataWriter\":\n handle_messages.simpleLog(\"Data format inconsistent. Setting 'gda.data.scan.datawriter.dataFormat' to 'NexusDataWriter'\")\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", \"NexusDataWriter\")\n scanObject = createConcurrentScan(scan_args)\n if addNXEntry:\n addNXTomoSubentry(scanObject, tomography_detector.name, tomography_theta.name)\n scanObject.runScan()\n if autoAnalyse:\n lsdp=jns.lastScanDataPoint()\n OSCommandRunner.runNoWait([\"/dls_sw/apps/tomopy/tomopy/bin/gda/tomo_at_scan_end_kz\", lsdp.currentFilename], OSCommandRunner.LOGOPTION.ALWAYS, None)\n return scanObject;\n except InterruptedException:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"User interrupted the scan\", exceptionType, exception, traceback, False)\n raise InterruptedException(\"User interrupted the scan\")\n except:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"Error during tomography scan\", exceptionType, exception, traceback, False)\n raise Exception(\"Error during tomography scan\", exception)\n finally:\n handle_messages.simpleLog(\"Data Format reset to the original setting: \" + dataFormat)\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", dataFormat)", "def read_FMI_weather(ID, start_date, end_date, sourcefile, CO2=380.0):\n \n # OmaTunniste;OmaItä;OmaPohjoinen;Kunta;siteid;vuosi;kk;paiva;longitude;latitude;t_mean;t_max;t_min;\n # rainfall;radiation;hpa;lamposumma_v;rainfall_v;lamposumma;lamposumma_cum\n # -site number\n # -date (yyyy mm dd)\n # -latitude (in KKJ coordinates, metres)\n # -longitude (in KKJ coordinates, metres)\n # -T_mean (degrees celcius)\n # -T_max (degrees celcius)\n # -T_min (degrees celcius)\n # -rainfall (mm)\n # -global radiation (per day in kJ/m2)\n # -H2O partial pressure (hPa)\n\n sourcefile = os.path.join(sourcefile)\n\n #ID = int(ID)\n\n # import forcing data\n fmi = pd.read_csv(sourcefile, sep=';', header='infer', \n usecols=['OmaTunniste', 'Kunta', 'aika', 'longitude',\n 'latitude', 't_mean', 't_max', 't_min', 'rainfall',\n 'radiation', 'hpa', 'lamposumma_v', 
'rainfall_v'],\n parse_dates=['aika'],encoding=\"ISO-8859-1\")\n \n time = pd.to_datetime(fmi['aika'], format='%Y%m%d')\n\n fmi.index = time\n fmi = fmi.rename(columns={'OmaTunniste': 'ID', 'longitude': 'lon',\n 'latitude': 'lat', 't_mean': 'T', 't_max': 'Tmax',\n 't_min': 'Tmin', 'rainfall': 'Prec',\n 'radiation': 'Rg', 'hpa': 'h2o', 'lamposumma_v': 'dds',\n 'rainfall_v': 'Prec_a'})\n \n fmi['h2o'] = 1e-1*fmi['h2o'] # hPa-->kPa\n fmi['Rg'] = 1e3 / 86400.0*fmi['Rg'] # kJ/m2/d-1 to Wm-2\n fmi['Par'] = 0.5*fmi['Rg']\n\n # saturated vapor pressure\n esa = 0.6112*np.exp((17.67*fmi['T']) / (fmi['T'] + 273.16 - 29.66)) # kPa\n vpd = esa - fmi['h2o'] # kPa\n vpd[vpd < 0] = 0.0\n rh = 100.0*fmi['h2o'] / esa\n rh[rh < 0] = 0.0\n rh[rh > 100] = 100.0\n\n fmi['RH'] = rh\n fmi['esa'] = esa\n fmi['VPD'] = vpd\n\n fmi['doy'] = fmi.index.dayofyear\n fmi = fmi.drop(['aika'], axis=1)\n # replace nan's in prec with 0.0\n #fmi['Prec'][np.isnan(fmi['Prec'])] = 0.0\n fmi['Prec']= fmi['Prec'].fillna(value=0.0)\n # add CO2 concentration to dataframe\n fmi['CO2'] = float(CO2)\n \n # get desired period\n fmi = fmi[(fmi.index >= start_date) & (fmi.index <= end_date)]\n# if ID > 0:\n# fmi = fmi[fmi['ID'] == ID]\n return fmi", "def read_traveltime(self):\r\n \r\n #### read Travel time from txt file\r\n \r\n \r\n #### Particle travel time branch 1\r\n excelfile_surface_branch1_high = r'excel\\flow_rate\\particle_surface_branch1_high.xlsx'\r\n inarray_surface_branch1_high = pd.read_excel(excelfile_surface_branch1_high).to_numpy() \r\n \r\n excelfile_surface_branch1_medium = r'excel\\flow_rate\\particle_surface_branch1_medium.xlsx'\r\n inarray_surface_branch1_medium = pd.read_excel(excelfile_surface_branch1_medium).to_numpy() \r\n \r\n excelfile_surface_branch1_low = r'excel\\flow_rate\\particle_surface_branch1_low.xlsx'\r\n inarray_surface_branch1_low = pd.read_excel(excelfile_surface_branch1_low).to_numpy()\r\n \r\n excelfile_bottom_branch1_high = r'excel\\flow_rate\\particle_bottom_branch1_high.xlsx'\r\n inarray_bottom_branch1_high = pd.read_excel(excelfile_bottom_branch1_high).to_numpy()\r\n \r\n excelfile_bottom_branch1_medium = r'excel\\flow_rate\\particle_bottom_branch1_medium.xlsx'\r\n inarray_bottom_branch1_medium = pd.read_excel(excelfile_bottom_branch1_medium).to_numpy()\r\n \r\n excelfile_bottom_branch1_low = r'excel\\flow_rate\\particle_bottom_branch1_low.xlsx'\r\n inarray_bottom_branch1_low = pd.read_excel(excelfile_bottom_branch1_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 1\r\n excelfile_tracer_branch1_high = r'excel\\flow_rate\\tracer_branch1_high.xlsx'\r\n inarray_tracer_branch1_high = pd.read_excel(excelfile_tracer_branch1_high).to_numpy()\r\n \r\n excelfile_tracer_branch1_medium = r'excel\\flow_rate\\tracer_branch1_medium.xlsx'\r\n inarray_tracer_branch1_medium = pd.read_excel(excelfile_tracer_branch1_medium).to_numpy()\r\n \r\n excelfile_tracer_branch1_low = r'excel\\flow_rate\\tracer_branch1_low.xlsx'\r\n inarray_tracer_branch1_low = pd.read_excel(excelfile_tracer_branch1_low).to_numpy()\r\n \r\n self.inarrays_branch1 = [inarray_surface_branch1_high, inarray_surface_branch1_medium, inarray_surface_branch1_low, \\\r\n inarray_bottom_branch1_high, inarray_bottom_branch1_medium, inarray_bottom_branch1_low, \\\r\n inarray_tracer_branch1_high, inarray_tracer_branch1_medium, inarray_tracer_branch1_low]\r\n \r\n \r\n #### Particle travel time branch 5\r\n excelfile_surface_branch5_high = r'excel\\flow_rate\\particle_surface_branch5_high.xlsx'\r\n inarray_surface_branch5_high = 
pd.read_excel(excelfile_surface_branch5_high).to_numpy()\r\n \r\n excelfile_surface_branch5_medium = r'excel\\flow_rate\\particle_surface_branch5_medium.xlsx'\r\n inarray_surface_branch5_medium = pd.read_excel(excelfile_surface_branch5_medium).to_numpy()\r\n \r\n excelfile_surface_branch5_low = r'excel\\flow_rate\\particle_surface_branch5_low.xlsx'\r\n inarray_surface_branch5_low = pd.read_excel(excelfile_surface_branch5_low).to_numpy()\r\n \r\n excelfile_bottom_branch5_high = r'excel\\flow_rate\\particle_bottom_branch5_high.xlsx'\r\n inarray_bottom_branch5_high = pd.read_excel(excelfile_bottom_branch5_high).to_numpy()\r\n \r\n excelfile_bottom_branch5_medium = r'excel\\flow_rate\\particle_bottom_branch5_medium.xlsx'\r\n inarray_bottom_branch5_medium = pd.read_excel(excelfile_bottom_branch5_medium).to_numpy()\r\n \r\n excelfile_bottom_branch5_low = r'excel\\flow_rate\\particle_bottom_branch5_low.xlsx'\r\n inarray_bottom_branch5_low = pd.read_excel(excelfile_bottom_branch5_low).to_numpy()\r\n \r\n \r\n #### Tracer travel time branch 5\r\n excelfile_tracer_branch5_high = r'excel\\flow_rate\\tracer_branch5_high.xlsx'\r\n inarray_tracer_branch5_high = pd.read_excel(excelfile_tracer_branch5_high).to_numpy()\r\n \r\n excelfile_tracer_branch5_medium = r'excel\\flow_rate\\tracer_branch5_medium.xlsx'\r\n inarray_tracer_branch5_medium = pd.read_excel(excelfile_tracer_branch5_medium).to_numpy()\r\n \r\n excelfile_tracer_branch5_low = r'excel\\flow_rate\\tracer_branch5_low.xlsx'\r\n inarray_tracer_branch5_low = pd.read_excel(excelfile_tracer_branch5_low).to_numpy()\r\n \r\n \r\n self.inarrays_branch5 = [inarray_surface_branch5_high, inarray_surface_branch5_medium, inarray_surface_branch5_low, \\\r\n inarray_bottom_branch5_high, inarray_bottom_branch5_medium, inarray_bottom_branch5_low, \\\r\n inarray_tracer_branch5_high, inarray_tracer_branch5_medium, inarray_tracer_branch5_low]", "def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n 
self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", \":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None", "def infer_timeindex(energy_system_dict):\n\n # find out which expression of 'Timeframe' was used\n ts_key = [variant for variant in spellings.timeframe\n if variant in energy_system_dict.keys()][0]\n\n # find out which 
expression of 'Timeindex' was used\n idx_key = [variant for variant in spellings.timeindex\n if variant in energy_system_dict[ts_key].columns][0]\n\n # logger.debug(\n # (\"Inferring timeframe using '{}' for timeframe and \"\n # \"'{}' for timeindex\".format(\n # ts_key, idx_key))\n\n # Extract the time index\n timeindex = pd.DatetimeIndex(\n energy_system_dict[ts_key][idx_key].values, freq='infer')\n\n # Round to time index according to temporal resolution settings to...\n # correct rounding errors of the input source due to time format display\n timeindex = timeindex.round(\n resolutions.temporal_rounding_map[configurations.temporal_resolution])\n\n # Reinfer frequency from newly round time index\n timeindex = pd.DatetimeIndex(timeindex, freq='infer')\n\n # If reinferring was unsuccessful, enforce tessif's current frequency\n if timeindex.freq is None:\n timeindex.freq = resolutions.temporal_rounding_map[\n configurations.temporal_resolution]\n\n return timeindex", "def get_sigma_map(start_x = 0,field_height=100,field_width=100,viewing_distance=12.0,screen_pixel_size=0.282,debug=False):\n start_x_pixels = np.round(get_pixels_at_degrees(degrees=start_x,viewing_distance=viewing_distance,screen_pixel_size=screen_pixel_size))\n optical_nodal_distance = 17.0 # mm from lens to fovea\n viewing_distance_inches = viewing_distance\n viewing_distance = viewing_distance * 25.4 # mm\n center_y, center_x = 0,0\n x_coords = (start_x_pixels + np.arange(-field_width/2.0,field_width/2,1))*screen_pixel_size\n y_coords = np.arange(-field_height/2.0,field_height/2,1)*screen_pixel_size\n x,y = np.meshgrid(x_coords,y_coords)\n coords = np.vstack((y.ravel(),x.ravel())).T\n\n image_dist = cdist(np.matrix([center_y,center_x]),coords)\n fovea_dist = (np.pi/180.0)*optical_nodal_distance*get_degrees_at_pixels(pixels=image_dist/screen_pixel_size,viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n midget_dendritic_field_diameter_micrometers = 8.64 * np.power(fovea_dist,1.04) # midget from Dacey and Peterson, 1994\n midget_dendritic_field_diameter_millimeters = midget_dendritic_field_diameter_micrometers/1000.0\n midget_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n\n midget_sigma_map = midget_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n midget_sigma_map = midget_sigma_map.reshape((field_height,field_width))\n\n parasol_dendritic_field_diameter_micrometers = 70.2 * np.power(fovea_dist,0.65) # parasol from Dacey and Peterson, 1994\n parasol_dendritic_field_diameter_millimeters = parasol_dendritic_field_diameter_micrometers/1000.0\n parasol_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n parasol_sigma_map = 
parasol_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n parasol_sigma_map = parasol_sigma_map.reshape((field_height,field_width))\n\n return midget_sigma_map,parasol_sigma_map", "def generate_exptime_table(self, ):\n\n # Perform calculation for all stars in biased sample\n Ndraw = self.NBIAS\n\n np.random.seed(seed=None)\n\n # Allocate memory for exposure times\n t_tots = np.zeros(Ndraw)\n tpbpcs = []\n pct_obs_iwas = []\n lammax_obs_iwas = []\n specs = []\n\n \"\"\"\n Calculate the exposure times and spectra in each bandpass for each\n star in biased sample\n \"\"\"\n\n # Loop over stars in this sample\n for i in range(Ndraw):\n #print(\"HIP %i, %.2f pc, %s \" %(hip[i], dist[i], stype[i]))\n\n # Set system parameters for this star\n self.prep_ith_star(i)\n\n # Calculate the time to observe the complete spectrum\n t_tots[i], tpbpc, spectrum, iwa = self.complete_spectrum_time()\n\n tpbpcs.append(tpbpc)\n pct_obs_iwas.append(iwa[0])\n specs.append(spectrum)\n\n # Calculate channel widths\n deltas = []\n for channel in CHANNELS:\n l = default_luvoir(channel=channel)\n deltas.append(l.lammax - l.lammin)\n self.deltas = np.array(deltas)\n\n # Calculate channel fractional completeness\n self.channel_weights = (self.deltas / np.sum(self.deltas))\n\n # Calculate completeness for each star in sample\n self.completeness = np.sum(np.array(pct_obs_iwas) * self.channel_weights, axis = 1)\n\n \"\"\"\n Make a Lookup Table of Exposure times for each star in sample\n \"\"\"\n\n tpbpcs_rect = [] # Time per bandpass\n tpcs_rect = [] # Time per channel\n\n # Loop over all the stars in sample\n for idrew in range(self.NBIAS):\n\n tpbpcs_rect.append([])\n tpcs_rect.append([])\n bp_names = []\n bp_chan = []\n\n # Loop over all the LUVOIR channels\n for ichan in range(len(CHANNELS)):\n\n tpcs_rect[idrew].append(0.0)\n\n # Loop over all the bands in this channel\n for iband in range(len(tpbpcs[0][ichan])):\n\n bp_names.append(\"%s %i\" %(CHANNELS[ichan], iband+1))\n bp_chan.append(ichan)\n tpbpcs_rect[idrew].append(tpbpcs[idrew][ichan][iband])\n tpcs_rect[idrew][ichan] += tpbpcs[idrew][ichan][iband]\n\n # Make np arrays\n tpbpcs_rect = np.array(tpbpcs_rect)\n tpcs_rect = np.array(tpcs_rect)\n bp_names = np.array(bp_names)\n bp_chan = np.array(bp_chan)\n\n # Make infs --> nans\n infmask = ~np.isfinite(tpbpcs_rect)\n tpbpcs_rect[infmask] = np.nan\n infmask = ~np.isfinite(tpcs_rect)\n tpcs_rect[infmask] = np.nan\n\n # Set attributes\n self.tpbpcs_rect = tpbpcs_rect\n self.tpcs_rect = tpcs_rect\n self.bp_names = bp_names\n self.bp_chan = bp_chan\n\n \"\"\"\n New completeness calculations\n \"\"\"\n\n bandpasses = []\n\n # Loop over telescope channels\n for j, channel in enumerate(CHANNELS):\n\n # Channel dependent bandwidth?\n if type(self.bandwidth) is float:\n bandwidth = self.bandwidth\n else:\n assert len(self.bandwidth) == len(CHANNELS)\n bandwidth = self.bandwidth[j]\n\n # Get the channel specific telescope parameters\n luvoir = default_luvoir(channel=channel)\n self.cn.telescope = luvoir\n\n # Calculate the bandpass edges\n edges = calculate_bandpass_edges(luvoir.lammin, luvoir.lammax, bandwidth = bandwidth)\n\n # Calculate the number of bandpasses\n Nbands = len(edges) - 1\n\n # Loop over bandpasses\n for i in range(Nbands):\n\n # Get the max, min, and middle wavelenths for this bandpass\n lammin = edges[i]\n lammax = edges[i+1]\n\n bandpasses.append([lammin, lammax])\n\n bandpasses = np.array(bandpasses)\n lmin, lmax = np.min(np.hstack(bandpasses)), 
np.max(np.hstack(bandpasses))\n\n # Fractional completeness of each bandpass\n bp_frac = ((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin)) / np.sum((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin))\n\n # Completeness by target\n tot_completeness = np.sum(np.isfinite(self.tpbpcs_rect) * bp_frac, axis=1)\n\n # Fraction of stars in biased sample that can completely observe each bandpass\n frac_bias_bp = np.sum(np.isfinite(tpbpcs_rect)*1.0, axis=0) / self.NBIAS\n\n # Set attributes\n self.bandpasses = bandpasses\n self.bp_frac = bp_frac\n self.tot_completeness = tot_completeness\n self.frac_bias_bp = frac_bias_bp\n\n self._make_pandas_table()\n\n return" ]
[ "0.62284875", "0.5733199", "0.54121506", "0.53654045", "0.5362718", "0.53480995", "0.53380924", "0.5298414", "0.5262806", "0.52612984", "0.52312875", "0.5173023", "0.5139651", "0.5129809", "0.5126058", "0.5113684", "0.51000977", "0.5095595", "0.5085868", "0.5080458", "0.5077105", "0.5057523", "0.50567466", "0.5055007", "0.505499", "0.50542855", "0.50395733", "0.5031074", "0.5021808", "0.50195307", "0.50088036", "0.5008618", "0.49954557", "0.49954557", "0.4992368", "0.49921328", "0.49889362", "0.4975839", "0.49535662", "0.49487898", "0.49475783", "0.49435306", "0.49413458", "0.49315256", "0.4929366", "0.49212334", "0.49199137", "0.491922", "0.49126995", "0.4886671", "0.48781836", "0.48748305", "0.48738384", "0.487269", "0.4866802", "0.48619714", "0.48554012", "0.48510167", "0.48442662", "0.484412", "0.48428318", "0.4840964", "0.4840718", "0.48395878", "0.48322207", "0.48298115", "0.48277527", "0.4826486", "0.48264334", "0.48232397", "0.48219532", "0.48213458", "0.48176044", "0.4816748", "0.4811229", "0.48103535", "0.47971296", "0.4796179", "0.4796073", "0.47932667", "0.47911024", "0.4787123", "0.47839302", "0.47797585", "0.477954", "0.47766447", "0.47697303", "0.4769306", "0.47664052", "0.47629997", "0.47629642", "0.47625357", "0.47607133", "0.475339", "0.47480643", "0.47447264", "0.47439194", "0.47427967", "0.47385743", "0.47376984" ]
0.59192055
1
Resolve anatomical names and links.
def _SetAnatNames(self, anat_tgt):
    # Define links to structural image in each output directory.
    for entry in self.entry_map['epi'] + self.entry_map['fmap'] + \
                 self.entry_map['dti'] + self.entry_map['asl']:
        self.info[entry]['anat_link'] = anat_tgt

    # Name the normalization source image T1High. Number the rest.
    anat_entries = self.entry_map['anat'][:]
    anat_entries.remove(anat_tgt)
    n_t1high = 1
    for entry in anat_entries:
        if self.info[entry]['type'] == 'T1High':
            # High res T1-weighted, not normalization target. Rename it.
            fname = 'T1High_%d' % n_t1high
            fullname = '%s/%s' % (self.info[entry]['outdir'], fname)
            self.info[entry]['imgfile'] = fullname
            self.info[entry]['imgfile_skstrip'] = '%s_skstrip' % fullname
            self.info[entry]['matfile'] = '%s_matfile.aff12.1D' % fullname
            self.info[anat_tgt]['norm_src'] = False
            n_t1high += 1

    fname = 'T1High'
    fullname = '%s/%s' % (self.info[anat_tgt]['outdir'], fname)
    self.info[anat_tgt]['imgfile'] = fullname
    self.info[anat_tgt]['imgfile_skstrip'] = '%s_skstrip' % fullname
    self.info[anat_tgt]['matfile'] = '%s_matfile.aff12.1D' % fullname
    self.info[anat_tgt]['norm_src'] = True
    self.anatomical = '%s%s' % (self.info[anat_tgt]['imgfile'], \
                                self.info[anat_tgt]['suffix'])

    # The target for motion correction is the source for spatial normalization.
    self.norm_src = anat_tgt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resolveNames(self):\n client.resolveNames(self)\n # TODO: Do any name resolutions here.\n # The names of other objects this object refers to, either intrinsically or in its parameters, should be checked here.", "def resolve_references(self):\n self.specs = self._resolve_partial(self.parsed_url, self.specs, ())", "def _resolve(self):\n pass", "def resolve( self, aWeb ):\n self.fullName= aWeb.fullNameFor( self.refTo )\n self.chunkList= aWeb.getchunk( self.refTo )", "def fix_links():\n pass", "def _parse_title(self, links):\n for link in links:\n if \"hearing\" in link[\"title\"].lower():\n return link[\"title\"].replace(\"Notice\", \"\").strip()\n if \"special\" in link[\"title\"].lower():\n return \"Special Meeting\"\n return \"Illinois Medical District Commission\"", "def resolveAlias(self, alias):", "def _fixup_find_links(find_links):\n if isinstance(find_links, str):\n return find_links.split()\n assert isinstance(find_links, (tuple, list))\n return find_links", "def resolve(self,nameseq):\n assert(is_seq(nameseq) and len(nameseq) >= 1)\n if len(nameseq) > 1:\n return self.nodes()[nameseq[0]].resolve(nameseq[1:])\n else:\n return self.nodes()[nameseq[0]]", "def get_links(names, html):\n ###TODO\n people = []\n readweb = BeautifulSoup(html, 'html.parser')\n for a in readweb.find_all('a'):\n person = os.path.basename(str(a.get('href')))\n if person in names:\n people.append(person)\n return SortedSet(people)\n pass", "def _parse_links(self, item):\n regex = compile(r\"<a\\s+(?:[^>]*?\\s+)?href=([\\\"\\'])(.*?)\\1.*\\>(.*)<\\/a>\")\n links = [\n {\"href\": href, \"title\": title}\n for (_, href, title) in findall(regex, item[\"Event\"][\"Description\"])\n ]\n for link in links:\n if link[\"href\"][0] == \"/\":\n link[\"href\"] = \"https://www.pghschools.org\" + link[\"href\"]\n return links", "def resolve(self, address):", "def resolve(self, anchors):\n\n for anchor in anchors:\n if self.node[DuAttrRefid] in anchor.ids():\n self.toAnchor = anchor\n break", "def links_name(link_from: str, link_to: str):\n if len(link_from) < 6:\n raise RuntimeError(\"from length must be at least 6\")\n if len(link_to) < 6:\n raise RuntimeError(\"to length must be at least 6\")\n base_link = get_short_link(link_from)\n if not base_link:\n raise RuntimeError(\"Couldn't find base link {}\".format(link_from))\n base_link[\"prefix\"][\"S\"] = link_to[0:6]\n base_link[\"unique_subhash\"][\"S\"] = link_to\n base_link[\"stats\"][\"M\"][\"clicks\"][\"N\"] = \"0\"\n base_link[\"creation_ip\"][\"S\"] = \"0.0.0.0\"\n # It's us, so we don't care about \"anonymizing\" the time\n base_link[\"creation_date\"][\"S\"] = datetime.datetime.utcnow().isoformat()\n title = input(\"Link title: \")\n author = input(\"Author(s): \")\n if len(author) == 0:\n # We explicitly ignore author = . 
in the site code\n author = \".\"\n project = input(\"Project: \")\n description = input(\"Description: \")\n base_link[\"named_metadata\"] = {\n \"M\": {\n \"title\": {\"S\": title},\n \"author\": {\"S\": author},\n \"project\": {\"S\": project},\n \"description\": {\"S\": description},\n }\n }\n print(\"New link: {}\".format(pformat(base_link)))\n if are_you_sure(\"create new link named {}\".format(link_to)):\n put_short_link(base_link)", "def process_link(self, env, refnode, has_explicit_title, title, target):\n refnode['json:name'] = normalize_object_name(target)\n return title, normalize_object_name(target)", "def filter_url_parse_partial_links(match):\n dname = html.unescape(match.group(1))\n dname = html.escape(dname)\n punctuation = match.group(2)\n caption = filter_url_trim(dname, filter_url_length)\n return '<a href=\"http://' + dname + '\">' + caption + '</a>' + punctuation", "def resolve_all_refs(s):\n for ref in list_of_all_unpointed_refs():\n ref.resolve()", "def _parse_links(self, response, start):\n links = self.document_date_map[start.date()]\n for link in response.css(\".agenda-min-pres .field a\"):\n link_url = response.urljoin(link.xpath(\"@href\").extract_first())\n title = link.xpath(\"./text()\").extract_first()\n if title.strip().startswith(\"Agenda\"):\n title = \"Agenda\"\n links.append(\n {\"title\": re.sub(r\"\\s+\", \" \", title).strip(), \"href\": link_url}\n )\n return links", "def _fix_links(self, text, page_names):\n for n in page_names:\n text = text.replace(f\"]({n})\", f\"]({n}.html)\")\n text = text.replace(f\"]({n}.md)\", f\"]({n}.html)\")\n return text", "def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. 
j.\"'\n self._compare_searches(inv_search, spi_search)", "def test_url_name_mangling(self):\r\n\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'])\r\n\r\n toy_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')\r\n\r\n course = modulestore.get_course(toy_id)\r\n chapters = course.get_children()\r\n ch1 = chapters[0]\r\n sections = ch1.get_children()\r\n\r\n self.assertEqual(len(sections), 4)\r\n\r\n for i in (2, 3):\r\n video = sections[i]\r\n # Name should be 'video_{hash}'\r\n print(\"video {0} url_name: {1}\".format(i, video.url_name))\r\n\r\n self.assertEqual(len(video.url_name), len('video_') + 12)", "def getAliases(self):", "def ref_to_link(txt):\n text = txt.group(1) # because it was a match in a regular expression\n\n thecite, everythingelse = first_bracketed_string(text)\n thecite = thecite[1:-1] # strip curly brackets\n thecite = thecite.replace(\"\\\\\",\"\") # \\href --> href\n\n refs = thecite.split(\",\")\n ans = \"\"\n\n # print \"refs\",refs\n\n for ref in refs:\n ref = ref.strip() # because \\cite{A, B, C,D} can have spaces\n this_link = \"\"\n if ref.startswith(\"href\"):\n the_link = re.sub(r\".*{([^}]+)}{.*\", r\"\\1\", ref)\n click_on = re.sub(r\".*}{([^}]+)}\\s*\", r\"\\1\", ref)\n this_link = '{{ LINK_EXT(\"' + click_on + '\",\"' + the_link + '\") | safe}}'\n elif ref.startswith(\"doi\"):\n ref = ref.replace(\":\",\"\") # could be doi:: or doi: or doi\n the_doi = ref[3:] # remove the \"doi\"\n this_link = '{{ LINK_EXT(\"' + the_doi + '\",\"https://doi.org/' + the_doi + '\")| safe }}'\n elif ref.lower().startswith(\"mr\"):\n ref = ref.replace(\":\",\"\")\n the_mr = ref[2:] # remove the \"MR\"\n this_link = '{{ LINK_EXT(\"' + 'MR:' + the_mr + '\", '\n this_link += '\"http://www.ams.org/mathscinet/search/publdoc.html?pg1=MR&s1='\n this_link += the_mr + '\") | safe}}'\n elif ref.lower().startswith(\"arxiv\"):\n ref = ref.replace(\":\",\"\")\n the_arx = ref[5:] # remove the \"arXiv\"\n this_link = '{{ LINK_EXT(\"' + 'arXiv:' + the_arx + '\", '\n this_link += '\"http://arxiv.org/abs/'\n this_link += the_arx + '\")| safe}}'\n\n\n if this_link:\n if ans:\n ans += \", \"\n ans += this_link\n\n return '[' + ans + ']' + everythingelse", "def replace_local_hyperlinks(\n text,\n base_url=\"https://github.com/project-rig/nengo_spinnaker/blob/master/\"\n ):\n def get_new_url(url):\n return base_url + url[2:]\n\n # Deal with anonymous URLS\n for match in re.finditer(r\"^__ (?P<url>\\./.*)\", text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\"^__ {}\".format(orig_url),\n \"__ {}\".format(url), text, flags=re.MULTILINE)\n\n # Deal with named URLS\n for match in re.finditer(r\"^\\.\\. _(?P<identifier>[^:]*): (?P<url>\\./.*)\",\n text, re.MULTILINE):\n identifier = match.groupdict()[\"identifier\"]\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\n \"^\\.\\. _{}: {}\".format(identifier, orig_url),\n \".. _{}: {}\".format(identifier, url),\n text, flags=re.MULTILINE)\n\n # Deal with image URLS\n for match in re.finditer(r\"^\\.\\. image:: (?P<url>\\./.*)\",\n text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = text.replace(\".. image:: {}\".format(orig_url),\n \".. 
image:: {}\".format(url))\n\n return text", "def _replace_links(input_string,link_dict={}):\n\n # Dictionary to hold link string/target pairs\n target_dict = {}\n\n # Strip white space from string\n this_string = input_string.strip()\n\n # If we have a string stub, return it\n if len(input_string) < 2:\n return input_string, {}\n\n # Look for patterns: \";_\" OR \",_\" OR \" _\" OR \":_\"\n search_pattern = re.compile(\"[\\,\\s\\;\\:]\\_|\\A\\_\")\n\n # Look for place to break link: \";\" OR \",\" OR \":\" OR \" \" OR \".\"\n end_pattern = re.compile(\"[\\,\\s\\;\\:\\.]\")\n\n # Look for a match\n match = search_pattern.search(this_string)\n while match:\n\n # If started with \"_\", link_alias starts at match.span()[0]\n if this_string[match.span()[0]] == \"_\":\n start = match.span()[0]\n\n # If started with \",_\" or the like, link_alias starts at match.span()[0] + 1\n else:\n start = match.span()[0] + 1\n\n # Chop string into before and after _ in link (front and back)\n front = this_string[:start]\n back = this_string[start:]\n\n # Look for end of the link\n link_end = end_pattern.search(back)\n\n # If we find the end, split back into link_alias and trailing\n if link_end:\n link_alias = back[:link_end.span()[0]]\n trailing = back[link_end.span()[0]:]\n\n # If we do not find the end, the whole back is link_alias ... no trailing\n else:\n link_alias = back[:]\n trailing = \"\"\n\n # Extract url and text for constructing the link text\n try:\n url, text = link_dict[link_alias]\n\n if url == \"\":\n if text == \"\":\n raise KeyError\n else:\n # Replace the link_alias with the text, no url\n link_string = text\n else:\n if text == \"\":\n # Replace the link_alias with link_alias[1:] -> url\n label = link_alias[1:]\n else:\n # Replace the link_alias with the text -> url\n label = text\n\n link_string = \"`{}`_\".format(label)\n url_string = \".. 
_`{}`: {}\".format(label,url)\n try:\n already_seen = target_dict[label]\n if already_seen != url_string:\n err = \"The same link_text '{}' corresponds to more than one url\\n\".format(text)\n raise ValueError(err)\n except KeyError:\n target_dict[label] = url_string\n\n except KeyError:\n\n # Replace the link_alias with link_alias[1:]\n link_string = link_alias[1:]\n\n # Rebuild this_string with front + new link + trailing\n this_string = \"{}{}{}\".format(front,link_string,trailing)\n\n # Look for another link\n match = search_pattern.search(this_string)\n\n return this_string, target_dict", "def build_messy_lookup(source,dest,ref_col):\n la = QuickGrid().open(source)\n od = QuickGrid().open(join(\"source_files\",\"local_authority_data_names.csv\"))\n\n lookup = QuickGrid()\n lookup.header = [\"la name\",ref_col]\n\n possible = [\"official-name\",\"alt-name-1\",\"alt-name-2\",\"alt-name-3\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n lookup.add([r[p],r[ref_col]])\n \n current_names = [x[0] for x in lookup]\n\n for r in od:\n if r[\"name\"] not in current_names:\n code = r[\"local-authority\"].split(\":\")[1]\n lookup.add([r[\"name\"],code])\n \n lookup.save(dest,force_unicode=True)", "def convert_name(self, human_name):\n\n human_name = HumanName(human_name)\n if human_name.suffix:\n self.metadata[\"gutenberg_name_suffix\"] = human_name.suffix\n human_name.suffix = \"\"\n if human_name.nickname:\n # LOGGER.debug(\"%s nickname: %s\", str(human_name), human_name.nickname)\n no_nickname = copy.copy(human_name)\n no_nickname.nickname = \"\"\n first_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.first, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.first,\n # re.UNICODE\n # ),\n # human_name.nickname\n # )\n if first_name_match and len(first_name_match.group(0)) >= len(human_name.first):\n human_name.first = first_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.first):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"] = set([str(no_nickname)])\n middle_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.middle, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.middle, re.UNICODE\n # ),\n # human_name.nickname\n # )\n if middle_name_match and len(middle_name_match.group(0)) >= len(human_name.middle):\n human_name.middle = middle_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.middle):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"].add(str(no_nickname))\n return human_name", "def get_author_name_urls(dept_name, dept_url):\n\t# Change to \"School of Humanities\" to match the name used in Enlighten\n\t# Done because the string obtained from http://www.gla.ac.uk/schools/ contains the Gaelic name as well\n\tif \"Humanities\" in dept_name:\n\t\tdept_name = \"School of Humanities\"\n\n\t# get list of names of researchers in department\n\tnames = get_names(dept_url)\n\n\twinning_name_urls = set()\n\n\t# loop through each name\n\tfor name in names:\n\t\tname = initialise_first_name(name)\n\t\t# Get Enlighten page on which author name will be found (page for the letter of author's last 
name)\n\t\tfull_url = author_list_base + \"index.\"+ name.split(\" \")[0][0] + \".html\"\n\t\ttree = get_tree(full_url)\n\t\t# Get all candidate authors which match the name\n\t\tname_urls = get_name_url_matches(name, tree)\n\t\t# If candidates were found\n\t\tif name_urls:\n\t\t\t# Filter out authors that have already been scraped\n\t\t\tname_urls = [name_url for name_url in name_urls if name_url not in winning_name_urls]\n\t\t\t# Get the first ranked (name, url) tuple for the target name from the remaining candidates\n\t\t\twinning_name_url = get_winning_url(name_urls, dept_name)\n\t\t\tif winning_name_url:\n\t\t\t\twinning_name_urls.add(winning_name_url)\n\n\treturn winning_name_urls", "def hook_internal_link(self, parser, space, name):\n link = name\n text = name\n\n # Split on pipe -- [[href|name]]\n separator = name.find('|')\n if separator != -1:\n link, text = link.split('|', 1)\n\n hash_pos = link.find('#')\n hash = ''\n if hash_pos != -1:\n link, hash = link.split('#', 1)\n\n # Sections use _, page names use +\n if hash != '':\n hash = '#' + hash.replace(' ', '_')\n\n # Links to this page can just contain href=\"#hash\"\n if link == '' and hash != '':\n return u'<a href=\"%s\">%s</a>' % (hash, text)\n\n link = self._getWikiLink(link)\n return u'<a href=\"%s%s\">%s</a>' % (link, hash, text)", "def _build_links(links):\n for link in links:\n link['href'] = link['href'].replace('servers', 'instances')\n return links", "def testAlternativeNamingSucceeds(self):\n cb_name_lib.GetRecoveryName(self.board,\n self.recovery,\n 1).AndReturn((self.index_page, self.rec_pat))\n cb_name_lib.DetermineUrl(self.index_page,\n self.rec_pat).AndReturn(self.rec_url)\n self.mox.ReplayAll()\n expected = (self.rec_url, self.index_page)\n actual = cb_name_lib.ResolveRecoveryUrl(self.board, self.recovery,\n alt_naming=1)\n self.assertEqual(expected, actual)", "def _get_name_relurl_and_desc(snippet_html):\n name_and_url_part, desc_part = snippet_html.find_all('p', 'snippet')\n name = name_and_url_part.get_text()\n relative_url = name_and_url_part.find('a').get('href')\n desc = desc_part.get_text()\n return name, relative_url, desc", "def resolve(host:str) -> Set[str]:\n\t\t\n\t\ttry:\n\t\t\treturn {resolution[4][0] for resolution in _forward(host, 80)}\n\t\texcept DNSError:\n\t\t\treturn set()", "def _resolve(addresses):\n\n for addr in addresses:\n _, _, ips = socket.gethostbyname_ex(addr)\n for ip in ips:\n yield ip", "def test_detectCanonicalNameLoop(self):\n servers = {\n ('1.1.2.3', 53): {\n ('example.com', A): {\n 'answers': [('example.com', Record_CNAME('example.net')),\n ('example.net', Record_CNAME('example.com'))],\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('example.com')\n return self.assertFailure(d, ResolverError)", "def get_name_url_matches(author_name, html_tree):\n\n\t# Convert name to lower case - this will be searched against lower case text on the Enlighten page\n\tlower_name = author_name.lower()\n\t# Used to convert text in <a> tags to lower case in paths before checking if matches the name provided\n\tcase = 'translate(text(), \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\")'\n\t# This is the path to look for <a> tags which contain the target name as text\n\t# N.B. 
contains() is used rather than equals as it can catch more cases\n\tpath = '//table/tr/td/ul/li/a[contains(%s, \\\"%s\\\")]' % (case, lower_name)\n\t# get the list of <a> elements whose text contains the name\n\telements = html_tree.xpath(path)\n\t# If target string was found, for each <a> element that contains it, make a\n\t# (text, url) tuple and create a list out of the resulting tuples\n\t# N.B. the href obtained from the element is concatenated to the base url as it is relative\n\tif elements:\n\t\t# have to concatenate as href is given as relative path\n\t\ttext_url_tups = [(elem.text, author_list_base + elem.get(\"href\")) for elem in elements]\n\telse:\n\t\ttext_url_tups = None\n\n\treturn text_url_tups", "def resolve_references_as_possible(s):\n refs = []\n resolved = []\n\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # try to resolve all REFs\n for ref in refs:\n if ref.try_to_point():\n resolved.append(ref)\n\n # for REFs that link up,\n for ref in resolved:\n s.resolve_single_ref( ref )", "def parse_resolve(cls, url):\n loc = cls.parse(url)\n if loc.path and loc.path != '/':\n # If true ref name contains slash, a prefix of path might be a suffix of\n # ref. Try to resolve it.\n ref_prefix = None\n if loc.treeish.startswith('refs/'):\n ref_prefix = loc.treeish + '/'\n refs = get_refs(loc.hostname, loc.project, ref_prefix)\n if not refs:\n raise TreeishResolutionError('could not resolve treeish in %s' % url)\n\n treeishes = set(refs.keys())\n # Add branches and tags without a prefix.\n for ref in refs:\n for prefix in ('refs/tags/', 'refs/heads/'):\n if ref.startswith(prefix):\n treeishes.add(ref[len(prefix):])\n break\n loc = cls.parse(url, treeishes=treeishes)\n return loc", "def aliases():\n fetch(\"PropertyValueAliases.txt\")\n longforms = {}\n shortforms = {}\n re1 = re.compile(r\"^ *sc *; *(\\w+) *; *(\\w+)\")\n for line in fileinput.input(os.path.basename(\"PropertyValueAliases.txt\")):\n m = re1.match(line)\n if m:\n l = m.group(2).strip()\n s = m.group(1).strip()\n assert(s not in longforms)\n assert(l not in shortforms)\n longforms[s] = l\n shortforms[l] = s\n else:\n continue\n\n return (longforms, shortforms)", "def dv_urlize(text):\n\tpart1 = re.compile(r\"(^|[\\n ])(((news|telnet|nttp|irc|http|ftp|https)://[\\w\\#$%&~.\\-;:=,?@\\[\\]+]*)(/[\\w\\#$%&~/.\\-;:=,?@\\[\\]+]*)?)\", re.IGNORECASE | re.DOTALL)\n\tpart2 = re.compile(r\"(^|[\\n ])(((www|ftp)\\.[\\w\\#$%&~.\\-;:=,?@\\[\\]+]*)(/[\\w\\#$%&~/.\\-;:=,?@\\[\\]+]*)?)\", re.IGNORECASE | re.DOTALL)\n\n\t# Make a quick copy of our variable to work with\n\tlink = text\n\n\t# Depending on your personal preference, you can choose one of two things with the following\n\t# Lines of code. If the value of SHORTEN_ONELINER_LINKS is set to 1, links appear in the\n\t# Oneliner in a truncated format. Any other value inserts the full link. 
Default: 0\n\n\tlink_type = getattr(settings, 'SHORTEN_ONELINER_LINKS', 0)\n\n\tif(link_type == 1):\n\t\t# Truncate displayed links to just the starting address.\n\t\tlink = part1.sub(r'\\1<a href=\"\\2\" target=\"_blank\">\\3</a>', link)\n\t\tlink = part2.sub(r'\\1<a href=\"http://\\2\" target=\"_blank\">\\3</a>', link)\n\telse:\n\t\t# Show them as they originally were added.\n\t\tlink = part1.sub(r'\\1<a href=\"\\2\" target=\"_blank\">\\2</a>', link)\n\t\tlink = part2.sub(r'\\1<a href=\"http://\\2\" target=\"_blank\">\\2</a>', link)\n\t\n\t# Return the results of the conversion\n\treturn link", "def ref( self, aWeb ):\n self.resolve( aWeb )\n return self.fullName", "def do_canonicals(dname):\n _log.debug(f\"Walking {dname}\")\n for fullname in dname.rglob(\"*.html\"):\n _log.debug(f\"Checking {fullname}\")\n basename = pathlib.Path(*fullname.parts[1:])\n last = findlast(basename, tocheck)\n if last is not None:\n update_canonical(fullname, last, dname == tocheck[1])", "def test_followCanonicalName(self):\n servers = {\n ('1.1.2.3', 53): {\n ('example.com', A): {\n 'answers': [('example.com', Record_CNAME('example.net'))],\n },\n ('example.net', A): {\n 'answers': [('example.net', Record_A('10.0.0.5'))],\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('example.com')\n d.addCallback(lambda results: results[0]) # Get the answer section\n d.addCallback(\n self.assertEqual,\n [RRHeader('example.com', CNAME, payload=Record_CNAME('example.net')),\n RRHeader('example.net', A, payload=Record_A('10.0.0.5'))])\n return d", "def get_aliases(self, text, namespace):\n if text is None:\n return\n \n aliases = {}\n text = ''.join(text)\n for a in text.splitlines():\n name, value = a.split(None, 1)\n a = c_ast.Alias(name, value)\n aliases[name] = a\n self.all[name] = a\n\n # The alias value will be located in the namespace,\n # or the aliases. 
Otherwise, it's unfound.\n for name, a in aliases.items():\n value = a.value\n if value in namespace:\n a.typ = namespace[value]\n elif value in aliases:\n a.typ = aliases[value]\n else:\n pass", "def getExpandedLinks():", "def resolve(self):\n pass # pragma: no cover", "def lookup():", "def update_links(self):\n for a in self.book.xpath(\"//a[@href]\"):\n href = a.xpath(\"@href\")[0]\n index_list = a.xpath(\"@data-index\")\n \n ### If there is no data-index it is assumed link comes from initial book landing page (the index page)\n if index_list == []:\n index = self.manager.get_page_index(\"index.html\")\n else:\n index = index_list[0]\n \n ### Fix people who are bad at links\n if href.startswith(\"www.\"):\n href = \"https://\" + href\n a.set(\"href\", href)\n \n ## Correct for ambiguity (Naive assumption that this error only occours on index page)\n if href == \"./\":\n href = \"index.html\"\n \n if not href:\n return None\n \n href = self.manager.convert_link(href, index)\n a.set(\"href\", href)", "def get_names():\n only_links = SoupStrainer(\"a\")\n names = set()\n doc = requests.get(NAMES_URL).content\n links = BeautifulSoup(doc, \"html.parser\", parse_only=only_links)\n pokemon = links.find_all(title=re.compile(\"(\\w+)(\\s){1}(\\(Pokémon\\))\"))\n for cell in pokemon:\n names.add(str(cell.string))\n \n\n return names", "def get_clean_zotero_link(links):\n link = \"https://www.zotero.org/%s/items\" % os.getenv(\"ZTH_SEARCH_PREFIX_URI\")\n if \"alternate\" in links:\n link = links[\"alternate\"][\"href\"].replace(\"items\", \"items/itemKey\")\n return link", "def testDefaultNamingSucceeds(self):\n cb_name_lib.GetRecoveryName(self.board, self.recovery, 0).AndReturn(\n (self.index_page, self.rec_pat))\n cb_name_lib.DetermineUrl(self.index_page, self.rec_pat).AndReturn(\n self.rec_url)\n self.mox.ReplayAll()\n expected = (self.rec_url, self.index_page)\n actual = cb_name_lib.ResolveRecoveryUrl(self.board, self.recovery)\n self.assertEqual(expected, actual)", "def lookup(name):", "def lookup(name):", "async def _transform_hares(self, urls):\n transformed_links = []\n result_list = await self._connect(urls, raw=True)\n for result in result_list:\n url, source_code = result[:2]\n link = re.findall(r'(http://hares.tw/archives/.*?)\\\">繼續閱讀全文', source_code)\n if link:\n transformed_links.append(link[0])\n else: # list is empty\n transformed_links.append(url)\n return transformed_links", "def _resolve_any_to_text(name, ns, dom):\n ret = []\n cmdline = (\"dig +noadditional +noquestion +nocmd \"\n \"+nostats +nocomment %s any @%s | grep ^%s\"\n % (name, ns, name))\n for line in os.popen(cmdline, \"r\"):\n line = re.sub(r'\\s+', ' ', line).strip()\n line = re.sub(r'\\.%s. ' % (dom), ' ', line)\n line = re.sub(r'^%s. 
' % (dom), '@ ', line)\n line = \"%-30s %6s %3s %6s %s\" % tuple(re.split(r'\\s+', line, 4))\n ret.append(line)\n return ret", "def reformat(array):\n global searched_domain\n response = []\n for tag in array:\n link = tag.get(\"href\", None)\n if link is not None:\n p = parse.urlparse(link)\n if re.match(searched_netloc, p.netloc):\n if p.scheme == \"\":\n link = parse.ParseResult(\"http\", *p[1:]).geturl()\n response.append(link)\n return response", "def resolution_map(names, env):\n return dict(zip(names, [resolve(n, env) for n in names]))", "def _parse_links(self, item) -> list:\n # TODO This would be a \"nice to have\" but is not necessary right now.\n return [{\"href\": \"\", \"title\": \"\"}]", "def auto_link_usernames_or_lists(self, options = {}):\r\n return self.auto_link_entities(self.extractor.extract_mentions_or_lists_with_indices(), options)", "def test_splits_urls_for_nouns(self):\r\n test_value = \"http://google.com/drives/autonomous/cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def test_link(self):\n self.assertEquals(\"[name](name)\", trans(\"[name]\"))\n self.assertEquals(\"[name](http://domain/path/to/resource)\",\n trans(\"[http://domain/path/to/resource name]\"))", "def tokenize_href(self, soup):\n for a in soup.find_all(u'a'):\n href = a.attrs.get(u'href', u'')\n # Absolute URLs only.\n if (href.startswith(u'//') or\n href.startswith(u'http://') or\n href.startswith(u'https://')):\n self.tokenize(href)", "def get_or_make_links(self, link_names):\n link_names = link_names if isinstance(link_names, (list, tuple)) else [link_names]\n urls = []\n for name in link_names:\n try:\n url = reverse(name)\n except NoReverseMatch as e:\n print(e)\n url = None\n # if name == 'password_reset':\n # path('test-password/', views.PasswordChangeView.as_view(template_name='update.html'), name=name)\n # else:\n # pass\n urls.append(url)\n # print(urls)\n return urls", "def __init__(self, name, links):\n self.name = name\n self.links = links", "def _buildResolvers(config):\n from twisted.names import client, cache, hosts\n\n ca, cl = [], []\n if config[\"cache\"]:\n ca.append(cache.CacheResolver(verbose=config[\"verbose\"]))\n if config[\"hosts-file\"]:\n cl.append(hosts.Resolver(file=config[\"hosts-file\"]))\n if config[\"recursive\"]:\n cl.append(client.createResolver(resolvconf=config[\"resolv-conf\"]))\n return ca, cl", "def __get_names(record: TNSRecord) -> Dict[str, str]:\n aliases = {'iau': record.name}\n internal_names = record.internal_names.split(',')\n for provider, pattern in Object.name_patterns.items():\n for name in internal_names:\n if pattern.match(name):\n aliases[provider] = name\n return aliases", "def link(address):", "def _resolve_name(self, name, service=None):\n service = service or self.service\n if re.match(r'^[-a-f0-9]{30,}$', name):\n return name\n\n try:\n request_cls = getattr(services, service).GetAllRequest\n except AttributeError:\n raise NameResolutionError('Name resolution unavailable for {}'.format(service))\n\n request = request_cls.from_dict(dict(name=name, only_fields=['name', 'id']))\n # from_dict will ignore unrecognised keyword arguments - not all GetAll's have only_fields\n response = getattr(self._session.send_api(request), service)\n matches = [db_object for db_object in response if name.lower() == db_object.name.lower()]\n\n def truncated_bullet_list(format_string, elements, callback, **kwargs):\n if len(elements) > self.MAX_SUGGESTIONS:\n kwargs.update(\n dict(details=' (showing 
{}/{})'.format(self.MAX_SUGGESTIONS, len(elements)), suffix='\\n...'))\n else:\n kwargs.update(dict(details='', suffix=''))\n bullet_list = '\\n'.join('* {}'.format(callback(item)) for item in elements[:self.MAX_SUGGESTIONS])\n return format_string.format(bullet_list, **kwargs)\n\n if len(matches) == 1:\n return matches.pop().id\n elif len(matches) > 1:\n message = truncated_bullet_list(\n 'Found multiple {service} with name \"{name}\"{details}:\\n{}{suffix}',\n matches,\n callback=attrgetter('id'),\n **locals())\n self.exit(message)\n\n message = 'Could not find {} with name/id \"{}\"'.format(service.rstrip('s'), name)\n\n if not response:\n raise NameResolutionError(message)\n\n suggestions = truncated_bullet_list(\n '. Did you mean this?{details}\\n{}{suffix}',\n sorted(response, key=attrgetter('name')),\n lambda db_object: '({}) {}'.format(db_object.id, db_object.name)\n )\n raise NameResolutionError(message, suggestions)", "def filter_url_parse_full_links(match):\n url = html.unescape(match.group(1))\n url = html.escape(url)\n punctuation = match.group(2)\n caption = filter_url_trim(url, filter_url_length)\n return '<a href=\"' + url + '\">' + caption + '</a>' + punctuation", "def simplify_links(proj,exp,links):\n simple_links =[] \n\n for key in links:\n (node_name,x,y) = key.rpartition(':')\n node_name = node_name+\".\"+exp+\".\"+proj+\".emulab.net\"\n simple_links.append((node_name,links[key]['ipaddr']))\n\n return simple_links", "def parse(html, url, bases): \n\n soup = BeautifulSoup(html, 'lxml')\n htmlBody = soup.find('body').get_text().strip()\n links = [urljoin(url, l.get('href')) for l in soup.findAll('a')]\n links = [l for l in links if urlparse(l).netloc in bases]\n return url, htmlBody, links", "def user_name_urls(self):\n raise NotImplementedError", "def TransformNames(self) -> _n_2_t_0[str]:", "def resolve(name):\n arg = Path(name)\n return str(arg.resolve())", "def gen_links(text):\n return []", "def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)", "def handle_dns(bot, ievent):\n if not ievent.args:\n ievent.missing('<host | ip>')\n else:\n is_a = None\n result = None\n # If we support IPv6 ...\n if socket.has_ipv6:\n # ... then check if this is an IPv6 ip\n try:\n socket.inet_pton(socket.AF_INET6, ievent.args[0])\n is_a = 'ipv6'\n except socket.error:\n pass\n # Ah not an IPv6 ip ...\n if not is_a:\n # ... maybe IPv4 ?\n try:\n socket.inet_pton(socket.AF_INET, ievent.args[0])\n is_a = 'ipv4'\n except socket.error:\n pass\n # Not an ip, must be a hostname then\n if not is_a:\n is_a = 'host'\n # If it was an ip ...\n if is_a in ['ipv4', 'ipv6']:\n try:\n # ... 
try to resolve it\n result = socket.gethostbyaddr(ievent.args[0])\n if result[1]:\n result = 'primary: %s, aliases: %s' % \\\n (result[0], ', '.join(result[1]))\n else:\n result = result[0]\n ievent.reply('%s ip %s resolves to %s' % \\\n (is_a, ievent.args[0], result))\n except Exception, e:\n ievent.reply('could not resolve %s address %s: %s' % \\\n (is_a, ievent.args[0], e[1]))\n # Oh it's a host, lets resolve that\n elif is_a == 'host':\n try:\n result = []\n for info in socket.getaddrinfo(ievent.args[0], None):\n if info[0] in [socket.AF_INET, socket.AF_INET6] and \\\n info[1] == socket.SOCK_STREAM:\n ip = info[4][0]\n if not ip in result:\n result.append(ip)\n if not result:\n ievent.reply('could not resolve hostname %s: not found' % \\\nievent.args[0])\n else:\n ievent.reply('%s resolves to: %s' % (ievent.args[0], \\\n', '.join(result)))\n except Exception, e:\n ievent.reply('could not resolve hostname %s: %s' % \\\n (ievent.args[0], e[1]))\n else:\n ievent.reply('lookup failed, no valid data found')", "def resolve_alias(self):\n cmd0 = self.cmd[0]\n\n if cmd0 in self.alias_stack:\n # Disabling the alias resolving to prevent infinite loop in call stack\n # and futher using binary_loc to resolve the alias name.\n self.alias = None\n return\n\n if callable(cmd0):\n alias = cmd0\n else:\n alias = XSH.aliases.get(cmd0, None)\n if alias is not None:\n self.alias_name = cmd0\n self.alias = alias", "def personas(self, pretty=True, sort=True):\n names = list(self.name2base)\n if pretty: names = [self.process_name(name, True) for name in names]\n if sort: names = sorted(names)\n return names", "def fix_names(users):\n for user in users:\n id = user['id']\n first_name = user['first_name'].strip()\n last_name = user['last_name'].strip()\n if not first_name and not last_name:\n # Empty name: skip\n print (f'Skipping empty name in record {id}')\n continue\n elif first_name == last_name:\n full_name = first_name\n elif first_name.endswith(last_name):\n full_name = first_name\n elif not last_name:\n full_name = first_name\n elif not first_name:\n full_name = last_name\n else:\n # In this case, the user has most likely entered the name\n # correctly split, so skip\n full_name = first_name + last_name\n print (f'Skipping already split name: {first_name} / {last_name} ({id})')\n continue\n \n print (f'Working on \"{full_name}\" ({id})')\n\n # Handle email addresses\n if '@' in full_name:\n print (f' - fixing email address')\n # Remove domain part\n e_name = full_name[:full_name.find('@')]\n if '+' in e_name:\n # Remove alias\n e_name = e_name[:e_name.find('+')]\n # Try to split name parts\n e_name = e_name.replace('.', ' ')\n e_name = e_name.replace('_', ' ')\n e_name = e_name.strip()\n if len(e_name) < 4:\n # Probably just initials: leave email as is\n pass\n else:\n full_name = e_name\n \n # Parse name\n name = nameparser.HumanName(full_name)\n name.capitalize()\n first_name = name.first\n last_name = name.last\n print (f' - splitting name into: {first_name} / {last_name} ({id})')\n yield (first_name, last_name, id)", "def get_assemblies_link_from_accession_number(term):\n ###########print('+++++++',term)\n # provide your own mail here # I wrote the email at the begining of the codes\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax=\"200\")\n record = Entrez.read(handle)\n ids = record[\"IdList\"]\n links = []\n for aid in ids:\n summary = get_id_give_assembly_summary(aid) # get summary\n url = summary[\"DocumentSummarySet\"][\"DocumentSummary\"][0][\"FtpPath_RefSeq\"]\n if url == 
\"\":\n continue\n label = os.path.basename(url)\n # get the fasta link - change this to get other formats\n link = url + \"/\" + label + \"_genomic.fna.gz\"\n link = link.replace(\"ftp://\", \"https://\")\n links.append(link)\n \n #############print('=======', links)\n return links", "def handle_url(url, session, res):\n print(\"Parsing\", url, file=sys.stderr)\n try:\n data, baseUrl = getPageContent(url, session)\n except IOError as msg:\n print(\"ERROR:\", msg, file=sys.stderr)\n return\n for match in url_matcher.finditer(data):\n shortname = match.group(1)\n name = unescape(match.group(2))\n name = asciify(name.replace('&', 'And').replace('@', 'At'))\n name = capfirst(name)\n if name in exclude_comics:\n continue\n if contains_case_insensitive(res, name):\n # we cannot handle two comics that only differ in case\n print(\"INFO: skipping possible duplicate\", repr(name), file=sys.stderr)\n continue\n res[name] = shortname", "def buildAutoLinkRelaxInfo(wikiDocument):\r\n # Build up regular expression\r\n # First fetch all wiki words\r\n words = wikiDocument.getWikiData().getAllProducedWikiLinks()\r\n\r\n # Sort longest words first\r\n words.sort(key=lambda w: len(w), reverse=True)\r\n \r\n return [(_TheHelper._createAutoLinkRelaxWordEntryRE(w), w)\r\n for w in words if w != u\"\"]", "def do_resolve(self,args):\n try:\n for solution in self.resolve_all(args):\n self.print_solution(solution)\n except:\n traceback.print_exc(file=sys.stdout)", "def _validate_links(cls, links, relationship: Optional[str] = None):\n errors = []\n for name in links:\n qual_name = cls._qualname(name, relationship)\n if qual_name in cls.__links_factories__:\n if not isinstance(links[name], Mapping):\n errors.append(f\" You must provide an arguments dictionary for '{qual_name}' link.\")\n continue\n provided_link = links.get(name)\n if provided_link is None:\n errors.append(f\" Nothing provided for building '{qual_name}' link.\")\n elif not isinstance(links[name], str):\n errors.append(f\" Provided '{qual_name}' link is not a string.\")\n if errors:\n raise ValueError(\"\\n\" + \"\\n\".join(errors))", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 
'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def final_rename(understat_no_similar, fpl_no_similar, join = 'inner'): \n name_mapper = {'Adrián':'Adrián Bernabé', # Contains both seasons corrections\n 'Alisson':'Alisson Ramses Becker',\n 'Allan':'Allan Marques Loureiro',\n 'André Gomes':'André Filipe Tavares Gomes',\n 'Angelino':'José Ángel Esmorís Tasende',\n 'Bernard':'Bernard Anício Caldeira Duarte', # Everton\n 'Bernardo Silva':'Bernardo Mota Veiga de Carvalho e Silva', # Manchester City\n 'Bernardo':'Bernardo Fernandes da Silva Junior', # \n 'Borja Bastón':'Borja González Tomás',\n 'Chicharito':'Javier Hernández Balcázar',\n 'David Luiz':'David Luiz Moreira Marinho', \n 'Ederson':'Ederson Santana de Moraes',\n 'Emerson':'Emerson Palmieri dos Santos',\n 'Fabinho':'Fabio Henrique Tavares',\n 'Felipe Anderson':'Felipe Anderson Pereira Gomes',\n 'Fred':'Frederico Rodrigues de Paula Santos', # Manchester United\n 'Hélder Costa': 'Hélder Wander Sousa de Azevedo e Costa', # Leeds\n 'Joelinton':'Joelinton Cássio Apolinário de Lira', # Chelsea\n 'Jonny':'Jonathan Castro Otto', # Wolves\n 'Jorginho':'Jorge Luiz Frello Filho', # Chelsea\n 'Jota':'José Ignacio Peleteiro Romallo',\n 'Kepa':'Kepa Arrizabalaga',\n 'Kiko Femenía':'Francisco Femenía Far',\n 'Lucas Moura':'Lucas Rodrigues Moura da Silva',\n 'Pedro': 'Pedro Rodríguez Ledesma', # Chelsea\n 'Raphinha':'Raphael Dias Belloli',\n 'Ricardo Pereira':'Ricardo Domingos Barbosa Pereira',\n 'Rodri':'Rodrigo Hernandez',\n 'Rúben Dias':'Rúben Santos Gato Alves Dias',\n 'Rúben Vinagre':'Rúben Gonçalo Silva Nascimento Vinagre',\n 'Semi Ajayi':'Oluwasemilogo Adesewo Ibidapo Ajayi',\n 'Trézéguet':'Mahmoud Ahmed Ibrahim Hassan', # Aston Villa\n 'Wesley':'Wesley Moraes',\n 'Willian':'Willian Borges Da Silva',\n }\n understat_no_similar['player_name'] = understat_no_similar['player_name'].map(name_mapper)\n manual_merge = pd.merge(fpl_no_similar, understat_no_similar, left_on=['player_name', 'kickoff_time'],\n right_on=['player_name', 'date'], how=join) # Merge using player name and date of game\n return manual_merge", "def process_xml(xml):\r\n\r\n def make_name_unique(xml_data):\r\n \"\"\"\r\n Make sure that the url_name of xml_data is unique. If a previously loaded\r\n unnamed descriptor stole this element's url_name, create a new one.\r\n\r\n Removes 'slug' attribute if present, and adds or overwrites the 'url_name' attribute.\r\n \"\"\"\r\n # VS[compat]. 
Take this out once course conversion is done (perhaps leave the uniqueness check)\r\n\r\n # tags that really need unique names--they store (or should store) state.\r\n need_uniq_names = ('problem', 'sequential', 'video', 'course', 'chapter',\r\n 'videosequence', 'poll_question', 'vertical')\r\n\r\n attr = xml_data.attrib\r\n tag = xml_data.tag\r\n id = lambda x: x\r\n # Things to try to get a name, in order (key, cleaning function, remove key after reading?)\r\n lookups = [('url_name', id, False),\r\n ('slug', id, True),\r\n ('name', Location.clean, False),\r\n ('display_name', Location.clean, False)]\r\n\r\n url_name = None\r\n for key, clean, remove in lookups:\r\n if key in attr:\r\n url_name = clean(attr[key])\r\n if remove:\r\n del attr[key]\r\n break\r\n\r\n def looks_like_fallback(url_name):\r\n \"\"\"Does this look like something that came from fallback_name()?\"\"\"\r\n return (url_name is not None\r\n and url_name.startswith(tag)\r\n and re.search('[0-9a-fA-F]{12}$', url_name))\r\n\r\n def fallback_name(orig_name=None):\r\n \"\"\"Return the fallback name for this module. This is a function instead of a variable\r\n because we want it to be lazy.\"\"\"\r\n if looks_like_fallback(orig_name):\r\n # We're about to re-hash, in case something changed, so get rid of the tag_ and hash\r\n orig_name = orig_name[len(tag) + 1:-12]\r\n # append the hash of the content--the first 12 bytes should be plenty.\r\n orig_name = \"_\" + orig_name if orig_name not in (None, \"\") else \"\"\r\n xml_bytes = xml.encode('utf8')\r\n return tag + orig_name + \"_\" + hashlib.sha1(xml_bytes).hexdigest()[:12]\r\n\r\n # Fallback if there was nothing we could use:\r\n if url_name is None or url_name == \"\":\r\n url_name = fallback_name()\r\n # Don't log a warning--we don't need this in the log. Do\r\n # put it in the error tracker--content folks need to see it.\r\n\r\n if tag in need_uniq_names:\r\n error_tracker(\"PROBLEM: no name of any kind specified for {tag}. Student \"\r\n \"state will not be properly tracked for this module. Problem xml:\"\r\n \" '{xml}...'\".format(tag=tag, xml=xml[:100]))\r\n else:\r\n # TODO (vshnayder): We may want to enable this once course repos are cleaned up.\r\n # (or we may want to give up on the requirement for non-state-relevant issues...)\r\n # error_tracker(\"WARNING: no name specified for module. xml='{0}...'\".format(xml[:100]))\r\n pass\r\n\r\n # Make sure everything is unique\r\n if url_name in self.used_names[tag]:\r\n # Always complain about modules that store state. If it\r\n # doesn't store state, don't complain about things that are\r\n # hashed.\r\n if tag in need_uniq_names:\r\n msg = (\"Non-unique url_name in xml. This may break state tracking for content.\"\r\n \" url_name={0}. Content={1}\".format(url_name, xml[:100]))\r\n error_tracker(\"PROBLEM: \" + msg)\r\n log.warning(msg)\r\n # Just set name to fallback_name--if there are multiple things with the same fallback name,\r\n # they are actually identical, so it's fragile, but not immediately broken.\r\n\r\n # TODO (vshnayder): if the tag is a pointer tag, this will\r\n # break the content because we won't have the right link.\r\n # That's also a legitimate attempt to reuse the same content\r\n # from multiple places. 
Once we actually allow that, we'll\r\n # need to update this to complain about non-unique names for\r\n # definitions, but allow multiple uses.\r\n url_name = fallback_name(url_name)\r\n\r\n self.used_names[tag].add(url_name)\r\n xml_data.set('url_name', url_name)\r\n\r\n try:\r\n # VS[compat]\r\n # TODO (cpennington): Remove this once all fall 2012 courses\r\n # have been imported into the cms from xml\r\n xml = clean_out_mako_templating(xml)\r\n xml_data = etree.fromstring(xml)\r\n\r\n make_name_unique(xml_data)\r\n\r\n descriptor = create_block_from_xml(\r\n etree.tostring(xml_data, encoding='unicode'),\r\n self,\r\n id_generator,\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n if not self.load_error_modules:\r\n raise\r\n\r\n # Didn't load properly. Fall back on loading as an error\r\n # descriptor. This should never error due to formatting.\r\n\r\n msg = \"Error loading from xml. %s\"\r\n log.warning(\r\n msg,\r\n unicode(err)[:200],\r\n # Normally, we don't want lots of exception traces in our logs from common\r\n # content problems. But if you're debugging the xml loading code itself,\r\n # uncomment the next line.\r\n # exc_info=True\r\n )\r\n\r\n msg = msg % (unicode(err)[:200])\r\n\r\n self.error_tracker(msg)\r\n err_msg = msg + \"\\n\" + exc_info_to_str(sys.exc_info())\r\n descriptor = ErrorDescriptor.from_xml(\r\n xml,\r\n self,\r\n id_generator,\r\n err_msg\r\n )\r\n\r\n descriptor.data_dir = course_dir\r\n\r\n xmlstore.modules[course_id][descriptor.scope_ids.usage_id] = descriptor\r\n\r\n if descriptor.has_children:\r\n for child in descriptor.get_children():\r\n parent_tracker.add_parent(child.scope_ids.usage_id, descriptor.scope_ids.usage_id)\r\n\r\n # After setting up the descriptor, save any changes that we have\r\n # made to attributes on the descriptor to the underlying KeyValueStore.\r\n descriptor.save()\r\n return descriptor", "def _parse_name(self, cell, cell_content):\n mp_page = cell_content.find(\"a\").attrs[\"href\"]\n\n full_name = cell_content.text.strip()\n name, *title = full_name.split(\",\")\n last, *first = name.split(\" \")\n\n id_ = mp_page[mp_page.find(\"PAD_\") + 4 : mp_page.rfind(\"/\")]\n url = re.sub(\"index.shtml$\", \"\", mp_page)\n\n first_name = \" \".join(first).rstrip(\",\").strip()\n last_name = last.strip()\n title = \",\".join(title).strip()\n\n return {\n \"id\": id_,\n \"url\": url,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"title\": title,\n }", "def collect_results(name: str) -> dict:\n full_response = {}\n\n target_name = dns.name.from_text(name)\n\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n if response is not None:\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n\n if response is not None:\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n\n if response is not None:\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n if response is not None:\n for answers in 
response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response", "def find_link_references(bytecode, full_reference_names):\n unprefixed_bytecode = remove_0x_prefix(bytecode)\n\n expand_fn = functools.partial(\n expand_shortened_reference_name,\n full_reference_names=full_reference_names,\n )\n\n link_references = tuple((\n LinkReference(\n reference_name=remove_dunderscore_wrapper(match.group()),\n full_name=expand_fn(remove_dunderscore_wrapper(match.group())),\n offset=match.start(),\n length=match.end() - match.start(),\n ) for match in re.finditer(DEPENDENCY_RE, unprefixed_bytecode)\n ))\n\n return link_references", "def resolve():\n while _TO_RESOLVE:\n obj = _TO_RESOLVE.pop()\n annotations(obj)", "async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)", "def _parse_link_date_map(self, response):\n link_date_map = defaultdict(list)\n for link in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .mk-text-block.indent16\"\n )[:1].css(\"a\"):\n link_str = link.xpath(\"./text()\").extract_first()\n link_start = self._parse_start(link_str)\n if link_start:\n link_date_map[link_start.date()].append(\n {\n \"title\": re.sub(r\"\\s+\", \" \", link_str.split(\" – \")[-1]).strip(),\n \"href\": link.attrib[\"href\"],\n }\n )\n for section in response.css(\n \".vc_col-sm-4.column_container:nth-child(1) .vc_tta-panel\"\n ):\n year_str = section.css(\".vc_tta-title-text::text\").extract_first().strip()\n for section_link in section.css(\"p > a\"):\n link_str = section_link.xpath(\"./text()\").extract_first()\n link_dt = self._parse_start(link_str, year=year_str)\n if link_dt:\n link_date_map[link_dt.date()].append(\n {\n \"title\": re.sub(\n r\"\\s+\", \" \", link_str.split(\" – \")[-1]\n ).strip(),\n \"href\": section_link.xpath(\"@href\").extract_first(),\n }\n )\n return link_date_map", "def _parse_links(self, response):\n links = []\n for link in response.css(\".row.mt-4 .list-unstyled a\"):\n links.append(\n {\n \"title\": \" \".join(link.css(\"*::text\").extract()).strip(),\n \"href\": response.urljoin(link.attrib[\"href\"]),\n }\n )\n return links", "def _complete_name(self, cr, uid, ids, name, args, context=None):\n res = {}\n#####added \n context=context or {}\n \n for m in self.browse(cr, uid, ids, context=context):\n if context.get('no_complete_name'):\n res[m.id] = m.name\n return res\n names = [m.name]\n parent = m.location_id\n while parent:\n names.append(parent.name)\n parent = parent.location_id\n res[m.id] = ' / '.join(reversed(names))\n return res", "def parse_references(article):\n reference_list = []\n references = article.find(\"text\").find(\"div\", attrs={\"type\": \"references\"})\n references = references.find_all(\"biblstruct\") if references is not None else []\n reference_list = []\n for reference in references:\n title = reference.find(\"title\", attrs={\"level\": \"a\"})\n if title is None:\n title = reference.find(\"title\", attrs={\"level\": \"m\"})\n title = title.text if title is not None else \"\"\n journal = 
reference.find(\"title\", attrs={\"level\": \"j\"})\n journal = journal.text if journal is not None else \"\"\n if journal is \"\":\n journal = reference.find(\"publisher\")\n journal = journal.text if journal is not None else \"\"\n year = reference.find(\"date\")\n year = year.attrs.get(\"when\") if year is not None else \"\"\n authors = []\n for author in reference.find_all(\"author\"):\n firstname = author.find(\"forename\", {\"type\": \"first\"})\n firstname = firstname.text.strip() if firstname is not None else \"\"\n middlename = author.find(\"forename\", {\"type\": \"middle\"})\n middlename = middlename.text.strip() if middlename is not None else \"\"\n lastname = author.find(\"surname\")\n lastname = lastname.text.strip() if lastname is not None else \"\"\n if middlename is not \"\":\n authors.append(firstname + \" \" + middlename + \" \" + lastname)\n else:\n authors.append(firstname + \" \" + lastname)\n authors = \"; \".join(authors)\n reference_list.append(\n {\"title\": title, \"journal\": journal, \"year\": year, \"authors\": authors}\n )\n return reference_list", "def find_link_title(link_para):\n urls = []\n source_code = requests.get(link_para)\n plain_text = source_code.text\n parsed_html = BeautifulSoup(plain_text)\n for sub_link in parsed_html.find_all('a'):\n urls.append(sub_link.string)\n print urls", "def link_residues(self) -> None:\n ...", "def get_names(parsed_data):\n known_values = []\n result = []\n # get name from contacts\n contacts = {'registrant_contact': [], 'administrative_contact': [], 'technical_contact': [],\n 'domain_registrar': []}\n if 'registrant_contact' in parsed_data:\n contacts['registrant_contact'].append(parsed_data['registrant_contact'])\n if 'administrative_contact' in parsed_data:\n contacts['administrative_contact'].append(parsed_data['administrative_contact'])\n if 'technical_contact' in parsed_data:\n contacts['technical_contact'].append(parsed_data['technical_contact'])\n if 'domain_registrar' in parsed_data:\n contacts['domain_registrar'].append(parsed_data['domain_registrar'])\n\n for contact, info in contacts.items():\n # properties dictionary\n fax = {'fax': '', 'type': 4}\n phone = {'phone': '', 'type': 4}\n country = {'country': '', 'type': 11}\n street = {'street': '', 'type': 8}\n city = {'city': '', 'type': 11}\n email = {'email': '', 'type': 2}\n if info is not None:\n d = {'type': 11, 'data': '', 'properties': {}, 'special_properties': {}, 'ref': {}}\n properties_list = []\n special_properties_list = []\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': ''}})\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n\n for name in info:\n if 'full_name' in name:\n if name['full_name'] in known_values:\n break\n if 'registrar_name' in name:\n if name['registrar_name'] in known_values:\n break\n\n for feature in name.keys():\n if feature == 'full_name':\n d['data'] = name['full_name']\n known_values.append(name['full_name'])\n if feature == 'registrar_name':\n d['data'] = name['registrar_name']\n known_values.append(name['registrar_name'])\n if feature == 'city_name':\n city['city'] = name['city_name']\n if feature == 'street_name':\n street['street'] = name['street_name']\n if feature == 'country_name':\n country['country'] = name['country_name']\n if feature == 'phone_number':\n phone['phone'] = name['phone_number']\n if feature == 'fax_number':\n 
fax['fax'] = name['fax_number']\n if feature == 'email_address':\n email['email'] = name['email_address']\n # if name is null, discard other info\n if d['data'] == '':\n continue\n # saving name special properties\n special_properties_list.append({'is_username': False, 'type': 0})\n special_properties_list.append({'is_domain_name': False, 'type': 0})\n special_properties_list.append({'is_public_name': False, 'type': 0})\n special_properties_list.append({'is_account_name': False, 'type': 0})\n d['special_properties'] = special_properties_list\n properties_list.append(fax)\n properties_list.append(phone)\n properties_list.append(country)\n properties_list.append(street)\n properties_list.append(city)\n properties_list.append(email)\n d['properties'] = properties_list\n result.append(d)\n return result", "def old_list_links(self, link_list, dd):\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names" ]
[ "0.6231827", "0.6040455", "0.6001876", "0.5930197", "0.5827227", "0.5807689", "0.56926876", "0.562395", "0.5538968", "0.55118114", "0.54805756", "0.54794437", "0.54488856", "0.54369795", "0.5422763", "0.5406316", "0.53882045", "0.53638905", "0.53599083", "0.5340648", "0.53325975", "0.53300893", "0.5308678", "0.5289816", "0.52595043", "0.524822", "0.5247943", "0.5241662", "0.52368826", "0.521566", "0.5202907", "0.5198021", "0.5197292", "0.5172155", "0.5160526", "0.515221", "0.5152035", "0.5150633", "0.5141442", "0.51402974", "0.5129905", "0.50953114", "0.5078919", "0.5050045", "0.50441325", "0.5025653", "0.5020213", "0.50185096", "0.50101614", "0.5006818", "0.5006301", "0.5000861", "0.5000861", "0.49889967", "0.49886543", "0.498692", "0.49774492", "0.49706253", "0.4968416", "0.4959852", "0.4943529", "0.49409768", "0.4935024", "0.49155024", "0.49110454", "0.49099183", "0.49024552", "0.49002165", "0.4898681", "0.48969093", "0.48935494", "0.4886341", "0.48810443", "0.4869933", "0.48611316", "0.48584342", "0.48458412", "0.48444507", "0.48339295", "0.48339102", "0.48321062", "0.48293203", "0.48263758", "0.48262456", "0.48261815", "0.48207152", "0.48124608", "0.48100033", "0.48051864", "0.48043555", "0.47945708", "0.4787591", "0.4784799", "0.47830176", "0.47829956", "0.47810808", "0.47772548", "0.47729796", "0.47689462", "0.47685853", "0.47682443" ]
0.0
-1
Determine whether to (1) motion-correct to the frame nearest T1High and assume that T1High and the fieldmap are in register, or (2) catenate transformations to the base epi with a transformation from the base epi to T1High.
def _SetCatMotionFmapMats(self, fmap, anat):
    if abs(self.info[fmap]['series'] - self.info[anat]['series']) == 1:
        # Adjacent series, use them.
        return False
    elif abs(self.info[fmap]['acqtime'] - self.info[anat]['acqtime']) < 180:
        return False
    else:
        sernos = []
        min_series = min(self.info[fmap]['series'], self.info[anat]['series'])
        max_series = max(self.info[fmap]['series'], self.info[anat]['series'])
        gap_series = range(min_series+1, max_series, 1)
        for entry in self.info.keys():
            if self.info[entry]['type'] != 'null':
                sernos.append(self.info[entry]['series'])
        for series in gap_series:
            if series in sernos:
                # Fieldmap is separated from structural by one "full" series,
                # where a full series is any series that was worth processing
                # by this program, i.e., not a HOS, an asset cal scan, a
                # b1 cal scan or any other very short calibration scan.
                return True
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)", "def event_m10_29_x38(flag1=105405):\r\n \"\"\"State 0,1: Intrusion MAP determination\"\"\"\r\n CompareEventFlag(0, flag1, 0)\r\n if ConditionGroup(0):\r\n \"\"\"State 2: Move to: Madura side\"\"\"\r\n return 0\r\n else:\r\n \"\"\"State 3: Move to: Forest side of the imaginary shadow\"\"\"\r\n return 1", "def apply_forward_map(self, transport_map, sig1):\n # Check input arrays\n transport_map = check_array(transport_map, ndim=2,\n dtype=[np.float64, np.float32])\n sig1 = check_array(sig1, ndim=2, dtype=[np.float64, np.float32],\n force_strictly_positive=True)\n\n # Number of projections in transport map must match number of angles\n if transport_map.shape[1] != self.theta.size:\n raise ValueError(\"Length of theta must equal number of \"\n \"projections in transport map: {} vs \"\n \"{}\".format(self.theta.size, transport_map.shape[1]))\n\n # Initialize Radon transforms\n rad1 = radon(sig1, theta=self.theta, circle=False)\n rad0 = np.zeros_like(rad1)\n\n # Check transport map and Radon transforms are the same size\n assert_equal_shape(transport_map, rad0,\n ['transport_map', 'Radon transform of sig0'])\n\n # Loop over angles\n cdt = CDT()\n for i in range(self.theta.size):\n # Convert projection to PDF\n j1 = signal_to_pdf(rad1[:,i], epsilon=1e-8, total=1.)\n\n # Radon transform of sig0 comprised of inverse CDT of projections\n rad0[:,i] = cdt.apply_forward_map(transport_map[:,i], j1)\n\n # Inverse Radon transform\n sig0_recon = iradon(rad0, self.theta, circle=False, filter='ramp')\n\n # Crop sig0_recon to match sig1\n sig0_recon = match_shape2d(sig1, sig0_recon)\n\n return sig0_recon", "def tofPreproc(evt, type, key, outkey=None):\n \n if outkey is None:\n outkey = \"corrected - \" + key\n tof_trace = evt[type][key].data\n\n tof_trace_inverted = tof_trace * -1\n #Find photon peak\n tof_peak_threshold = 
np.std(tof_trace_inverted[:pre_pp_index])*5\n\n all_peak_x = np.where(tof_trace_inverted>(np.median(tof_trace_inverted[:pre_pp_index])+tof_peak_threshold))[0]\n any_peaks = all_peak_x.size >= 2\n if any_peaks:\n print all_peak_x \n diff_x = all_peak_x[1:] - all_peak_x[:-1]\n end_peak = all_peak_x[np.where(diff_x > 1)[0]]\n photon_peak_end = end_peak[0] + 1\n photon_peak_start = all_peak_x[0]\n \n \t #Inverted and baseline corrected Tof signal\n base_line = np.median(tof_trace_inverted[:photon_peak_start])\n \n base_std = np.std(tof_trace_inverted[:photon_peak_start])\n \t\n corrected_tof = (tof_trace_inverted-base_line)[photon_peak_end:]\n add_record(evt['analysis'], 'analysis', 'Corrected ToF (base line)', corrected_tof)\n \n \t #Convert to M/Q\n Hpeak = np.argmax(corrected_tof[:hpeak_region])\n new_x = (np.arange(len(corrected_tof)) / float(Hpeak))**2. \n add_record(evt['analysis'], 'analysis', 'M/Q', new_x)", "def post_process(self):\n\t\ti_s = 0\n\t\ti_e = 0\n\t\tif self.trans_t_dict[0][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[0][1] = self.fake_start_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[0][1] = self.noise_itv[0][1] # start_offset\n\t\t\tself.trans_t_dict[0][2] = 0.1\n\t\tif self.trans_t_dict[len(self.trans_t_dict)-1][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.fake_end_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.noise_itv[-1][0] # end_offset\n\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][2] = 0.1\n\n\t\twhile i_s < len(self.trans_t_dict):\n\t\t\twhile i_s < len(self.trans_t_dict) and self.trans_t_dict[i_s][1] != 0:\n\t\t\t\ti_s += 1\n\t\t\tif i_s == len(self.trans_t_dict):\n\t\t\t\ti_e = len(self.trans_t_dict)\n\t\t\tif i_s < len(self.trans_t_dict):\n\t\t\t\ti_s -= 1\n\t\t\t\ti_e = i_s + 1\n\t\t\t\twhile i_e < len(self.trans_t_dict) and self.trans_t_dict[i_e][1] == 0:\n\t\t\t\t\ti_e += 1\n\t\t\t\tif i_e == len(self.trans_t_dict):\n\t\t\t\t\tbreak\n\n\t\t\t\t# incorperate the noise inverval\n\t\t\t\ts_time = self.trans_t_dict[i_s][1]\n\t\t\t\te_time = self.trans_t_dict[i_e][1]\n\t\t\t\t\"\"\"\n\t\t\t\tfor ts in self.noise_itv:\n\t\t\t\t\tif len(ts) == 2:\t\t\t\t\t\t\n\t\t\t\t\t\ttime1 = ts[0]\n\t\t\t\t\t\ttime2 = ts[1]\n\t\t\t\t\t\tif s_time < time1 and time2 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time1)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttime0 = ts[0]\n\t\t\t\t\t\tif s_time < time0 and time0 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time0)\n\t\t\t\t\"\"\"\n\t\t\t\tchar_len = 0\n\t\t\t\tfor i in range(i_s, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i][0])\n\t\t\t\t# ratio = float(self.trans_t_dict[i_e][1]-self.trans_t_dict[i_s][1]) / float(char_len)\n\t\t\t\tratio = float(e_time - s_time) / float(char_len)\n\t\t\t\tchar_len = 0\n\t\t\t\t# s_time = self.trans_t_dict[i_s][1]\n\t\t\t\tfor i in range(i_s+1, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i-1][0])\n\t\t\t\t\tself.trans_t_dict[i][1] = s_time + char_len * ratio\n\t\t\t\t\tself.trans_t_dict[i][2] = len(self.trans_t_dict[i][0]) * ratio\n\t\t\ti_s = i_e", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n 
cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def shiftDetectorONH(frame, onh_info, x_onh_bounds):\n\n x_min = x_onh_bounds[0]-30\n x_max = x_onh_bounds[1]+30\n frame_len = frame.shape[1]\n mid_x = int(frame_len/2)\n\n norm = frame/np.max(frame)#(2**16)\n #if the frame midpoint is inside the bbox x bounds\n #this section is to avoid using any part of the onh as the a-scan to reference when doing the cross-correlation\n if mid_x>=x_min and mid_x<=x_max:\n d_min = mid_x-x_min\n d_max = x_max-mid_x\n #if mid_x is closer to x_min but not close to the edge of the image -- at least 75 px\n if d_min<d_max and x_min>75:\n acol = int((frame_len/2)-(d_min+1))\n elif x_max<frame_len-75:\n acol = int((frame_len/2)+(d_max+1))\n else:\n acol = int((frame_len/2)-(d_min+1))\n anchorCol = norm[:,acol]\n else:\n anchorCol = norm[:,mid_x]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame_len)]\n\n #if onh detection is bad, bbox might be huge. 
The onh area should be less that 10% of the image (256*1024 pixels)\n if onh_info.area/(2**18) > 0.10:\n return shifts\n #old, changed 1-29-2018 because this is really about location, not size\n #if x_min<100 or x_max>902:\n #return shifts\n\n #This ensures that clean_shifts and clean_x are the same length and comes into play when the ONH is basically touching the\n #side of the image.\n #if the onh is too far to the right side of the frame, only use the left side info\n #fit a quadratic to get LOCAL curvature\n if x_max>=frame_len-100:\n #this uses the entire bscans to get the curvature, otherwise it will fit very poorly\n clean_x = np.arange(0,x_min,1)\n curve_fit_params = np.polyfit(clean_x, shifts[0:x_min],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #if the onh is too far to the left side, only use right side info\n elif x_min<100:\n clean_x = np.arange(x_max+1,frame_len,1)\n curve_fit_params = np.polyfit(clean_x, shifts[x_max+1:frame_len],2)\n curve_fit = lambda x: curve_fit_params[0]*x**2 + curve_fit_params[1]*x + curve_fit_params[2]\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = shifts\n clean_shifts[x_min:x_max+1]=corrected_shifts\n #Everything is normal, everyone is happy.\n else:\n #need to cut out onh, I don't think there is a way to index this to put it\n #directly in polyfit\n clean_shifts = np.array(shifts[0:x_min] + shifts[x_max+1:frame_len])\n clean_x = np.concatenate((np.arange(x_min-100,x_min,1),np.arange(x_max+1,x_max+101,1)))\n curve_fit_params = np.polyfit(clean_x, clean_shifts[x_min-100:x_min+100],3)\n curve_fit = lambda x: curve_fit_params[0]*x**3 + curve_fit_params[1]*x**2 + curve_fit_params[2]*x + curve_fit_params[3]\n #!!astype added 4-18-19 because floats throw an error when correcting shifts\n corrected_shifts = np.round(curve_fit(np.arange(x_min,x_max+1,1))).astype('int')\n clean_shifts = np.insert(clean_shifts, x_min+1, corrected_shifts)\n\n return list(clean_shifts)", "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def 
match_based_on_spatial_temperal_prior_test_2(tracker_record_1, tracker_record_2, pt_obj_1, pt_obj_2, associate_dict, t_interval=30):\n print(\"===== Get in the match_based_on_spatial_temperal_prior_test_2! ===== \")\n \n # file path\n device_id_1 = 0\n device_id_2 = 1\n img_root_1 = data_path[device_id_1]\n img_root_2 = data_path[device_id_2]\n # save_root =\n \n obj_single_camera_stp_cam_1 = SingleCameraSTP(tracker_record_1, pt_obj_1)\n obj_single_camera_stp_cam_2 = SingleCameraSTP(tracker_record_2, pt_obj_2)\n \n print(obj_single_camera_stp_cam_1.perspective_trace)\n print(obj_single_camera_stp_cam_1.motion_params_4_each)\n obj_multi_cameras_stp_c1c2 = MultiCamerasSTP(\n obj_single_camera_stp_cam_1,\n obj_single_camera_stp_cam_2,\n associate_dict)\n\n # # ===== TEST:coord_transformer_test =====\n # coord_transformer_test(obj_multi_cameras_stp_c1c2)\n # obj_multi_cameras_stp_c1c2.get_start_point_transform()\n \n pt_box_info_1 = obj_multi_cameras_stp_c1c2.obj_single_camera_stp_cam_1.perspective_trace\n pt_box_info_2 = obj_multi_cameras_stp_c1c2.obj_single_camera_stp_cam_2.perspective_trace\n \n # Test on object id '1'\n object_id = '0'\n \n for i in range(np.min([len(pt_box_info_1[object_id]), len(pt_box_info_2[object_id])])):\n f1 = i\n f2 = i\n fname_1 = str(pt_box_info_1[object_id][f1][1])+'.jpg'\n fname_2 = str(pt_box_info_2[object_id][f2][1])+'.jpg'\n \n img_1 = cv2.imread(os.path.join(img_root_1, fname_1))\n img_2 = cv2.imread(os.path.join(img_root_2, fname_2))\n \n cam_1_x = pt_box_info_1[object_id][f1][0][0]\n cam_1_y = pt_box_info_1[object_id][f1][0][1]\n \n cam_2_x = pt_box_info_2[object_id][f2][0][0]\n cam_2_y = pt_box_info_2[object_id][f2][0][1]\n \n t_interval = pt_box_info_2[object_id][f2][1]-pt_box_info_1[object_id][f1][1]\n \n print(cam_1_x, cam_1_y)\n print(cam_2_x, cam_2_y)\n print(t_interval)\n # print(obj_multi_cameras_stp_c1c2.starting_point)\n \n p_map = obj_multi_cameras_stp_c1c2.get_probability_map(cam_1_x, cam_1_y, t_interval, height=210, width=80)\n p_map = cv2.applyColorMap(p_map, cv2.COLORMAP_JET)\n p = obj_multi_cameras_stp_c1c2.get_probability(cam_2_x, cam_2_y, cam_1_x, cam_1_y, t_interval)\n print(p)\n # dist = obj_multi_cameras_stp_c1c2.get_distance(cam_2_x,cam_2_y,cam_1_x,cam_1_y,t_interval)\n p_map = cv2.resize(p_map, (int(pt_obj_2.transformed_width_for_disp), int(pt_obj_2.transformed_height_for_disp)))\n p_map = cv2.flip(p_map, 0) # 0:vertical flip\n pt_color_p_map = pt_obj_2.get_inverse_disp_transform(p_map)\n \n alpha = 0.5\n img_3 = cv2.addWeighted(img_2, alpha, pt_color_p_map, 1-alpha, 0)\n \n img_4 = np.zeros((int(img_2.shape[0]), int(img_2.shape[1]*2), 3), np.uint8)\n img_4[:, :img_1.shape[1], :] = img_1\n img_4[:, img_1.shape[1]:, :] = img_3\n\n # cv2.namedWindow('img_1',cv2.WINDOW_NORMAL)\n # cv2.namedWindow('img_2',cv2.WINDOW_NORMAL)\n cv2.namedWindow('img_4', cv2.WINDOW_NORMAL)\n \n # cv2.imshow('img_1',img_1)\n # cv2.imshow('img_2',img_2)\n cv2.imshow('img_4', img_4)\n \n cv2.imwrite(os.path.join(save_root, fname_1), img_4)\n \n cv2.waitKey()\n return", "def grid_displacement_pc(grid1, grid2, field, level, return_value=\"pixels\"):\n # create copies of the data\n field_data1 = grid1.fields[field][\"data\"][level].copy()\n field_data2 = grid2.fields[field][\"data\"][level].copy()\n\n # replace fill values with valid_min or minimum value in array\n if \"valid_min\" in grid1.fields[field]:\n min_value1 = grid1.fields[field][\"valid_min\"]\n else:\n min_value1 = field_data1.min()\n field_data1 = np.ma.filled(field_data1, min_value1)\n\n if 
\"valid_min\" in grid2.fields[field]:\n min_value2 = grid2.fields[field][\"valid_min\"]\n else:\n min_value2 = field_data2.min()\n field_data2 = np.ma.filled(field_data2, min_value2)\n\n # discrete fast fourier transformation and complex conjugation of field 2\n image1fft = np.fft.fft2(field_data1)\n image2fft = np.conjugate(np.fft.fft2(field_data2))\n\n # inverse fourier transformation of product -> equal to cross correlation\n imageccor = np.real(np.fft.ifft2(image1fft * image2fft))\n\n # shift the zero-frequency component to the center of the spectrum\n imageccorshift = np.fft.fftshift(imageccor)\n\n # determine the distance of the maximum from the center\n # find the peak in the correlation\n row, col = field_data1.shape\n yshift, xshift = np.unravel_index(np.argmax(imageccorshift), (row, col))\n yshift -= int(row / 2)\n xshift -= int(col / 2)\n\n dx = grid1.x[\"data\"][1] - grid1.x[\"data\"][0]\n dy = grid1.y[\"data\"][1] - grid1.y[\"data\"][0]\n x_movement = xshift * dx\n y_movement = yshift * dy\n\n if return_value == \"pixels\":\n displacement = (yshift, xshift)\n elif return_value == \"distance\":\n displacement = (y_movement, x_movement)\n elif return_value == \"velocity\":\n t1 = num2date(grid1.time[\"data\"][0], grid1.time[\"units\"])\n t2 = num2date(grid2.time[\"data\"][0], grid2.time[\"units\"])\n dt = (t2 - t1).total_seconds()\n u = x_movement / dt\n v = y_movement / dt\n displacement = (v, u)\n else:\n displacement = (yshift, xshift)\n return displacement", "def trajectory_mode(location1, location2):\n while abs(axis_1.get_pos() - location1) >= 50 and abs(axis_0.get_pos() - location2) >= 50:\n axis_0.set_pos_trap(location2)\n axis_1.set_pos_trap(location1)\n axis_1.get_pos()\n axis_0.get_pos()", "def process_frame(self, raw_frame, detection_all):\n \n frame = raw_frame.copy()\n h,w,_ = frame.shape\n proximity = int(h/2.4)\n pic_proximity = int(h/1.45)\n min_distance = int(w/2)\n\n head_pos = (detection_all[4][0], detection_all[4][1])\n hand_pos = (detection_all[3][0], detection_all[3][1])\n \n \n #self.target_height = target_height\n self.target_height = detection_all[4][3]\n target_width = detection_all[4][2]\n target = (detection_all[4][0], detection_all[4][1])\n\n ref_x = int(w/2)\n ref_y = int(h*0.35)\n \n self.axis_speed = self.cmd_axis_speed.copy()\n\n #Is there a Picture Command ?\n if self.picture_approach:\n cls_number = int(self.classes_dict[self.picture_target])\n print (str(self.picture_target) + 'is' + str(cls_number))\n print (self.picture_target + ' values:' + str(detection_all[cls_number]))\n \n # If no pic target found --> rotate\n if (detection_all[cls_number][0] + detection_all[cls_number][1]) == 0:\n \n log.info(f'searching for {self.picture_target}')\n \n if time.time() - self.search_start_time < 8: \n self.axis_speed[\"yaw\"] = 60\n else:\n print('stopped searching after 8 seconds')\n self.axis_speed[\"yaw\"] = 0\n self.picture_approach = False\n \n # If pic target found set as new tracking target\n else:\n print('pic target found')\n self.axis_speed[\"yaw\"] = 0\n if self.timestamp_pic_target_found is None:\n self.timestamp_pic_target_found = time.time()\n\n log.info(f'found {self.picture_target}')\n target = (detection_all[cls_number][0], detection_all[cls_number][1])\n self.target_height = detection_all[cls_number][3]\n \n #If Human Head:\n if cls_number == 4:\n self.keep_distance = pic_proximity*0.75\n else:\n self.keep_distance = pic_proximity\n\n self.pid_pitch = PID(0.15,0.0,0.1,setpoint=0,output_limits=(-30,30))\n self.tracking = True\n \n # If 
voice command 'come home' activate RTH\n if self.rth:\n self.target_height = detection_all[4][3]\n target_width = detection_all[4][2]\n target = (detection_all[4][0], detection_all[4][1])\n self.keep_distance = proximity*0.75\n self.toggle_tracking(tracking=True)\n\n if self.timestamp_take_picture:\n if time.time() - self.timestamp_take_picture > 2:\n self.timestamp_take_picture = None\n self.drone.take_picture()\n else:\n\n if self.tracking: \n if target != (0,0): \n if self.distance_mode: \n # Locked distance mode\n if self.keep_distance is None:\n self.keep_distance = self.target_height\n #self.graph_distance = RollingGraph(window_name=\"Distance\", y_max=500, threshold=self.keep_distance, waitKey=False)\n \n if self.palm_landing_approach:\n self.keep_distance = proximity\n self.timestamp_keep_distance = time.time()\n log.info(\"APPROACHING on pose\")\n self.pid_pitch = PID(0.2,0.0,0.1,setpoint=0,output_limits=(-30,30))\n #self.graph_distance = RollingGraph(window_name=\"Distance\", y_max=500, threshold=self.keep_distance, waitKey=False)\n\n self.body_in_prev_frame = True\n \n xoff = int(target[0]-ref_x)\n yoff = int(ref_y-target[1])\n\n #We draw an arrow from the reference point to the head we are targeting \n color = (0,0,255)\n cv2.circle(frame, (ref_x, ref_y), 10, color, 1,cv2.LINE_AA)\n cv2.line(frame, (ref_x, ref_y), target, color, 4)\n cv2.rectangle(frame, (target[0]-target_width//2, target[1]-self.target_height//2), \n (target[0]+target_width//2, target[1]+self.target_height//2),color,4)\n \n # The PID controllers calculate the new speeds for yaw and throttle\n self.axis_speed[\"yaw\"] = int(-self.pid_yaw(xoff))\n #log.debug(f\"xoff: {xoff} - speed_yaw: {self.axis_speed['yaw']}\")\n self.last_rotation_is_cw = self.axis_speed[\"yaw\"] > 0\n\n self.axis_speed[\"throttle\"] = int(-self.pid_throttle(yoff))\n #log.debug(f\"yoff: {yoff} - speed_throttle: {self.axis_speed['throttle']}\")\n\n #If in locked distance mode\n if self.keep_distance and self.target_height: \n \n # Check RTH\n if self.rth and self.target_height>=self.keep_distance:\n self.rth = False\n \n elif self.palm_landing_approach and self.target_height>self.keep_distance:\n # The drone is now close enough to the body\n # Let's do the palm landing\n log.info(\"PALM LANDING after approaching\")\n self.palm_landing_approach = False\n self.toggle_tracking(tracking=False)\n self.palm_land() \n \n elif self.picture_approach and \\\n abs(self.target_height-self.keep_distance) < 15 and \\\n xoff < 12 and yoff < 15:\n \n # The drone is now close enough to the pic target\n # Let's take a picture \n self.toggle_tracking(tracking=False)\n print('take a picture')\n self.drone.take_picture()\n self.picture_approach = False\n self.timestamp_pic_target_found = None\n self.pid_pitch = PID(0.3,0.0,0.1,setpoint=0,output_limits=(-70,70))\n \n \n else:\n self.axis_speed[\"pitch\"] = int(self.pid_pitch(self.target_height-self.keep_distance))\n log.debug(f\"Target distance: {self.keep_distance} - cur: {self.target_height} -speed_pitch: {self.axis_speed['pitch']}\")\n \n if abs(head_pos[1] - hand_pos[1])<30:\n if self.timestamp_hand_ctrl is None:\n self.timestamp_hand_ctrl = time.time()\n if time.time() - self.timestamp_hand_ctrl > 1:\n if self.head_hand_x_dist is None:\n self.head_hand_x_ref = head_pos[0]-hand_pos[0]\n \n self.hand_ctrl = True\n self.head_hand_x_dist = head_pos[0]-hand_pos[0]\n self.axis_speed[\"roll\"] = int(-self.pid_roll(self.head_hand_x_ref - self.head_hand_x_dist))\n #print (f'head hand X distance: 
{abs(head_pos[0]-hand_pos[0])}')\n\n else:\n self.hand_ctrl = False\n self.timestamp_hand_ctrl = None\n self.head_hand_x_dist = None\n\n else: # Tracking but no body detected\n if self.body_in_prev_frame:\n self.timestamp_no_body = time.time()\n self.body_in_prev_frame = False\n self.axis_speed[\"throttle\"] = self.prev_axis_speed[\"throttle\"]\n self.axis_speed[\"yaw\"] = self.prev_axis_speed[\"yaw\"]\n else:\n if time.time() - self.timestamp_no_body < 1:\n print(\"NO BODY SINCE < 1\", self.axis_speed, self.prev_axis_speed)\n self.axis_speed[\"throttle\"] = self.prev_axis_speed[\"throttle\"]\n self.axis_speed[\"yaw\"] = self.prev_axis_speed[\"yaw\"]\n else:\n log.debug(\"NO BODY detected for 1s -> rotate\")\n self.axis_speed[\"yaw\"] = self.def_speed[\"yaw\"] * (1 if self.last_rotation_is_cw else -1)\n \n\n # Send axis commands to the drone\n for axis, command in self.axis_command.items():\n if self.axis_speed[axis]is not None and self.axis_speed[axis] != self.prev_axis_speed[axis]:\n #log.debug(f\"COMMAND {axis} : {self.axis_speed[axis]}\")\n command(self.axis_speed[axis])\n self.prev_axis_speed[axis] = self.axis_speed[axis]\n else:\n # This line is necessary to display current values in 'self.write_hud'\n self.axis_speed[axis] = self.prev_axis_speed[axis]\n \n # Write the HUD on the frame\n frame = self.write_hud(frame)\n\n return frame", "def Motion_estimate_reverse_1frame(ref0_frame,ref1_frame,P_frame,block_size):\n \n nb_blocks = width//block_size*height//block_size\n \n vect_field = np.array(P_frame[:nb_blocks*3])\n vect_field = vect_field.reshape((height//block_size,width//block_size,3))\n \n frame_error = DCT_inverse(np.array(P_frame[nb_blocks*3:]),offset=0)\n tar_Y = frame_error[ :sep1].reshape(height,width)\n tar_U = frame_error[sep1:sep2].reshape(height//2,width//2)\n tar_V = frame_error[sep2: ].reshape(height//2,width//2)\n \n ref_frame = [ref0_frame,ref1_frame]\n \n for X in range(0,height//block_size):\n for Y in range(0,width//block_size):\n xa, xz = X*block_size,(X+1)*block_size\n ya, yz = Y*block_size,(Y+1)*block_size\n \n ref,vx,vy = vect_field[X,Y,:]\n \n pxa, pxz = xa+vx,xz+vx\n pya, pyz = ya+vy,yz+vy\n \n patch_Y = ref_Y[ref][pxa:pxz,pya:pyz]\n patch_U = ref_U[ref][pxa//2:pxz//2,pya//2:pyz//2]\n patch_V = ref_V[ref][pxa//2:pxz//2,pya//2:pyz//2]\n \n tar_Y[xa:xz,ya:yz] += patch_Y\n tar_U[xa//2:xz//2,ya//2:yz//2] += patch_U\n tar_V[xa//2:xz//2,ya//2:yz//2] += patch_V\n\n target_frame = np.concatenate((tar_Y.flatten(),\n tar_U.flatten(),\n tar_V.flatten()))\n return target_frame", "def getTiltedCoordinates(img1, img2, tiltdiff, picks1=[], angsearch=True, inittiltaxis=-7.2, msg=True):\n\tt0 = time.time()\n\t#shrink images\n\tbin = 2\n\tbinned1 = apImage.binImg(img1, bin)\n\tbinned2 = apImage.binImg(img2, bin)\n\t#apImage.arrayToJpeg(binned1, \"binned1.jpg\")\n\t#apImage.arrayToJpeg(binned2, \"binned2.jpg\")\n\tfilt1 = apImage.highPassFilter(binned1, apix=1.0, radius=20.0, localbin=4/bin)\n\tfilt2 = apImage.highPassFilter(binned2, apix=1.0, radius=20.0, localbin=4/bin)\n\t#apImage.arrayToJpeg(filt1, \"filt1.jpg\")\n\t#apImage.arrayToJpeg(filt2, \"filt2.jpg\")\n\n\tif angsearch is True:\n\t\tbestsnr = 0\n\t\tbestangle = None\n\t\t### rough refine\n\t\t#for angle in [-6, -4, -2,]:\n\t\t#\tsys.stderr.write(\".\")\n\t\t#\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False)\n\t\t#\tif snr > bestsnr:\t\n\t\t#\t\tbestsnr = snr\n\t\t#\t\tbestangle = angle\n\t\tbestangle = inittiltaxis\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt 
axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\t\t### finer refine\n\t\tfor angle in [bestangle-1, bestangle-0.5, bestangle+0.5, bestangle+1]:\n\t\t\tif msg is True:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False)\n\t\t\tif snr > bestsnr:\t\n\t\t\t\tbestsnr = snr\n\t\t\t\tbestangle = angle\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\t\t### really fine refine\n\t\tfor angle in [bestangle-0.2, bestangle-0.1, bestangle+0.1, bestangle+0.2]:\n\t\t\tif msg is True:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False)\n\t\t\tif snr > bestsnr:\t\n\t\t\t\tbestsnr = snr\n\t\t\t\tbestangle = angle\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\n\t\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, bestangle, bin, msg=msg)\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\telse:\n\t\tbestangle = 0.0\n\t\tshift, xfactor, snr = getTiltedRotateShift(img1, img2, tiltdiff, bestangle, bin)\n\n\tif msg and min(abs(shift)) < min(img1.shape)/16.0:\n\t\tapDisplay.printWarning(\"Overlap was too close to the edge and possibly wrong.\")\n\n\t### case 1: find tilted center of first image\n\tcenter = numpy.asarray(img1.shape)/2.0\n\tnewpoint = translatePoint(center, center, shift, bestangle, xfactor)\n\t#print \"newpoint=\", newpoint\n\thalfsh = (center + newpoint)/2.0\n\torigin = halfsh\n\n\t### case 2: using a list of picks\n\tif len(picks1) > 1:\n\t\t#get center most pick\n\t\tdmin = origin[0]/2.0\n\t\tfor pick in picks1:\n\t\t\tda = numpy.hypot(pick[0]-halfsh[0], pick[1]-halfsh[1])\n\t\t\tif da < dmin:\n\t\t\t\tdmin = da\n\t\t\t\torigin = pick\n\n\t# origin is pick from image 1\n\t# newpart is pick from image 2\n\tnewpart = translatePoint(origin, center, shift, bestangle, xfactor)\n\tnewpart2 = numpy.array([(origin[0]*xfactor-shift[0])*xfactor, origin[1]-shift[1]])\n\tif msg is True:\n\t\tapDisplay.printMsg(\"origin=(%d,%d); newpart=(%.1f,%.1f); newpart2=(%.1f,%.1f)\"\n\t\t\t%(origin[0],origin[1], newpart[0],newpart[1], newpart2[0],newpart2[1],))\n\t\tapDisplay.printMsg(\"completed in \"+apDisplay.timeString(time.time()-t0))\n\n\treturn origin, newpart, snr, bestangle\n\n\t### check to make sure points are not off the edge\n\twhile newpart[0] < 10:\n\t\tnewpart += numpy.asarray((20,0))\n\t\torigin += numpy.asarray((20,0))\n\twhile newpart[1] < 10:\n\t\tnewpart += numpy.asarray((0,20))\n\t\torigin += numpy.asarray((0,20))\n\twhile newpart[0] > img1.shape[0]-10:\n\t\tnewpart -= numpy.asarray((20,0))\n\t\torigin -= numpy.asarray((20,0))\n\twhile newpart[1] > img1.shape[1]-10:\n\t\tnewpart -= numpy.asarray((0,20))\n\t\torigin -= numpy.asarray((0,20))\n\n\treturn origin, newpart", "def timerange_change():\n global transformer_top\n assert transformer_top is not None\n global transformer_bottom\n assert transformer_bottom is not None\n global label_encoders_per_modality\n assert label_encoders_per_modality is not None\n global DEVICE\n assert DEVICE is not None\n global USE_LOCAL_CONDITIONING\n assert USE_LOCAL_CONDITIONING is not None\n global partial_sample_model\n assert partial_sample_model is not None\n\n layer = str(request.args.get('layer'))\n temperature = request.args.get('temperature', type=float)\n start_index_top = 
request.args.get('start_index_top', type=int)\n uniform_sampling = bool(strtobool(\n request.args.get('uniform_sampling', type=str,\n default=\"False\")))\n\n # try to retrieve local conditioning map in the request's JSON payload\n (class_conditioning_top_map, class_conditioning_bottom_map,\n input_conditioning_top, input_conditioning_bottom) = (\n parse_conditioning(request)\n )\n global_instrument_family_str = str(\n request.args.get('instrument_family_str'))\n global_pitch = request.args.get('pitch', type=int)\n global_class_conditioning = {\n 'pitch': global_pitch,\n 'instrument_family_str': global_instrument_family_str\n }\n if (not USE_LOCAL_CONDITIONING\n or not transformer_bottom.local_class_conditioning):\n class_conditioning_bottom = global_class_conditioning.copy()\n class_conditioning_tensors_bottom = make_conditioning_tensors(\n class_conditioning_bottom,\n label_encoders_per_modality)\n class_conditioning_bottom_map = None\n else:\n class_conditioning_bottom = class_conditioning_tensors_bottom = None\n\n top_code, bottom_code = parse_codes(request)\n\n # extract frame to operate on\n end_index_top = start_index_top + transformer_top.shape[1]\n top_code_frame = top_code[..., start_index_top:end_index_top]\n\n upsampling_ratio_time = (transformer_bottom.shape[1]\n // transformer_top.shape[1])\n start_index_bottom = upsampling_ratio_time * start_index_top\n end_index_bottom = start_index_bottom + transformer_bottom.shape[1]\n bottom_code_frame = bottom_code[..., start_index_bottom:end_index_bottom]\n generation_mask_batched = parse_mask(request).to(DEVICE)\n\n time_indexes_top = make_time_indexes(start_index_top,\n top_code.shape[-1],\n transformer_top.shape[-1])\n time_indexes_bottom = make_time_indexes(start_index_bottom,\n bottom_code.shape[-1],\n transformer_bottom.shape[-1])\n\n if layer == 'bottom':\n if not uniform_sampling:\n bottom_code_resampled_frame = partial_sample_model(\n model=transformer_bottom,\n condition=top_code_frame,\n batch_size=1,\n codemap_size=transformer_bottom.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_bottom,\n local_class_conditioning_map=class_conditioning_bottom_map,\n initial_code=bottom_code_frame,\n mask=generation_mask_batched,\n time_indexes_source=time_indexes_top,\n time_indexes_target=time_indexes_bottom,\n )\n else:\n bottom_code_resampled_frame = bottom_code_frame.masked_scatter(\n generation_mask_batched,\n torch.randint_like(bottom_code_frame,\n high=transformer_bottom.n_class_target)\n )\n\n bottom_code_resampled = bottom_code\n bottom_code_resampled[..., start_index_bottom:end_index_bottom] = (\n bottom_code_resampled_frame)\n\n # create JSON response\n response = make_response(top_code, bottom_code_resampled,\n input_conditioning_top,\n input_conditioning_bottom)\n elif layer == 'top':\n if (not USE_LOCAL_CONDITIONING\n or not transformer_top.local_class_conditioning):\n # try to retrieve conditioning from http arguments\n class_conditioning_top = global_class_conditioning.copy()\n class_conditioning_tensors_top = make_conditioning_tensors(\n class_conditioning_top,\n label_encoders_per_modality)\n class_conditioning_top_map = None\n else:\n class_conditioning_top = class_conditioning_tensors_top = None\n\n if not uniform_sampling:\n if transformer_top.self_conditional_model:\n condition = top_code_frame\n else:\n condition = None\n top_code_resampled_frame = partial_sample_model(\n model=transformer_top,\n condition=condition,\n device=DEVICE,\n batch_size=1,\n 
codemap_size=transformer_top.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_top,\n local_class_conditioning_map=class_conditioning_top_map,\n initial_code=top_code_frame,\n mask=generation_mask_batched,\n time_indexes_source=time_indexes_top,\n time_indexes_target=time_indexes_top,\n )\n else:\n top_code_resampled_frame = top_code_frame.masked_scatter(\n generation_mask_batched,\n torch.randint_like(top_code_frame,\n high=transformer_top.n_class_target)\n )\n\n top_code_resampled = top_code\n top_code_resampled[..., start_index_top:end_index_top] = (\n top_code_resampled_frame)\n\n upsampling_ratio_frequency = (transformer_bottom.shape[0]\n // transformer_top.shape[0])\n generation_mask_bottom_batched = (\n generation_mask_batched\n .repeat_interleave(upsampling_ratio_frequency, -2)\n .repeat_interleave(upsampling_ratio_time, -1)\n )\n bottom_code_resampled_frame = partial_sample_model(\n model=transformer_bottom,\n condition=top_code_resampled_frame,\n device=DEVICE,\n batch_size=1,\n codemap_size=transformer_bottom.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_bottom,\n local_class_conditioning_map=class_conditioning_bottom_map,\n initial_code=bottom_code_frame,\n mask=generation_mask_bottom_batched,\n time_indexes_source=time_indexes_top,\n time_indexes_target=time_indexes_bottom,\n )\n\n # update conditioning map\n bottom_mask = generation_mask_bottom_batched[0]\n new_conditioning_map_bottom = {\n modality: masked_fill(modality_conditioning,\n bottom_mask,\n class_conditioning_bottom[modality])\n for modality, modality_conditioning\n in input_conditioning_bottom.items()\n }\n\n bottom_code_resampled = bottom_code\n bottom_code_resampled[..., start_index_bottom:end_index_bottom] = (\n bottom_code_resampled_frame)\n\n # create JSON response\n response = make_response(top_code_resampled, bottom_code_resampled,\n input_conditioning_top,\n new_conditioning_map_bottom)\n\n return response", "def overlay_lines(self, p1, p2, FT, frame):\n \n if p1 == p2:\n self.show_dif_class_msg()\n \n else:\n a1 = complete_scores[p1, p2][0]\n a2 = complete_scores[p1, p2][1]\n projection1 = make_1D(extract_2D[p1], a1)\n projection2 = make_1D(extract_2D[p2], a2)\n\n if FT: \n pad_p1 = np.pad(projection1.vector, pad_width=(0, shape-projection1.size()))\n pad_p2 = np.pad(projection2.vector, pad_width=(0, shape-projection2.size()))\n A = abs(np.fft.rfft(pad_p1))\n B = abs(np.fft.rfft(pad_p2))\n \n f = Figure(figsize=(8,4))\n ax = f.add_subplot(111)\n\n ax.bar(range(len(A)), A, alpha=0.35, color='deepskyblue', ec='k', linewidth=1)\n ax.bar(range(len(B)), B, alpha=0.35, color='yellow', ec='k', linewidth=1)\n \n ax.get_xaxis().set_ticks([])\n ax.set_xlabel('frequency component')\n ax.set_ylabel('Amplitude')\n\n else:\n a2_flip = complete_scores[p1, p2][1] + 180\n projection2_flip = make_1D(extract_2D[p2], a2_flip)\n\n score_default, r, c = slide_score(projection1, projection2) # Score and location of optimum\n score_flip, r_flip, c_flip = slide_score(projection1, projection2_flip) # Score of phase flipped\n\n if score_default <= score_flip:\n ref_intensity, comp_intensity = r, c\n else:\n ref_intensity, comp_intensity = r_flip, c_flip\n\n f = Figure(figsize=(8,4))\n ax = f.add_subplot(111)\n\n x_axis_max = len(ref_intensity)\n y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))\n y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))\n\n ax.plot(ref_intensity, color='black')\n ax.plot(comp_intensity, color='black')\n\n 
ax.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.35, color='deepskyblue')\n ax.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.35, color='yellow')\n\n ax.set_ylabel('Intensity')\n ax.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])\n ax.xaxis.set_visible(False)\n\n f.tight_layout()\n\n if self.projcanvas:\n self.projcanvas.get_tk_widget().destroy()\n self.projtoolbar.destroy()\n\n self.projcanvas = FigureCanvasTkAgg(f, frame)\n self.projcanvas.draw()\n self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)\n self.projtoolbar.update()", "def viz_2D_topomap_intra (epo1: mne.Epochs, epo2: mne.Epochs,\n C1: np.ndarray, C2: np.ndarray,\n threshold: float=0.95, steps: int=2,\n lab: bool = False):\n\n # defining head model and adding sensors\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect = 1)\n ax.axis(\"off\")\n plot_2d_topomap_intra(ax)\n # bads are represented as squares\n plot_sensors_2d_intra(epo1, epo2, lab = lab)\n # plotting links according to sign (red for positive values,\n # blue for negative) and value (line thickness increases\n # with the strength of connectivity)\n plot_links_2d_intra(epo1, epo2, C1=C1, C2=C2, threshold=threshold, steps=steps)\n plt.tight_layout()\n plt.show()\n\n return (ax)", "def test_apply_transform_two_tracks_one_matches(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_pk, 0)\n\n self.app.load_data()\n track = Track(artist='Artist', title='Title', last_transform=tf_pk)\n pk_first = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n track = Track(artist='Artist', title='Title')\n pk_second = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n\n row = self.get_track_by_id(pk_first)\n self.assertEqual(row['lasttransform'], tf_pk)\n row = self.get_track_by_id(pk_second)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk_first)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'Artist')\n\n row = self.get_track_by_id(pk_second)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'Artist 2')", "def _fp32_mte_process_lt_one_block(axis_1_lp_index, sub_axis_1):\n\n def _fp32_inner_last_dim_lt_one_block(axis_0_lp_index, sub_axis_0):\n \"\"\"\n inner process of last dim less than one block\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size +\n axis_0_lp_index * max_no_core_axis_size * axis_1) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_lt_one_block(tik_inst, ub_input, data_in, data_pos_info)\n\n # do transpose\n with tik_inst.new_stmt_scope():\n temp_sub_axis_1 = tik_inst.Scalar(\"int64\")\n temp_sub_axis_0 = tik_inst.Scalar(\"int64\")\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n axis_1_0_2_size = axis_0 * axis_1 * axis_2\n sub_axis_1_0_2_size = sub_axis_1 * sub_axis_0 * axis_2\n\n # to avoid multiple core dirty data\n with tik_inst.if_scope(tik.all(sub_axis_1_0_2_size < data_size_one_block,\n axis_1_0_2_size > data_size_one_block)):\n with tik_inst.if_scope(sub_axis_1 == 1):\n 
temp_sub_axis_0.set_as(_ceil_div(data_size_one_block, axis_2))\n temp_sub_axis_1.set_as(sub_axis_1)\n with tik_inst.else_scope():\n temp_sub_axis_0.set_as(sub_axis_0)\n temp_sub_axis_1.set_as(_ceil_div(data_size_one_block,\n axis_0 * axis_2))\n with tik_inst.else_scope():\n temp_sub_axis_1.set_as(sub_axis_1)\n temp_sub_axis_0.set_as(sub_axis_0)\n\n sub_dim_size = (temp_sub_axis_1, temp_sub_axis_0, axis_2)\n _transpose_by_2_vnchwconv_not_last_dim(tik_inst, ub_input[ub_offset],\n ub_input, sub_dim_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size) * axis_0 +\n axis_0_lp_index * max_no_core_axis_size) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_lt_one_block(tik_inst, data_out, ub_input[ub_offset],\n data_pos_info)\n\n with tik_inst.for_range(0, no_core_loop_cnt) as axis_0_lp_idx:\n _fp32_inner_last_dim_lt_one_block(axis_0_lp_idx, max_no_core_axis_size)\n with tik_inst.if_scope(no_core_left > 0):\n _fp32_inner_last_dim_lt_one_block(no_core_loop_cnt, no_core_left)", "def process(frame, fgbg, kernel, debug, ttrack, angle):\n\t# Rotate the image\n\timage = frame\n\timage = imutils.rotate(image, angle = angle)\n\t\t\n\t# Apply background subtraction and clean up noise\n\tfgmask = fgbg.apply(image)\n\tfgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n\tfgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\n\tret, fgmask = cv2.threshold(fgmask,200,255,cv2.THRESH_BINARY)\n\t\t\n\t# Find the contours from the fgmask binary image\n\tcontours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\t\n\t# Eliminate contours that are too small that likely come from noise and various\n\t# scene shifts. Also apply rectangles to the original image around contours if debug\n\tgoodcont = 0\n\tfor c in contours:\n\t\tif cv2.contourArea(c) > 600:\t\n\t\t\t(x, y, w, h) = cv2.boundingRect(c)\n\t\t\tif debug:\n\t\t\t\tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\t\t\tgoodcont += 1\n\t\n\t# Checks for motion in the frame\n\tif goodcont > 0:\n\t\tismotion = True\n\telse:\n\t\tismotion = False\n\t\n\t# time stamp the frame\n\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\tctime = time.strftime(\"%H:%M:%S\")\n\tif not ttrack:\n\t\tcv2.putText(image, ctime, (10, 450), font, 1.3,(255,255,255),3,cv2.LINE_AA) \n\telse:\n\t\tcv2.putText(image, ctime, (10, 450), font, 1.3,(0,255,0),3,cv2.LINE_AA) \n\treturn image, fgmask, ismotion", "def _preprocess(self, ori_img: np.ndarray, p_trans=False, adjust_angle=False, p_trans_options='traditional') -> (np.ndarray, np.ndarray):\n flagged = None\n if p_trans:\n ori_img, flagged = preprocess(ori_img, hed_model=self.hed_model if p_trans_options == 'hed' else None)\n if adjust_angle:\n ori_img, degree = eval_angle(ori_img, [-5, 5])\n RemoteLogger.info(\"开启了角度校正,度数为%d°\" % degree)\n return ori_img, flagged if flagged is not None else ori_img", "def compute_mtsat(nii_mt, nii_pd, nii_t1,\n tr_mt, tr_pd, tr_t1,\n fa_mt, fa_pd, fa_t1,\n nii_b1map=None):\n # params\n nii_t1map = \\\n None # it would be possible in the future to input T1 map from elsewhere (e.g. MP2RAGE). 
Note: this T1map\n # needs to be in s unit.\n b1correctionfactor = \\\n 0.4 # empirically defined in https://www.frontiersin.org/articles/10.3389/fnins.2013.00095/full#h3\n # R1 threshold, below which values will be clipped.\n r1_threshold = 0.01 # R1=0.01 s^-1 corresponds to T1=100s which is a reasonable threshold\n # Similarly, we also set a threshold for MTsat values\n mtsat_threshold = 1 # we expect MTsat to be on the order of 0.01\n\n # convert all TRs in s\n tr_mt *= 0.001\n tr_pd *= 0.001\n tr_t1 *= 0.001\n\n # Convert flip angles into radians\n fa_mt_rad = np.radians(fa_mt)\n fa_pd_rad = np.radians(fa_pd)\n fa_t1_rad = np.radians(fa_t1)\n\n # ignore warnings from division by zeros (will deal with that later)\n seterr_old = np.seterr(over='ignore', divide='ignore', invalid='ignore')\n\n # check if a T1 map was given in input; if not, compute R1\n if nii_t1map is None:\n # compute R1\n logger.info(\"Compute T1 map...\")\n r1map = 0.5 * np.true_divide((fa_t1_rad / tr_t1) * nii_t1.data - (fa_pd_rad / tr_pd) * nii_pd.data,\n nii_pd.data / fa_pd_rad - nii_t1.data / fa_t1_rad)\n # remove nans and clip unrelistic values\n r1map = np.nan_to_num(r1map)\n ind_unrealistic = np.where(r1map < r1_threshold)\n if ind_unrealistic[0].size:\n logger.warning(\"R1 values were found to be lower than {}. They will be set to inf, producing T1=0 for \"\n \"these voxels.\".format(r1_threshold))\n r1map[ind_unrealistic] = np.inf # set to infinity so that these values will be 0 on the T1map\n # compute T1\n nii_t1map = nii_mt.copy()\n nii_t1map.data = 1. / r1map\n else:\n logger.info(\"Use input T1 map.\")\n r1map = 1. / nii_t1map.data\n\n # Compute A\n logger.info(\"Compute A...\")\n a = (tr_pd * fa_t1_rad / fa_pd_rad - tr_t1 * fa_pd_rad / fa_t1_rad) * \\\n np.true_divide(np.multiply(nii_pd.data, nii_t1.data, dtype=float),\n tr_pd * fa_t1_rad * nii_t1.data - tr_t1 * fa_pd_rad * nii_pd.data)\n\n # Compute MTsat\n logger.info(\"Compute MTsat...\")\n nii_mtsat = nii_mt.copy()\n nii_mtsat.data = tr_mt * np.multiply((fa_mt_rad * np.true_divide(a, nii_mt.data) - 1),\n r1map, dtype=float) - (fa_mt_rad ** 2) / 2.\n # sct.printv('nii_mtsat.data[95,89,14]' + str(nii_mtsat.data[95,89,14]), type='info')\n # remove nans and clip unrelistic values\n nii_mtsat.data = np.nan_to_num(nii_mtsat.data)\n ind_unrealistic = np.where(np.abs(nii_mtsat.data) > mtsat_threshold)\n if ind_unrealistic[0].size:\n logger.warning(\"MTsat values were found to be larger than {}. They will be set to zero for these voxels.\"\n \"\".format(mtsat_threshold))\n nii_mtsat.data[ind_unrealistic] = 0\n # convert into percent unit (p.u.)\n nii_mtsat.data *= 100\n\n # Apply B1 correction to result\n # Weiskopf, N., Suckling, J., Williams, G., Correia, M.M., Inkster, B., Tait, R., Ooi, C., Bullmore, E.T., Lutti,\n # A., 2013. Quantitative multi-parameter mapping of R1, PD(*), MT, and R2(*) at 3T: a multi-center validation.\n # Front. Neurosci. 
7, 95.\n if nii_b1map is not None:\n nii_mtsat.data = np.true_divide(nii_mtsat.data * (1 - b1correctionfactor),\n (1 - b1correctionfactor * nii_b1map.data))\n\n # set back old seterr settings\n np.seterr(**seterr_old)\n\n return nii_mtsat, nii_t1map", "def checkCoast1d(fieldset, x, y, direction=None, time=0):\n if direction == None:\n coast_x = checkCoast1d(fieldset, x, y, direction=\"x\")\n coast_y = checkCoast1d(fieldset, x, y, direction=\"y\")\n return coast_x, coast_y\n\n elif direction == \"x\":\n dims_U = fieldset.U.data.shape\n\n vector_U = fieldset.U.data[time, y, x-1:x+2]\n # vector_U = np.zeros(3)\n # vector_U[0] = fieldset.U.data[time, y, x-1%dims_U[-1]]\n # vector_U[1] = fieldset.U.data[time, y, x%dims_U[-1]]\n # vector_U[2] = fieldset.U.data[time, y, x+1%dims_U[-1]]\n #\n vector_U_trim = np.trim_zeros(vector_U)\n\n if len(vector_U_trim) == 1:\n # Checks if vector contains 2 zeros and one non-zero\n # and if the non_zero is at the begin or end\n if vector_U_trim == vector_U[0]:\n return [True, False]\n elif vector_U_trim == vector_U[-1]:\n return [False, True]\n else:\n return [False, False]\n else:\n return [False, False]\n\n elif direction == \"y\":\n dims_V = fieldset.V.data.shape\n\n vector_V = fieldset.V.data[time, y-1:y+2, x]\n # vector_V = np.zeros(3)\n # vector_V[0] = fieldset.V.data[time, y-1%dims[-2], x-1]\n # vector_V[1] = fieldset.V.data[time, y%dims[-2], x]\n # vector_V[2] = fieldset.V.data[time, y+1%dims[-2], x]\n\n vector_V_trim = np.trim_zeros(vector_V)\n\n if len(vector_V_trim) == 1:\n # Checks if vector contains 2 zeros and one non-zero\n # and if the non_zero is at the begin or end\n if vector_V_trim == vector_V[0]:\n return [True, False]\n elif vector_V_trim == vector_V[-1]:\n return [False, True]\n else:\n return [False, False]\n else:\n return [False, False]\n\n else:\n print \"checkCoast1d(): direction is not None, 'x' or 'y'.\"\n return False", "def has_remap(self):\n return self.mapping1 is not None or self.mapping2 is not None", "def analyze(self, event):\n '''\n\tif not (event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ or event.HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ or event.HLT_IsoTkMu24 or event.HLT_IsoMu24):\n\t self.out.fillBranch(\"pass_selection\",0)\n return True\n '''\n\telectrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n jets = Collection(event, \"Jet\")\n Z = ROOT.TLorentzVector()\n\n\ttight_muons = []\n\tgoodmuons_pt = []\n goodmuons_eta = [] \n\n\tif (len(muons)<=1):\n\t\tself.out.fillBranch(\"pass_selection\",0)\n return True\n\tfor i in range(0,len(muons)):\n #if (muons[i].eta) < 2.4 and (muons[i].mediumId) and (muons[i].pfIsoId)>=3:\n\t if (muons[i].eta) < 2.4 and (muons[i].mediumId):\n\t if (muons[i].pt) <= 25:\n continue\n\t\tfor j in range(i+1,len(muons)):\n \t\t #if (muons[j].eta) < 2.4 and (muons[j].mediumId) and (muons[j].pfIsoId)>=3:\n\t if (muons[j].eta) < 2.4 and (muons[j].mediumId):\n\t if (muons[j].pt) <= 20:\n\t\t\t continue\n\t\t if (muons[i].charge + muons[j].charge == 0):\n\t\t\t Z = muons[i].p4() + muons[j].p4()\n\t\t\t if (Z.M() > 76 and Z.M() < 106):\n\t\t\t\tself.out.fillBranch(\"pass_selection\",1)\n\t \t\tself.out.fillBranch(\"z_pt\",Z.Pt())\n\t\t\t\tself.out.fillBranch(\"z_mass\",Z.M())\n\t\t\t\tself.out.fillBranch(\"z_phi\",Z.Phi())\n\t\t\t\ttight_muons.append(muons[i]) \n\t\t\t\ttight_muons.append(muons[j])\n\t\n\tif len(tight_muons) < 2:\n\t self.out.fillBranch(\"pass_selection\",0)\n\t return True\n\n ngoodmuons = 0\n ngoodmuons = len(tight_muons)\n\tif ngoodmuons != 2:\n 
print(ngoodmuons)\n\n goodmuons_pt.append(tight_muons[0].pt)\n goodmuons_pt.append(tight_muons[1].pt)\n goodmuons_eta.append(tight_muons[0].eta)\n goodmuons_eta.append(tight_muons[1].eta) \n \n self.out.fillBranch(\"muon_pt\",goodmuons_pt)\n self.out.fillBranch(\"muon_eta\",goodmuons_eta) \n \n\tngoodjets = 0\n goodjets_pt = []\n\tgoodjets_id = []\n\tgoodjets_phi = []\n\tgoodjets_dphi_zjet = []\n\n\tfor k in range(0,len(jets)):\n #print(4)\n\t if abs(jets[k].eta) > 2.4:\n continue\n #print(5) \n\t if jets[k].pt < 30:\n\t\tcontinue\n\t #print(6)\n\t pass_lepton_dr_cut = True\n\n\t for i in range(0,len(tight_muons)):\n\t\t#if deltaR(muons[tight_muons[i]].eta,muons[tight_muons[i]].phi,jets[k].eta,jets[k].phi) < 0.4:\n if deltaR(tight_muons[i].eta,tight_muons[i].phi,jets[k].eta,jets[k].phi) < 0.4:\n\t pass_lepton_dr_cut = False\n\n\t if not pass_lepton_dr_cut:\n\t\tcontinue\n\n ngoodjets += 1\n goodjets_pt.append(jets[k].pt)\n\t #goodjets_id.append(jets[k].jetId)\n\t goodjets_phi.append(jets[k].phi)\t \n\t #goodjets_dphi_zjet.append(deltaPhi(Z.Phi(),jets[k].phi)) \n\n if ngoodjets != len(goodjets_pt):\n print(error)\n\n self.out.fillBranch(\"jet_pt\",goodjets_pt)\n\t#self.out.fillBranch(\"jet_id\",goodjets_id)\n\tself.out.fillBranch(\"jet_phi\",goodjets_phi)\n\t#self.out.fillBranch(\"dphi_zjet\",goodjets_dphi_zjet)\n\t'''\n\tif(njet!=0):\n\t print(njet)\n '''\n\tif hasattr(event,\"Generator_weight\"):\n self.out.fillBranch(\"gen_weight\",event.Generator_weight)\n else:\n self.out.fillBranch(\"gen_weight\",0)\n\treturn True", "def trimdynamic_pe(records1, records2, args):\n for rec1, rec2 in izip(records1, records2):\n cutpos1 = 0\n cutpos2 = 0\n tmp_qual1 = [0 if x < args.q else 1 for x in rec1.letter_annotations['phred_quality']]\n tmp_qual1.append(0)\n jumps1 = [i for i, x in enumerate(tmp_qual1[:len(tmp_qual1) - 1]) if [tmp_qual1[i], tmp_qual1[i + 1]] == [1, 0]]\n if len(jumps1) == 0:\n cutpos1 = 0\n if len(jumps1) != 0:\n cutpos1 = numpy.max(jumps1) + 1\n rec1 = rec1[:cutpos1]\n tmp_qual2 = [0 if x < args.q else 1 for x in rec2.letter_annotations['phred_quality']]\n tmp_qual2.append(0)\n jumps2 = [i for i, x in enumerate(tmp_qual2[:len(tmp_qual2) - 1]) if [tmp_qual2[i], tmp_qual2[i + 1]] == [1, 0]]\n if len(jumps2) == 0:\n cutpos2 = 0\n if len(jumps2) != 0:\n cutpos2 = numpy.max(jumps2) + 1\n rec2 = rec2[:cutpos2]\n if args.r:\n rec1 = rec1.reverse_complement(name=True,id=True,description=True)\n rec2 = rec2.reverse_complement(name=True,id=True,description=True)\n if args.d:\n rec1.name += '/1'\n rec1.id += '/1'\n rec1.description += '/1'\n rec2.name += '/2'\n rec2.id += '/2'\n rec2.description += '/2'\n y1 = False\n y2 = False\n if len(rec1) >= args.m and numpy.mean(rec1.letter_annotations['phred_quality']) >= args.a:\n y1 = True\n if len(rec2) >= args.m and numpy.mean(rec2.letter_annotations['phred_quality']) >= args.a:\n y2 = True\n if y1 and y2:\n yield rec1, None, rec2, None, 'pe'\n if y1 and not y2:\n yield None, rec1, None, None, 'se1'\n if not y1 and y2:\n yield None, None, None, rec2, 'se2'", "def CoordTrans(frame1, frame2, original_vec, oe=np.zeros(6), \n theta_gst=float('NaN'), lla_gs=np.zeros(3), mu=c.mu_earth, \n r_body=c.r_earth):\n\n # Orbital Elements\n a, e, inc, raan, w, nu = oe\n\n # Warnings\n oe_frames = ['ric', 'ntw', 'pqw']\n if any(frame in oe_frames for frame in (frame1, frame2)):\n if oe.dot(oe) == 0:\n print('ERROR: You forgot to define the orbital elements!')\n\n topocentric_frames = ['sez']\n if any(frame in topocentric_frames for frame in (frame1, frame2)):\n 
if lla_gs.dot(lla_gs) == 0:\n print('ERROR: You forgot lla for the ground stations!')\n\n # Coordinate System Logic\n if frame1.lower() == 'bci':\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(original_vec, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(original_vec, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(original_vec, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'bcbf':\n if frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(original_vec, r_body=r_body)\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(original_vec, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ric':\n rotated_vec1 = ric2bci(original_vec, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ntw':\n rotated_vec1 = ntw2bci(original_vec, e, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 
'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'pqw':\n rotated_vec1 = pqw2bci(original_vec, raan, inc, w)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'lla':\n rotated_vec1 = lla2bcbf(original_vec, r_body=r_body)\n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'sez':\n rotated_vec1 = sez2bcbf(original_vec, lla_gs, r_body=r_body)\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec2\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n 
else:\n print('ERROR: Frame1 is not included in this function!')\n\n return rotated_vec", "def transformed_retrace(\n q_tm1: Array,\n q_t: Array,\n a_tm1: Array,\n a_t: Array,\n r_t: Array,\n discount_t: Array,\n pi_t: Array,\n mu_t: Array,\n lambda_: float,\n eps: float = 1e-8,\n stop_target_gradients: bool = True,\n tx_pair: TxPair = IDENTITY_PAIR,\n) -> Array:\n chex.assert_rank([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],\n [2, 2, 1, 1, 1, 1, 2, 1])\n chex.assert_type([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],\n [float, float, int, int, float, float, float, float])\n\n pi_a_t = base.batched_index(pi_t, a_t)\n c_t = jnp.minimum(1.0, pi_a_t / (mu_t + eps)) * lambda_\n target_tm1 = transformed_general_off_policy_returns_from_action_values(\n tx_pair, q_t, a_t, r_t, discount_t, c_t, pi_t, stop_target_gradients)\n q_a_tm1 = base.batched_index(q_tm1, a_tm1)\n return target_tm1 - q_a_tm1", "def apply_model(self, original, t1, t2, resolution_scaling_factor=1):\n img = Image()\n img.time_stamp = t2\n\n if t1 == t2:\n img.initialize_with_image(original)\n return img\n\n calc_shift_fnc = self.calculate_shift\n orig_get_fnc = original.get\n interp_fnc = my_math.linear_interpolation\n\n def generate(y, x):\n \"\"\"Function describing the transformed image\"\"\"\n realy = y / resolution_scaling_factor\n realx = x / resolution_scaling_factor\n\n # move to time t2\n posy = y + calc_shift_fnc(realy, realx, t2, 0) - \\\n calc_shift_fnc(realy, realx, t1, 0)\n posx = x + calc_shift_fnc(realy, realx, t2, 1) - \\\n calc_shift_fnc(realy, realx, t1, 1)\n\n x_left = int(posx) # math.floor(pos[0])\n x_right = x_left + 1 # math.ceil(pos[0])\n y_down = int(posy) # math.floor(pos[1])\n y_up = y_down + 1 # math.ceil(pos[1])\n\n v11 = orig_get_fnc(y_down, x_left, resolution_scaling_factor)\n v12 = orig_get_fnc(y_down, x_right, resolution_scaling_factor)\n v21 = orig_get_fnc(y_up, x_left, resolution_scaling_factor)\n v22 = orig_get_fnc(y_up, x_right, resolution_scaling_factor)\n\n return interp_fnc(y_down, x_left, y_up, x_right, v11, v12, v21, v22,\n posy, posx)\n\n img.image_data = np.fromfunction(np.vectorize(generate),\n (original.shape()[0]*resolution_scaling_factor,\n original.shape()[1]*resolution_scaling_factor))\n\n if resolution_scaling_factor != 1:\n img.image_data = skimage.transform.resize(img.image_data,\n original.shape(),\n preserve_range=True)\n\n return img", "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def alignFromFiducials(self, mute=True, shift_markers=True, logfile_residual=''):\n from math import sqrt\n import scipy.optimize\n from pytom.reconstruction.tiltAlignmentFunctions import markerResidual, refMarkerResidualForTiltImage as refResidual\n\n self.sum_called = 0\n print('Shift Markers: ', shift_markers)\n self.optimizeMarkerPositions = shift_markers\n self.irefmark = self.TiltSeries_._TiltAlignmentParas.irefmark\n self.ireftilt = numpy.argwhere( self.TiltSeries_._projIndices.astype(int) == self.TiltSeries_._TiltAlignmentParas.ireftilt)[0][0]\n print('reftilt: ', self.ireftilt, self.TiltSeries_._TiltAlignmentParas.ireftilt, self._ntilt)\n # self._alignmentTransXOrig = numpy.array(self._alignmentTransX)\n # self._alignmentTransYOrig = numpy.array(self._alignmentTransY)\n scoringFunction = self.alignmentScore\n\n self.q = [.001,]*len(self._alignmentTransX)\n\n if 
self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin':\n optimizer = scipy.optimize.fmin\n if not mute:\n print(\"using scipy fmin optimizer\")\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_slsqp':\n optimizer = scipy.optimize.fmin_slsqp\n if not mute:\n print(\"using scipy fmin_slsqp (Sequential Least SQuares Programming) optimizer\")\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_cg':\n optimizer = scipy.optimize.fmin_cg\n if not mute:\n print(\"using scipy fmin_cg (conjugate gradients) optimizer\")\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'leastsq':\n optimizer = scipy.optimize.leastsq\n if not mute:\n print(\"using scipy leastsq optimizer - optimize matrix instead of scalar function\")\n self.TiltSeries_._TiltAlignmentParas.leastsq = True\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_powell':\n optimizer = scipy.optimize.fmin_powell\n if not mute:\n print(\"using scipy fmin_powell optimizer\")\n else:\n if not mute:\n print((\"optimizer \" + str(self.TiltSeries_._TiltAlignmentParas.optimizer) +\n \" not known\"))\n # first update alignment from projections\n self.getMarkersFromTiltSeries(self.TiltSeries_)\n self.getTranslationsFromTiltSeries(self.TiltSeries_)\n self.getRotationsFromTiltSeries(self.TiltSeries_)\n self.getMagnificationsFromTiltSeries(self.TiltSeries_)\n optimizableVariables0 = self.getOptimizableVariables(self.TiltSeries_._TiltAlignmentParas)\n\n # alignment score before optimization\n score = markerResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Markers_=self._Markers,\n cTilt=self._cTilt, sTilt=self._sTilt, ireftilt=self.ireftilt,\n transX=self._alignmentTransX, transY=self._alignmentTransY,\n rotInPlane=self._alignmentRotations, tiltangles=self._tiltAngles,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False, irefmark=self.irefmark)\n\n if not mute:\n print(( \"Alignment score before optimization (square root of residual): \"\n + str(sqrt(score)) ))\n\n # optimize scoring function\n if ((self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin') or\n (self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_powell')):\n optimizableVariables = optimizer(scoringFunction, optimizableVariables0,\n xtol=0.000001, ftol=0.000001,\n maxiter=self.TiltSeries_._TiltAlignmentParas.maxIter, maxfun=None)\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_cg':\n optimizableVariables = optimizer(scoringFunction, optimizableVariables0,\n gtol=0.0000001,\n maxiter=self.TiltSeries_._TiltAlignmentParas.maxIter)\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'fmin_slsqp':\n optimizableVariables = optimizer(scoringFunction, optimizableVariables0,\n iter=self.TiltSeries_._TiltAlignmentParas.maxIter, acc=1e-08)\n elif self.TiltSeries_._TiltAlignmentParas.optimizer == 'leastsq':\n optimizableVariables, success = optimizer(scoringFunction, optimizableVariables0,\n maxfev=self.TiltSeries_._TiltAlignmentParas.maxIter*10, epsfcn=0.0,\n factor=10)\n\n score = markerResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Markers_=self._Markers,\n cTilt=self._cTilt, sTilt=self._sTilt,\n transX=self._alignmentTransX, transY=self._alignmentTransY, ireftilt=self.ireftilt,\n rotInPlane=self._alignmentRotations, irefmark=self.irefmark, tiltangles=self._tiltAngles,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False, logfile_residual=logfile_residual)\n\n 
self.setOptimizableVariables(self.TiltSeries_._TiltAlignmentParas, optimizableVariables)\n\n # finally set values in tilt series\n self.setMarkersInTiltSeries(self.TiltSeries_)\n self.setTranslationsInTiltSeries(self.TiltSeries_)\n self.setRotationsInTiltSeries(self.TiltSeries_)\n self.setMagnificationsInTiltSeries(self.TiltSeries_)\n\n\n if not mute:\n print(\"Alignment Score after optimization: \" + str(sqrt(score)))\n\n\n errors = numpy.zeros((len(self._cTilt)))\n for i in range(len(self._cTilt)):\n errors[i] = refResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Marker=self._Markers[self.TiltSeries_._TiltAlignmentParas.irefmark],\n cTilt=self._cTilt, sTilt=self._sTilt, transX=self._alignmentTransX,\n transY=self._alignmentTransY, rotInPlane=self._alignmentRotations, iproj=i,\n ireftilt=self.ireftilt,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False)\n errorRef = markerResidual(self.TiltSeries_._TiltAlignmentParas.cent,\n Markers_=self._Markers,\n cTilt=self._cTilt, sTilt=self._sTilt,\n transX=self._alignmentTransX, transY=self._alignmentTransY, ireftilt=self.ireftilt,\n rotInPlane=self._alignmentRotations, irefmark=self.irefmark,\n tiltangles=self._tiltAngles,\n isoMag=self._alignmentMagnifications, dBeam=self._alignmentBeamTilt,\n dMagnFocus=None, dRotFocus=None, equationSet=False,\n logfile_residual=logfile_residual, verbose=True, errorRef=True)\n print(\"Error score refmarker: \", errorRef)\n\n\n\n\n # out = open('scores.txt', 'w')\n # for n, s in enumerate(scoresIt):\n # out.write(f'{n} {s}\\n')\n # out.close()\n\n return sqrt(score)", "def computeCoarseAlignmentOld(self, TiltSeries_, mute=True, outfile=''):\n #print('ref index: ', numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0], TiltSeries_._TiltAlignmentParas.ireftilt )\n (psiindeg, shiftX, shiftY, x, y, z, distLine, diffX, diffY,\n shiftVarX, shiftVarY) = alignmentFixMagRot(\n Markers_=self._Markers, cTilt=self._cTilt, sTilt=self._sTilt,\n ireftilt=numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0],\n irefmark=TiltSeries_._TiltAlignmentParas.irefmark,\n r=TiltSeries_._TiltAlignmentParas.r, imdim=TiltSeries_._imdim,\n handflip=TiltSeries_._TiltAlignmentParas.handflip, mute=mute, writeResults=outfile)\n if not mute:\n print((\"Tilt Axis: %.2f\" % psiindeg))\n # copy parameters to TiltSeries\n self._alignmentRotations = numpy.array(self._ntilt * [psiindeg])\n self.setRotationsInTiltSeries(TiltSeries_)\n self._alignmentTransX = shiftX\n self._alignmentTransY = shiftY\n self.set_TranslationsInTiltSeries(TiltSeries_)\n self.Psi = psiindeg\n\n for (imark, Marker) in enumerate(self._Markers):\n Marker.set_r(numpy.array([x[imark], y[imark], z[imark]]))", "def can_transform_full(\n self,\n target_frame: str,\n target_time: Time,\n source_frame: str,\n source_time: Time,\n fixed_frame: str,\n timeout: Duration = Duration()\n ) -> bool:\n try:\n self.lookup_transform_full(target_frame, target_time, source_frame, source_time, fixed_frame, timeout)\n return True\n except tf2.TransformException:\n return False", "def forward(self, observations, full_global_map, egocentric_map):\n grid_x, grid_y = self.to_grid.get_grid_coords(observations['gps'])\n\n if torch.cuda.is_available():\n with torch.cuda.device(self.device):\n agent_view = torch.cuda.FloatTensor(self.num_obs, self.global_map_depth, self.global_map_size, self.global_map_size).fill_(0)\n else:\n agent_view = 
torch.FloatTensor(self.num_obs, self.global_map_depth, self.global_map_size, self.global_map_size).to(self.device).fill_(0)\n\n agent_view[:, :, \n self.global_map_size//2 - math.floor(self.egocentric_map_size/2):self.global_map_size//2 + math.ceil(self.egocentric_map_size/2), \n self.global_map_size//2 - math.floor(self.egocentric_map_size/2):self.global_map_size//2 + math.ceil(self.egocentric_map_size/2)\n ] = egocentric_map\n\n st_pose = torch.cat(\n [-(grid_y.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2),\n -(grid_x.unsqueeze(1)-(self.global_map_size//2))/(self.global_map_size//2), \n observations['compass']], \n dim=1\n )\n rot_mat, trans_mat = get_grid(st_pose, agent_view.size(), self.device)\n rotated = F.grid_sample(agent_view, rot_mat)\n translated = F.grid_sample(rotated, trans_mat)\n \n registered_map = torch.max(full_global_map, translated.permute(0, 2, 3, 1))\n\n return registered_map", "def computeCoarseAlignment(self, TiltSeries_, mute=True, outfile='', optimizeShift=True, logfile_residual=''):\n #print('ref index: ', numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0], TiltSeries_._TiltAlignmentParas.ireftilt )\n (psiindeg, shiftX, shiftY, x, y, z, distLine, diffX, diffY,\n shiftVarX, shiftVarY) = alignmentFixMagRot(\n Markers_=self._Markers, cTilt=self._cTilt, sTilt=self._sTilt,\n ireftilt=numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0],\n irefmark=TiltSeries_._TiltAlignmentParas.irefmark,\n r=TiltSeries_._TiltAlignmentParas.r, imdim=TiltSeries_._imdim,imdimX=TiltSeries_._imdimX, imdimY=TiltSeries_._imdimY,\n handflip=TiltSeries_._TiltAlignmentParas.handflip, mute=mute, writeResults=outfile,\n optimizeShift=optimizeShift, logfile_residual=logfile_residual)\n if not mute:\n print((\"Tilt Axis: %.2f\" % psiindeg))\n # copy parameters to TiltSeries\n ireftilt = numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0]\n self._alignmentRotations = numpy.array(self._ntilt * [psiindeg])\n self.setRotationsInTiltSeries(TiltSeries_)\n self._alignmentTransX = shiftX\n self._alignmentTransY = shiftY\n self.set_TranslationsInTiltSeries(TiltSeries_)\n self.Psi = psiindeg\n\n for (imark, Marker) in enumerate(self._Markers):\n Marker.set_r(numpy.array([x[imark], y[imark], z[imark]]))\n # if not optimizeShift:\n # Marker.set_r(numpy.array([x[imark] + 6.326546124766944 , y[imark] + 5.187672225662868, z[imark]]))", "def apply_and_compare(self, image1_data, image2_data):\n\n return self.transformations_map[self.name](image1_data, image2_data)", "def fun_other(self, core_index, h_per_core, h_in_index,\n l1_xpos, l1_xscale, one_value_buf):\n reg_index_y = self.tik_instance.Scalar(dtype=\"int32\")\n reg_cur_index = self.tik_instance.Scalar(dtype=\"int32\")\n reg_cur_index.set_as(core_index*h_per_core + h_in_index)\n if self.out_size_w <= 600:\n out_size_w_num = _ceil_div(self.out_size_w, 4)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (out_size_w_num*4, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n ub_output_2 = self.tik_instance.Tensor(\n \"float32\", (out_size_w_num*4, self.c_block_size),\n name=\"ub_output_2\", scope=tik.scope_ubuf)\n with self.tik_instance.for_range(0, self.nc1) as nc1_index:\n self.tik_instance.vector_dup(\n MASK, ub_output, 0.0, out_size_w_num, 1, 8)\n self.tik_instance.vector_dup(\n MASK, ub_output_2, 0.0, out_size_w_num, 1, 8)\n h_floor_buf = self.tik_instance.Tensor(\"int32\", (8,),\n 
name=\"h_floor_buf\",\n scope=tik.scope_ubuf)\n h_floor_buf_fp = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"h_floor_buf_fp\",\n scope=tik.scope_ubuf)\n h_scale_buf = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n h_block_buf = self.tik_instance.Tensor(\"int32\", (8,),\n name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n one_u_u_buf = self.tik_instance.Tensor(\"float32\", (2, 8),\n name=\"one_u_u_buf\",\n scope=tik.scope_ubuf)\n const_0 = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"const_0\",\n scope=tik.scope_ubuf)\n self.tik_instance.vector_dup(\n 8, h_block_buf, reg_cur_index, 1, 1, 8)\n self.tik_instance.vconv(8, \"\", h_scale_buf[0],\n h_block_buf[0], 1, 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmuls(8, h_scale_buf, h_scale_buf,\n self.scale_h, 1, 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vector_dup(8, const_0, 0, 1, 1, 8)\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(-0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmax(8, h_scale_buf[0], h_scale_buf[0], const_0[0],\n 1, 1, 1, 1, 8, 8, 0)\n self.tik_instance.vconv(8, \"floor\", h_floor_buf[0],\n h_scale_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vconv(8, \"\", h_floor_buf_fp[0],\n h_floor_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[8],\n h_scale_buf[0], h_floor_buf_fp[0],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[0],\n one_value_buf[0], one_u_u_buf[8],\n 1, 1, 1, 1, 8, 8, 8)\n reg_index_y.set_as(h_floor_buf[0])\n\n one_out = self.tik_instance.Tensor(\n \"float32\", (4*256, self.c_block_size),\n name=\"one_out\", scope=tik.scope_ubuf)\n scale_512_ub_x = self.tik_instance.Tensor(\n \"float32\", (512, 8), name=\"scale_512_ub_x\",\n scope=tik.scope_ubuf)\n int32_256_ub_x = self.tik_instance.Tensor(\n \"int32\", (256, 8), name=\"int32_256_ub_x\",\n scope=tik.scope_ubuf)\n uv_ub = self.tik_instance.Tensor(\n \"float32\", (4*256, 8), name=\"uv_ub\", scope=tik.scope_ubuf)\n reg_index_x = self.tik_instance.Scalar(dtype=\"int32\")\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.for_range(0, self.w_in_loop) \\\n as loop_index:\n self.tik_instance.data_move(\n int32_256_ub_x, l1_xpos[loop_index*256*8], 0, 1,\n 256, 0, 0)\n self.tik_instance.data_move(\n scale_512_ub_x, l1_xscale[loop_index*512*8], 0, 1,\n 512, 0, 0)\n with self.tik_instance.if_scope(\n loop_index != self.w_in_loop - 1):\n ub_input = self.tik_instance.Tensor(\n \"float32\", (256, self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index *\n self.in_size_h + reg_cur_index) *\n self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, 512, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0], scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n 
uv_ub[256*16*repeat_index + 256*8], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n with self.tik_instance.for_range(0, 256) as w_in_index:\n reg_index_x.set_as(int32_256_ub_x[w_in_index*8])\n with self.tik_instance.if_scope(\n reg_index_x != (self.out_size_w - 1)):\n self.tik_instance.vadd(\n 32, ub_output[reg_index_x*16],\n one_out[w_in_index*32],\n ub_output[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 32, ub_output_2[reg_index_x*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[reg_index_x*16],\n one_out[w_in_index*32],\n ub_output[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output[reg_index_x*16],\n ub_output[reg_index_x*16],\n one_out[w_in_index*32+16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_x*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_x*16],\n ub_output_2[reg_index_x*16],\n one_out[256*32+w_in_index*32+16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.else_scope():\n ub_input = self.tik_instance.Tensor(\n \"float32\", (256, self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index*self.in_size_h +\n reg_cur_index)*self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, self.w_in_tail*2, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0], scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n\n with self.tik_instance.for_range(0, self.w_in_tail) \\\n as w_in_index:\n reg_index_x.set_as(int32_256_ub_x[w_in_index*8])\n with self.tik_instance.if_scope(\n reg_index_x != (self.out_size_w - 1)):\n self.tik_instance.vadd(\n 32, ub_output[reg_index_x*16],\n one_out[w_in_index*32],\n ub_output[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 32, ub_output_2[reg_index_x*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[reg_index_x*16],\n one_out[w_in_index*32],\n ub_output[reg_index_x*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output[reg_index_x*16],\n ub_output[reg_index_x*16],\n one_out[w_in_index*32+16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_x*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_x*16],\n 
1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_x*16],\n ub_output_2[reg_index_x*16],\n one_out[256*32+w_in_index*32+16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n #move data output\n self.tik_instance.set_atomic_add(1)\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h + reg_index_y) *\n self.out_size_w*self.c_block_size],\n ub_output[0], 0, 1, self.out_size_w*2, 0, 0)\n with self.tik_instance.if_scope(\n reg_index_y != self.out_size_h - 1):\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h +\n reg_index_y + 1) * self.out_size_w *\n self.c_block_size], ub_output_2[0],\n 0, 1, self.out_size_w*2, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h +\n reg_index_y) * self.out_size_w *\n self.c_block_size], ub_output_2[0],\n 0, 1, self.out_size_w*2, 0, 0)\n self.tik_instance.set_atomic_add(0)\n else:\n cut_num_w = _ceil_div(self.out_size_w, 600)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (600, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n ub_output_2 = self.tik_instance.Tensor(\n \"float32\", (600, self.c_block_size),\n name=\"ub_output_2\", scope=tik.scope_ubuf)\n with self.tik_instance.for_range(0, self.nc1) as nc1_index:\n with self.tik_instance.for_range(0, cut_num_w) as cut_w_index:\n self.tik_instance.vector_dup(\n MASK, ub_output[0], 0.0, 150, 1, 8)\n self.tik_instance.vector_dup(\n MASK, ub_output_2[0], 0.0, 150, 1, 8)\n with self.tik_instance.if_scope(\n cut_w_index != cut_num_w - 1):\n h_floor_buf = self.tik_instance.Tensor(\n \"int32\", (8,), name=\"h_floor_buf\",\n scope=tik.scope_ubuf)\n h_floor_buf_fp = self.tik_instance.Tensor(\n \"float32\", (8,), name=\"h_floor_buf_fp\",\n scope=tik.scope_ubuf)\n h_scale_buf = self.tik_instance.Tensor(\n \"float32\", (8,), name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n h_block_buf = self.tik_instance.Tensor(\n \"int32\", (8,), name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n one_u_u_buf = self.tik_instance.Tensor(\n \"float32\", (2, 8), name=\"one_u_u_buf\",\n scope=tik.scope_ubuf)\n const_0 = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"const_0\",\n scope=tik.scope_ubuf)\n self.tik_instance.vector_dup(\n 8, h_block_buf, reg_cur_index, 1, 1, 8)\n self.tik_instance.vconv(8, \"\", h_scale_buf[0],\n h_block_buf[0], 1, 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmuls(\n 8, h_scale_buf, h_scale_buf, self.scale_h, 1,\n 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vector_dup(8, const_0, 0, 1, 1, 8)\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(-0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmax(8, h_scale_buf[0], h_scale_buf[0], const_0[0],\n 1, 1, 1, 1, 8, 8, 0)\n self.tik_instance.vconv(\n 8, \"floor\", h_floor_buf[0], h_scale_buf[0],\n 1, 1, 1, 8, 8)\n self.tik_instance.vconv(8, \"\", h_floor_buf_fp[0],\n h_floor_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[8],\n h_scale_buf[0],\n h_floor_buf_fp[0],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[0],\n one_value_buf[0],\n one_u_u_buf[8],\n 1, 1, 1, 1, 8, 8, 8)\n reg_index_y.set_as(h_floor_buf[0])\n #calc output\n one_out = self.tik_instance.Tensor(\n \"float32\", (4*256, self.c_block_size),\n name=\"one_out\", 
scope=tik.scope_ubuf)\n scale_512_ub_x = self.tik_instance.Tensor(\n \"float32\", (512, 8),\n name=\"scale_512_ub_x\", scope=tik.scope_ubuf)\n int32_256_ub_x = self.tik_instance.Tensor(\n \"int32\", (256, 8), name=\"int32_256_ub_x\",\n scope=tik.scope_ubuf)\n uv_ub = self.tik_instance.Tensor(\n \"float32\", (4*256, 8), name=\"uv_ub\",\n scope=tik.scope_ubuf)\n reg_index_x = self.tik_instance.Scalar(dtype=\"int32\")\n reg_index_w = self.tik_instance.Scalar(dtype=\"int32\")\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.for_range(0, self.w_in_loop) \\\n as loop_index:\n self.tik_instance.data_move(\n int32_256_ub_x, l1_xpos[loop_index*256*8],\n 0, 1, 256, 0, 0)\n self.tik_instance.data_move(\n scale_512_ub_x, l1_xscale[loop_index*512*8],\n 0, 1, 512, 0, 0)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (256, self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n with self.tik_instance.if_scope(\n loop_index != self.w_in_loop - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index*self.in_size_h +\n reg_cur_index) *\n self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, 512, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0],\n scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) \\\n as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index],\n ub_input[8], 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[0], 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[8], 32, 4, 1, 2, 32, 8, 16)\n with self.tik_instance.for_range(0, 256) \\\n as w_in_index:\n reg_index_x.set_as(\n int32_256_ub_x[w_in_index*8])\n reg_index_w.set_as(reg_index_x % 600)\n with self.tik_instance.if_scope(\n tik.all(\n reg_index_x >= cut_w_index*600,\n reg_index_x < (cut_w_index + 1) *\n 600)):\n with self.tik_instance.if_scope(\n reg_index_w != 599):\n self.tik_instance.vadd(\n 32, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 32, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.if_scope(\n reg_index_x == cut_w_index*600 - 1):\n self.tik_instance.vadd(\n 16, ub_output[0],\n one_out[w_in_index*32+16],\n ub_output[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[0],\n one_out[256*32+w_in_index*32+16],\n ub_output_2[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.else_scope():\n 
self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index*self.in_size_h +\n reg_cur_index) *\n self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, self.w_in_tail*2, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) \\\n as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[0], 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[8], 32, 4, 1, 2, 32, 8, 16)\n\n with self.tik_instance.for_range(\n 0, self.w_in_tail) as w_in_index:\n reg_index_x.set_as(\n int32_256_ub_x[w_in_index*8])\n reg_index_w.set_as(reg_index_x % 600)\n with self.tik_instance.if_scope(\n tik.all(\n reg_index_x >= cut_w_index*600,\n reg_index_x < (cut_w_index + 1) *\n 600)):\n with self.tik_instance.if_scope(\n reg_index_w != 599):\n self.tik_instance.vadd(\n 32, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 32, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.if_scope(\n reg_index_x == cut_w_index*600 - 1):\n self.tik_instance.vadd(\n 16, ub_output[0], ub_output[0],\n one_out[w_in_index*32+16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[0],\n one_out[256*32+w_in_index*32+16],\n ub_output_2[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n #move data output\n self.tik_instance.set_atomic_add(1)\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n reg_index_y) *\n self.out_size_w + cut_w_index*600) *\n self.c_block_size], ub_output[0],\n 0, 1, 600*2, 0, 0)\n with self.tik_instance.if_scope(\n reg_index_y != self.out_size_h - 1):\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n reg_index_y + 1) *\n self.out_size_w +\n cut_w_index*600) *\n self.c_block_size],\n ub_output_2[0], 0, 1, 600*2, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n reg_index_y) *\n self.out_size_w +\n cut_w_index*600) *\n self.c_block_size],\n ub_output_2[0], 0, 1, 600*2, 0, 0)\n self.tik_instance.set_atomic_add(0)\n with self.tik_instance.else_scope():\n h_floor_buf = self.tik_instance.Tensor(\n \"int32\", (8,), name=\"h_floor_buf\",\n scope=tik.scope_ubuf)\n h_floor_buf_fp = self.tik_instance.Tensor(\n \"float32\", (8,), name=\"h_floor_buf_fp\",\n scope=tik.scope_ubuf)\n h_scale_buf = self.tik_instance.Tensor(\n \"float32\", (8,), 
name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n h_block_buf = self.tik_instance.Tensor(\n \"int32\", (8,), name=\"h_scale_buf\",\n scope=tik.scope_ubuf)\n one_u_u_buf = self.tik_instance.Tensor(\n \"float32\", (2, 8), name=\"one_u_u_buf\",\n scope=tik.scope_ubuf)\n const_0 = self.tik_instance.Tensor(\"float32\", (8,),\n name=\"const_0\",\n scope=tik.scope_ubuf)\n self.tik_instance.vector_dup(\n 8, h_block_buf, reg_cur_index, 1, 1, 8)\n self.tik_instance.vconv(8, \"\", h_scale_buf[0],\n h_block_buf[0], 1, 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmuls(8, h_scale_buf, h_scale_buf,\n self.scale_h, 1,\n 1, 1, 8, 8)\n if self.half_pixel_centers:\n self.tik_instance.vector_dup(8, const_0, 0, 1, 1, 8)\n self.tik_instance.vadds(8, h_scale_buf, h_scale_buf,\n float(-0.5), 1, 1, 1, 8, 8)\n self.tik_instance.vmax(8, h_scale_buf[0], h_scale_buf[0], const_0[0],\n 1, 1, 1, 1, 8, 8, 0)\n self.tik_instance.vconv(8, \"floor\", h_floor_buf[0],\n h_scale_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vconv(8, \"\", h_floor_buf_fp[0],\n h_floor_buf[0], 1, 1, 1, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[8],\n h_scale_buf[0],\n h_floor_buf_fp[0],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vsub(8, one_u_u_buf[0],\n one_value_buf[0], one_u_u_buf[8],\n 1, 1, 1, 1, 8, 8, 8)\n reg_index_y.set_as(h_floor_buf[0])\n #calc\n one_out = self.tik_instance.Tensor(\n \"float32\", (4*256, self.c_block_size),\n name=\"one_out\", scope=tik.scope_ubuf)\n scale_512_ub_x = self.tik_instance.Tensor(\n \"float32\", (512, 8),\n name=\"scale_512_ub_x\", scope=tik.scope_ubuf)\n int32_256_ub_x = self.tik_instance.Tensor(\n \"int32\", (256, 8),\n name=\"int32_256_ub_x\",\n scope=tik.scope_ubuf)\n uv_ub = self.tik_instance.Tensor(\n \"float32\", (4*256, 8),\n name=\"uv_ub\", scope=tik.scope_ubuf)\n reg_index_x = self.tik_instance.Scalar(dtype=\"int32\")\n reg_index_w = self.tik_instance.Scalar(dtype=\"int32\")\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.for_range(0, self.w_in_loop) \\\n as loop_index:\n self.tik_instance.data_move(\n int32_256_ub_x, l1_xpos[loop_index*256*8],\n 0, 1, 256, 0, 0)\n self.tik_instance.data_move(\n scale_512_ub_x, l1_xscale[loop_index*512*8],\n 0, 1, 512, 0, 0)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (256, self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n with self.tik_instance.if_scope(\n loop_index != self.w_in_loop - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index*self.in_size_h +\n reg_cur_index) *\n self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, 512, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0],\n scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0],\n 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) \\\n as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[0], 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n 
uv_ub[256*16*repeat_index + 256*8],\n ub_input[8], 32, 4, 1, 2, 32, 8, 16)\n with self.tik_instance.for_range(0, 256) \\\n as w_in_index:\n reg_index_x.set_as(\n int32_256_ub_x[w_in_index*8])\n reg_index_w.set_as(reg_index_x % 600)\n with self.tik_instance.if_scope(\n tik.all(\n reg_index_x >= cut_w_index*600,\n reg_index_x < (cut_w_index + 1) *\n 600)):\n with self.tik_instance.if_scope(\n reg_index_x != (\n self.out_size_w - 1)):\n self.tik_instance.vadd(\n 32, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 32, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output[reg_index_w*16],\n one_out[w_in_index*32 + 16],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_w*16],\n one_out[\n 256*32+w_in_index*32+16],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.if_scope(\n reg_index_x == cut_w_index*600 - 1):\n self.tik_instance.vadd(\n 16, ub_output[0],\n one_out[w_in_index*32 + 16],\n ub_output[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[0],\n one_out[256*32 + w_in_index*32+16],\n ub_output_2[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(nc1_index*self.in_size_h +\n reg_cur_index) *\n self.in_size_w*16 +\n loop_index*256*16],\n 0, 1, self.w_in_tail*2, 0, 0)\n\n self.tik_instance.vmul(\n MASK, uv_ub[0], one_u_u_buf[0],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n self.tik_instance.vmul(\n MASK, uv_ub[2*256*8], one_u_u_buf[8],\n scale_512_ub_x[0], 64, 1, 0, 1, 8, 0, 8)\n with self.tik_instance.for_range(0, 2) \\\n as repeat_index:\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index],\n uv_ub[256*16*repeat_index], ub_input[0],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+8],\n uv_ub[256*16*repeat_index], ub_input[8],\n 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+16],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[0], 32, 4, 1, 2, 32, 8, 16)\n self.tik_instance.vmul(\n MASK, one_out[256*32*repeat_index+24],\n uv_ub[256*16*repeat_index + 256*8],\n ub_input[8], 32, 4, 1, 2, 32, 8, 16)\n with self.tik_instance.for_range(\n 0, self.w_in_tail) as w_in_index:\n reg_index_x.set_as(\n int32_256_ub_x[w_in_index*8])\n reg_index_w.set_as(reg_index_x % 600)\n with self.tik_instance.if_scope(\n tik.all(\n reg_index_x >= cut_w_index*600,\n reg_index_x < (cut_w_index + 1) *\n 600)):\n with self.tik_instance.if_scope(\n reg_index_x != (\n self.out_size_w - 1)):\n self.tik_instance.vadd(\n 32, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 32, ub_output_2[reg_index_w*16],\n one_out[256*32+w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with 
self.tik_instance.else_scope():\n self.tik_instance.vadd(\n 16, ub_output[reg_index_w*16],\n one_out[w_in_index*32],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output[reg_index_w*16],\n one_out[w_in_index*32+16],\n ub_output[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_w*16],\n one_out[256*32 + w_in_index*32],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[reg_index_w*16],\n one_out[256*32 +\n w_in_index*32 + 16],\n ub_output_2[reg_index_w*16],\n 1, 1, 1, 1, 8, 8, 8)\n with self.tik_instance.if_scope(\n reg_index_x == cut_w_index*600 - 1):\n self.tik_instance.vadd(\n 16, ub_output[0],\n one_out[w_in_index*32+16],\n ub_output[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vadd(\n 16, ub_output_2[0],\n one_out[256*32+w_in_index*32+16],\n ub_output_2[0], 1, 1, 1, 1, 8, 8, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[0], float(0), 128, 1, 8)\n self.tik_instance.vector_dup(\n MASK, one_out[8192], float(0), 128, 1, 8)\n #move data output\n self.tik_instance.set_atomic_add(1)\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n reg_index_y) *\n self.out_size_w +\n cut_w_index*600) *\n self.c_block_size],\n ub_output[0], 0, 1, (self.out_size_w -\n cut_w_index*600)*2, 0, 0)\n with self.tik_instance.if_scope(\n reg_index_y != self.out_size_h - 1):\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n reg_index_y + 1) *\n self.out_size_w +\n cut_w_index*600) *\n self.c_block_size],\n ub_output_2[0], 0, 1, (self.out_size_w -\n cut_w_index*600)*2, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n self.output_gm[((nc1_index*self.out_size_h +\n reg_index_y) *\n self.out_size_w +\n cut_w_index*600) *\n self.c_block_size],\n ub_output_2[0], 0, 1, (self.out_size_w -\n cut_w_index*600)*2, 0, 0)\n self.tik_instance.set_atomic_add(0)", "def _fp32_1_0_2_mc_on_1(loop_cnt, left_data):\n\n def _fp32_mte_process_1(axis_0_index, w_lp_index, sub_w_size):\n \"\"\"\n do transpose by mte for not last dim under multiple core on axis 1\n \"\"\"\n\n def _fp32_inner_mte_1(h_lp_index, sub_h_size):\n \"\"\"\n inner mte\n \"\"\"\n # move data in\n in_offset = ((block_idx * per_core_col_size + axis_0_index * axis_1 +\n h_lp_index * max_core_axis_size) * axis_2 +\n w_lp_index * max_no_core_axis_size)\n data_in_inf = (sub_h_size, sub_w_size, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_be_one_block(tik_inst, ub_input, data_in, data_in_inf)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size * axis_0 + axis_0_index +\n h_lp_index * max_core_axis_size * axis_0) * axis_2 +\n w_lp_index * max_no_core_axis_size)\n data_out_inf = (sub_h_size, sub_w_size, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_be_one_block(tik_inst, data_out, ub_input, data_out_inf)\n\n with tik_inst.for_range(0, loop_cnt) as h_lp_idx:\n _fp32_inner_mte_1(h_lp_idx, max_core_axis_size)\n with tik_inst.if_scope(left_data > 0):\n _fp32_inner_mte_1(loop_cnt, left_data)\n\n with tik_inst.for_range(0, axis_0) as axis_0_idx:\n with tik_inst.for_range(0, no_core_loop_cnt) as w_lp_idx:\n _fp32_mte_process_1(axis_0_idx, w_lp_idx, max_no_core_axis_size)\n with tik_inst.if_scope(no_core_left > 0):\n _fp32_mte_process_1(axis_0_idx, no_core_loop_cnt, no_core_left)", "def _O_and_O2_correction(self, alt):\n _O_and_O2_correction_fast(alt, self.Texo)", "def two_scale_forward_high(self, x_1x):\n x_hi = 
nn.functional.interpolate(\n x_1x,\n scale_factor=1.5,\n align_corners=self.align_corners,\n mode='bilinear')\n\n lo_outs = self.single_scale_forward(x_1x)\n pred_10x = lo_outs['cls_out']\n p_lo = pred_10x\n aux_lo = lo_outs['aux_out']\n logit_attn = lo_outs['logit_attn']\n\n hi_outs = self.single_scale_forward(x_hi)\n pred_15x = hi_outs['cls_out']\n p_hi = pred_15x\n aux_hi = hi_outs['aux_out']\n\n p_lo = p_lo * logit_attn\n aux_lo = aux_lo * logit_attn\n p_hi = scale_as(p_hi, p_lo)\n aux_hi = scale_as(aux_hi, aux_lo)\n\n # combine lo and hi predictions with attention\n joint_pred = p_lo + p_hi * (1 - logit_attn)\n joint_aux = aux_lo + aux_hi * (1 - logit_attn)\n\n output = [joint_pred, joint_aux]\n\n # Optionally, apply supervision to the multi-scale predictions\n # directly.\n scaled_pred_15x = scale_as(pred_15x, p_lo)\n output.extend(output)\n return output", "def proz(): \r\n print(\"processing: \",CURDATA()[0]) \r\n Check_180turn(left_boundary,right_boundary)\r\n EF() #exponential window multiplication + fourier\r\n APK0() #1. Phase correction 0th Ordnung\r\n APK1() #1. Phase correction 1st Ordnung\r\n ABS() #Baseline correction\r\n APK()\r\n ABS() #Baseline correction\r\n Check_180turn(left_boundary,right_boundary)", "def registration(im1, im2, num = 10, opt = 'py', outputPath = 'None'):\n\n # determin which one is the right side of the breast\n b_size = 5\n n_row, n_col = im1.shape\n side = 0\n if np.sum(im1[0:b_size,0:b_size]) < np.sum(im1[0:b_size,n_col-b_size:n_col]):\n side = 1 \n\n # flip the right side image\n if side == 1:\n im1 = np.fliplr(im1)\n else:\n im2 = np.fliplr(im2) \n\n # find edges of both images\n edge1 = findEdge(im1)\n edge2 = findEdge(im2)\n\n # tune edges of both side\n edge1 = tuneEdge(edge1,im1.shape)\n edge2 = tuneEdge(edge2,im2.shape)\n\n # samping from both side\n points1 = contour_sampling(edge1, num)\n points2 = contour_sampling(edge2, num)\n\n # for debugging .........................\n sam_im1 = np.zeros(im1.shape,np.float32)\n for point in points1:\n sam_im1[point[0],point[1]] = 1\n\n sam_im2 = np.zeros(im2.shape,np.float32)\n for point in points2:\n sam_im2[point[0],point[1]] = 1\n \n selem = disk(15)\n dilated1 = ndimage.convolve(sam_im1, selem, mode='constant', cval=0)\n dilated2 = ndimage.convolve(sam_im2, selem, mode='constant', cval=0)\n\n points1 = np.asarray(points1)\n points2 = np.asarray(points2)\n \n # Thin Plate Spline interpolation\n dst = np.zeros(im1.shape)\n # im1 as source\n if opt == 'py': \n tps = TPSpline.TPSpline()\n tps.setCorrespondences(points1, points2)\n dst = tps.warpImage(im1)\n return dst\n\n if opt == 'c':\n print \"Please run the interpolation with C++ exe file!\"\n print \"./TPSpline /home/yanbin/Tomosynthesis/libs/TPSpline/test/ps.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/pd.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/5016_test.tif /home/yanbin/Tomosynthesis/libs/TPSpline/test/dst.tif\"\n np.savetxt(outputPath + 'ps.txt', points1, '%d', delimiter=' ') # X is an array\n np.savetxt(outputPath + 'pd.txt', points2, '%d', delimiter=' ') # X is an array\n tiffLib.imsave(outputPath + 'im1.tif',im1)\n return None", "def test_transform_track_with_single_transform_high_id(self):\n track = Track(artist='Artist', title='Title')\n tflist = TransformList()\n tflist.add_transform(Transform(100,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 0)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, 
False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 100)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.transformed, True)", "def shiftDetector(frame, onh_info=None):\n norm = frame/np.max(frame)#(2**16)\n anchorCol = norm[:,int((frame.shape[1])/2)]\n shifts = [np.argmax(signal.correlate(norm[:,i],anchorCol,mode='same'))-int((frame.shape[0])/2) for i in range(frame.shape[1])]\n \n return shifts", "def test_transform_track_with_two_transforms_with_gap_in_numbering(self):\n track = Track(artist='Artist', title='Title')\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(3,\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2',\n ))\n\n self.assertEqual(track.last_transform, 0)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 3)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def T1(TR, FA_1, FA_2, M1, M2):\n a = M1 * np.sin(FA_2) / (M2 * np.sin(FA_1))\n return -TR / np.log((a - 1.0) / ((a * np.cos(FA_1)) - np.cos(FA_2)))", "def _fp32_mte_process_1(axis_0_index, w_lp_index, sub_w_size):\n\n def _fp32_inner_mte_1(h_lp_index, sub_h_size):\n \"\"\"\n inner mte\n \"\"\"\n # move data in\n in_offset = ((block_idx * per_core_col_size + axis_0_index * axis_1 +\n h_lp_index * max_core_axis_size) * axis_2 +\n w_lp_index * max_no_core_axis_size)\n data_in_inf = (sub_h_size, sub_w_size, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_be_one_block(tik_inst, ub_input, data_in, data_in_inf)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size * axis_0 + axis_0_index +\n h_lp_index * max_core_axis_size * axis_0) * axis_2 +\n w_lp_index * max_no_core_axis_size)\n data_out_inf = (sub_h_size, sub_w_size, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_be_one_block(tik_inst, data_out, ub_input, data_out_inf)\n\n with tik_inst.for_range(0, loop_cnt) as h_lp_idx:\n _fp32_inner_mte_1(h_lp_idx, max_core_axis_size)\n with tik_inst.if_scope(left_data > 0):\n _fp32_inner_mte_1(loop_cnt, left_data)", "def test_transform_track_full_transform(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n cond_album=True, change_album=True, pattern_album='Album', to_album='Album 2',\n cond_title=True, change_title=True, pattern_title='Title', to_title='Title 2',\n cond_ensemble=True, change_ensemble=True, pattern_ensemble='Ensemble', to_ensemble='Ensemble 2',\n cond_composer=True, change_composer=True, pattern_composer='Composer', to_composer='Composer 2',\n cond_conductor=True, change_conductor=True, pattern_conductor='Conductor', to_conductor='Conductor 2',\n )\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.ensemble, 'Ensemble 2')\n self.assertEqual(track.conductor, 'Conductor 2')\n 
self.assertEqual(track.composer, 'Composer 2')\n self.assertEqual(track.transformed, True)", "def arrived(self):\n \"\"\" Responsible for transformations \"\"\"\n \n if self.phase == 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, array([0,0]))\n else: \n return array_equal(self.destination, array([0,0]))\n elif self.phase > 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, self.position)\n else: \n return array_equal(self.destination, self.position)", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def plot_links_2d_intra(epo1: mne.Epochs, epo2: mne.Epochs,\n C1: np.ndarray, C2: np.ndarray,\n threshold: str='auto', steps: int=2):\n\n # extract sensor infos and transform loc to fit with headmodel\n loc1 = copy(np.array([ch['loc'][:3] for ch in epo1.info['chs']]))\n loc1 = transform_2d_intra(loc1, traX=-0.178, traY=0.012, traZ=0, rotZ=(-np.pi/2))\n\n loc2 = copy(np.array([ch['loc'][:3] for ch in epo2.info['chs']]))\n loc2 = transform_2d_intra(loc2, traX=0.178, traY=0.012, traZ=0, rotZ=(-np.pi/2))\n\n ctr1 = np.nanmean(loc1, 0)\n ctr2 = np.nanmean(loc2, 0)\n \n # Calculate vmin and vmax for colormap as min and max [C1, C2]\n Cmax1=np.nanmax(C1[:])\n Cmax2=np.nanmax(C2[:])\n Cmax=[]\n Cmax=[Cmax1, Cmax2]\n vmax=np.nanmax(Cmax)\n Cmin1=np.nanmin(C1[:])\n Cmin2=np.nanmin(C2[:])\n Cmin=[]\n Cmin=[Cmin1, Cmin2]\n vmin=np.min(Cmin)\n\n # Calculate automatic threshold\n if threshold == 'auto':\n threshold = np.max([np.median(C1, 0),np.median(C2,0)])+np.max([np.std(C1, 0),np.std(C2, 0)])\n else:\n threshold = threshold\n \n\n # Define colormap for both participant\n cmap_p = matplotlib.cm.get_cmap('Reds')\n norm_p = matplotlib.colors.Normalize(vmin=threshold, vmax=vmax)\n cmap_n = matplotlib.cm.get_cmap('Blues_r')\n norm_n = matplotlib.colors.Normalize(vmin=vmin, vmax=-threshold)\n\n # plot links\n for e1 in range(len(loc1)):\n x1 = loc1[e1, 0]\n y1 = loc1[e1, 1]\n for e2 in range(len(loc1)):\n x2 = loc1[e2, 0]\n y2 = loc1[e2, 1]\n if C1[e1, e2] >= threshold:\n color_p = cmap_p(norm_p(C1[e1, e2]))\n if steps <= 2:\n weight = 0.2 +1.6*((C1[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n plt.plot([loc1[e1, 0], loc1[e2, 0]],\n 
[loc1[e1, 1], loc1[e2, 1]],\n '-', color=color_p, linewidth=weight)\n else:\n alphas = np.linspace(0, 1, steps)\n weight = 0.2 +1.6*((C1[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n for idx in range(len(alphas)-1):\n a = alphas[idx]\n b = alphas[idx+1]\n xn = ((1-a)**3 * x1 +\n 3 * (1-a)**2 * a * (2 * x1 - ctr1[0]) +\n 3 * (1-a) * a**2 * (2 * x2 - ctr1[0]) +\n a**3 * x2)\n xnn = ((1-b)**3 * x1 +\n 3 * (1-b)**2 * b * (2 * x1 - ctr1[0]) +\n 3 * (1-b) * b**2 * (2 * x2 - ctr1[0]) +\n b**3 * x2)\n yn = ((1-a)**3 * y1 +\n 3 * (1-a)**2 * a * (2 * y1 - ctr1[1]) +\n 3 * (1-a) * a**2 * (2 * y2 - ctr1[1]) +\n a**3 * y2)\n ynn = ((1-b)**3 * y1 +\n 3 * (1-b)**2 * b * (2 * y1 - ctr1[1]) +\n 3 * (1-b) * b**2 * (2 * y2 - ctr1[1]) +\n b**3 * y2)\n plt.plot([xn, xnn], [yn, ynn],\n '-', color=color_p, linewidth=weight)\n if C1[e1, e2] <= -threshold:\n color_n = cmap_n(norm_n(C1[e1, e2]))\n if steps <= 2:\n weight = 0.2 +1.6*((-C1[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n plt.plot([loc1[e1, 0], loc1[e2, 0]],\n [loc1[e1, 1], loc1[e2, 1]],\n '-', color=color_n, linewidth=weight)\n else:\n alphas = np.linspace(0, 1, steps)\n weight = 0.2 +1.6*((-C1[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n for idx in range(len(alphas)-1):\n a = alphas[idx]\n b = alphas[idx+1]\n xn = ((1-a)**3 * x1 +\n 3 * (1-a)**2 * a * (2 * x1 - ctr1[0]) +\n 3 * (1-a) * a**2 * (2 * x2 - ctr1[0]) +\n a**3 * x2)\n xnn = ((1-b)**3 * x1 +\n 3 * (1-b)**2 * b * (2 * x1 - ctr1[0]) +\n 3 * (1-b) * b**2 * (2 * x2 - ctr1[0]) +\n b**3 * x2)\n yn = ((1-a)**3 * y1 +\n 3 * (1-a)**2 * a * (2 * y1 - ctr1[1]) +\n 3 * (1-a) * a**2 * (2 * y2 - ctr1[1]) +\n a**3 * y2)\n ynn = ((1-b)**3 * y1 +\n 3 * (1-b)**2 * b * (2 * y1 - ctr1[1]) +\n 3 * (1-b) * b**2 * (2 * y2 - ctr1[1]) +\n b**3 * y2)\n plt.plot([xn, xnn], [yn, ynn],\n '-', color=color_n, linewidth=weight)\n\n for e1 in range(len(loc2)):\n x1 = loc2[e1, 0]\n y1 = loc2[e1, 1]\n for e2 in range(len(loc2)):\n x2 = loc2[e2, 0]\n y2 = loc2[e2, 1]\n if C2[e1, e2] >= threshold:\n color_p = cmap_p(norm_p(C2[e1, e2]))\n if steps <= 2:\n weight = 0.2 +1.6*((C2[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n plt.plot([loc2[e1, 0], loc2[e2, 0]],\n [loc2[e1, 1], loc2[e2, 1]],\n '-', color=color_p, linewidth=weight)\n else:\n alphas = np.linspace(0, 1, steps)\n weight = 0.2 +1.6*((C2[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n for idx in range(len(alphas)-1):\n a = alphas[idx]\n b = alphas[idx+1]\n xn = ((1-a)**3 * x1 +\n 3 * (1-a)**2 * a * (2 * x1 - ctr2[0]) +\n 3 * (1-a) * a**2 * (2 * x2 - ctr2[0]) +\n a**3 * x2)\n xnn = ((1-b)**3 * x1 +\n 3 * (1-b)**2 * b * (2 * x1 - ctr2[0]) +\n 3 * (1-b) * b**2 * (2 * x2 - ctr2[0]) +\n b**3 * x2)\n yn = ((1-a)**3 * y1 +\n 3 * (1-a)**2 * a * (2 * y1 - ctr2[1]) +\n 3 * (1-a) * a**2 * (2 * y2 - ctr2[1]) +\n a**3 * y2)\n ynn = ((1-b)**3 * y1 +\n 3 * (1-b)**2 * b * (2 * y1 - ctr2[1]) +\n 3 * (1-b) * b**2 * (2 * y2 - ctr2[1]) +\n b**3 * y2)\n plt.plot([xn, xnn], [yn, ynn],\n '-', color=color_p, linewidth=weight)\n if C2[e1, e2] <= -threshold:\n color_n = cmap_n(norm_n(C2[e1, e2]))\n if steps <= 2:\n weight = 0.2 +1.6*((-C2[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n plt.plot([loc2[e1, 0], loc2[e2, 0]],\n [loc2[e1, 1], loc2[e2, 1]],\n '-', color=color_n, linewidth=weight)\n else:\n alphas = np.linspace(0, 1, steps)\n weight = 0.2 +1.6*((-C2[e1, e2]-threshold)/(np.nanmax(vmax-threshold)))\n for idx in range(len(alphas)-1):\n a = alphas[idx]\n b = alphas[idx+1]\n xn = ((1-a)**3 * x1 +\n 3 * (1-a)**2 * a * (2 * x1 - ctr2[0]) +\n 3 * (1-a) * a**2 * (2 * x2 - 
ctr2[0]) +\n a**3 * x2)\n xnn = ((1-b)**3 * x1 +\n 3 * (1-b)**2 * b * (2 * x1 - ctr2[0]) +\n 3 * (1-b) * b**2 * (2 * x2 - ctr2[0]) +\n b**3 * x2)\n yn = ((1-a)**3 * y1 +\n 3 * (1-a)**2 * a * (2 * y1 - ctr2[1]) +\n 3 * (1-a) * a**2 * (2 * y2 - ctr2[1]) +\n a**3 * y2)\n ynn = ((1-b)**3 * y1 +\n 3 * (1-b)**2 * b * (2 * y1 - ctr2[1]) +\n 3 * (1-b) * b**2 * (2 * y2 - ctr2[1]) +\n b**3 * y2)\n plt.plot([xn, xnn], [yn, ynn],\n '-', color=color_n, linewidth=weight)", "def __t_fine__(self, adc_temperature):\n var1 = (((adc_temperature >> 3) -\n (self._calibration_t[0] << 1)) * self._calibration_t[1]) >> 11\n var2 = (((\n ((adc_temperature >> 4) - self._calibration_t[0]) *\n ((adc_temperature >> 4) - self._calibration_t[0])) >> 12)\n * self._calibration_t[2]) >> 14\n return var1 + var2", "def test_converts_to_agisoft_and_back_by_optimization() -> None:\n # k[3:] must be non-zero\n cam = Camera(\n imgsz=(4288, 2848),\n f=(3100, 3200),\n c=(5, -4),\n k=(0.1, -0.05, 0.02, 0.003),\n p=(0.03, 0.04),\n )\n xcam_initial = Agisoft.from_camera(cam, optimize=False)\n residuals_initial = Converter(xcam_initial, cam).residuals()\n xcam = Agisoft.from_camera(cam)\n residuals = Converter(xcam, cam).residuals()\n assert np.sum(residuals ** 2) < np.sum(residuals_initial ** 2)\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-2)\n # k4 or b2 must be non-zero (but small)\n xcam.k4 = 1e-7\n xcam.b2 = 1e-12\n cam_initial = xcam.to_camera(optimize=False)\n residuals_initial = Converter(xcam, cam_initial).residuals()\n cam = xcam.to_camera()\n residuals = Converter(xcam, cam).residuals()\n assert np.sum(residuals ** 2) < np.sum(residuals_initial ** 2)\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-9)", "def _SetAnatTgts(self):\n anat_candidates = {}\n fmap_candidates = {}\n for entry in self.entry_map['anat']:\n if self.info[entry]['type'] == 'T1High':\n anat_candidates[entry] = self.info[entry]['acqtime']\n\n# Find the valid anatomical acquired nearest to fieldmap.\n tdiff_min = 1e6\n if len(self.entry_map['fmap']) > 0:\n for entry in self.entry_map['fmap']:\n anat_tgt = self. _FindNearestAnat(self.info[entry]['acqtime'])\n self.info[entry]['anat_ref'] = anat_tgt\n else:\n# No fieldmaps were collected. 
Find the structural nearest the\n# beginning of the EPIs.\n if len(self.entry_map['anat']) == 1:\n anat_tgt = self.entry_map['anat'][0]\n else:\n epi_start = []\n tmin = 1e6\n for anat in self.entry_map['anat']:\n if self.info[anat]['type'] != 'T1High':\n continue\n tsum1 = 0; tsum2 = 0;\n for epi in self.entry_map['epi']:\n# Difference from start of structural and first epi\n tsum1 += abs(self.info[anat]['acqtime'] - \\\n self.info[epi]['acqtime'])\n# Difference from start of structural and last epi\n tsum2 += abs(self.info[anat]['acqtime'] - \\\n (self.info[epi]['acqtime'] +\\\n self.info[epi]['TR']*self.info[epi]['tdim']))\n if tsum1 < tmin or tsum2 < tmin:\n tmin = min(tsum1, tsum2)\n anat_tgt = anat\n\n# Resolve anatomical names and links.\n self._SetAnatNames(anat_tgt)\n\n# Set appropriate attributes in the entry for each EPI.\n for epi in self.entry_map['epi']:\n if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr:\n fmap_entry = self.info[epi]['fmap_entry']\n anat_ref = self.info[fmap_entry]['anat_ref']\n self.info[epi]['anat_tgt'] = fmap_entry\n self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile']\n if self.align_fmaps or (not self.no_align_fmaps and \\\n self._SetCatMotionFmapMats(fmap_entry, anat_ref)):\n# Concatenate motion-correction matrices with tranform from\n# fieldmap to structural. Use the registered fieldmap.\n self.info[epi]['catmats'] = True\n fmap_info = self.info[self.info[epi]['fmap_entry']]\n self.info[epi]['fmapname'] = \\\n fmap_info['imgfile_r'] + fmap_info['suffix']\n else:\n# Assume fieldmap is in register with the structural.\n self.info[epi]['catmats'] = False\n else:\n self.info[epi]['anat_tgt'] = anat_tgt\n self.info[epi]['anat_matfile'] = None\n self.info[epi]['catmats'] = False\n self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \\\n self.info[anat_tgt]['suffix']", "def fast_comparison(path = \"Data/data_fronts/\",path1 = \"Results/modified_images/fronts/\"):\n #computes the areas for the first frame in order to normalize the other areas\n pol0dx = grid(path1+\"m_0.png_dx.txt\")\n pol0dx.columns = [\"y\",\"x\"]\n pol0sx = grid(path1+\"m_0.png_sx.txt\")\n pol0sx.columns = [\"y\",\"x\"]\n if pol0dx[\"x\"][0]>100:\n pol0dx = pol0dx.reindex(index=pol0dx.index[::-1])\n if pol0sx[\"x\"][0]<100:\n pol0sx = pol0sx.reindex(index=pol0sx.index[::-1])\n pol0sx = pol0sx.append(pol0dx)\n pol0sx = np.array(pol0sx)\n pol0 = Polygon(pol0sx)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_1_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_1_dx.txt\",l = 633,delimiter ='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n poldx = grid(path1+\"m_\"+str(i)+\".png_dx.txt\")\n poldx.columns = [\"y\",\"x\"]\n polsx = grid(path1+\"m_\"+str(i)+\".png_sx.txt\")\n polsx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n 
polsx = np.array(polsx)\n\n #makes an object polygon in order to compute the area\n\n pol = Polygon(polsx)\n\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",l = 633,delimiter='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def visualSignMap(phasemap1, phasemap2):\r\n\r\n if phasemap1.shape != phasemap2.shape:\r\n raise LookupError(\"'phasemap1' and 'phasemap2' should have same size.\")\r\n\r\n gradmap1 = np.gradient(phasemap1)\r\n gradmap2 = np.gradient(phasemap2)\r\n\r\n # gradmap1 = ni.filters.median_filter(gradmap1,100.)\r\n # gradmap2 = ni.filters.median_filter(gradmap2,100.)\r\n\r\n graddir1 = np.zeros(np.shape(gradmap1[0]))\r\n # gradmag1 = np.zeros(np.shape(gradmap1[0]))\r\n\r\n graddir2 = np.zeros(np.shape(gradmap2[0]))\r\n # gradmag2 = np.zeros(np.shape(gradmap2[0]))\r\n\r\n for i in range(phasemap1.shape[0]):\r\n for j in range(phasemap2.shape[1]):\r\n graddir1[i, j] = math.atan2(gradmap1[1][i, j], gradmap1[0][i, j])\r\n graddir2[i, j] = math.atan2(gradmap2[1][i, j], gradmap2[0][i, j])\r\n\r\n # gradmag1[i,j] = np.sqrt((gradmap1[1][i,j]**2)+(gradmap1[0][i,j]**2))\r\n # gradmag2[i,j] = np.sqrt((gradmap2[1][i,j]**2)+(gradmap2[0][i,j]**2))\r\n\r\n vdiff = np.multiply(np.exp(1j * graddir1), np.exp(-1j * graddir2))\r\n\r\n areamap = np.sin(np.angle(vdiff))\r\n\r\n return areamap", "def pan_corr(file):\n\n # # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n #\n # # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_1D_py.tif'\n # in_ref = in_path + in_pan_ref_file\n #\n # inreffil = gdal.Open(in_ref)\n # image_ref = inreffil.ReadAsArray()\n # # size_ref = image_ref.shape\n # # pix_count = size_ref[0]*size_ref[1]\n #\n # image_ref = image_ref[800:930, 1420:1640]\n # size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n #\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # raz_g1 = 1\n # raz_g2 = g1_avg/g2_avg\n 
# raz_r1 = g1_avg/r1_avg\n # raz_b1 = g1_avg/b1_avg\n #\n # avg = (g1+g2+r1+b1)/pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n raz_g1 = 1\n raz_g2 = 1.0245196396115988\n raz_r1 = 1.0131841989689434\n raz_b1 = 1.0517113199247086\n\n print('razmerje:', raz_g1, raz_g2, raz_r1, raz_b1)\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_4D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Slo_PAN\\_26_30\\\\'\n # in_pan_ref_file = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"tif\")]\n\n \n\n \n\n # print('image', i)\n in_ref=file\n inreffil = gdal.Open(in_ref)\n image_ref = inreffil.ReadAsArray()\n size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n # pix_count = np.count_nonzero(image_ref)\n # pix_count = 3664*650\n\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # avg = (g1 + g2 + r1 + b1) / pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n # popravek\n im_p_pop = np.zeros((size_ref[0], size_ref[1]), np.uint16)\n\n\n for i in range(size_ref[0]):\n for j in range(size_ref[1]):\n if (i % 2) == 0 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g1\n if (i % 2) == 1 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g2\n if (i % 2) == 0 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_r1\n if (i % 2) == 1 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_b1\n \n _,_,_,_,P=return_flatfield_set_path(2)\n P_flat=gdal_array.LoadFile(P)\n \n # im_p_pop=simple_flatfield_corr(P_flat, im_p_pop, 2, 1) \n \n # outout\n \n im_p_pop=BLUE_simple_flatfield_corr(P_flat, im_p_pop)\n \n out=os.path.abspath(file)+\"/corr/\"+os.path.basename(file)[:-4] + \"_pop_flat_corr.tif\"\n\n \n # out = in_ref[:-4] + \"_pop_flat_corr.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n # outRaster = driver.Create(out, size[1], size[0], 3, gdal.GDT_UInt16)\n outRaster = driver.Create(out, size_ref[1], size_ref[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(im_p_pop)\n outband.FlushCache()", "def beam_align():\n\n # do nothing if there is a sample mounted to avoid collisions\n if smart_magnet.sample_detect.get() == 0:\n raise Exception(\"Sample mounted on gonio! Avoided collision\")\n\n # wait for attenuators to finish moving\n yield from bps.abs_set(mxatten, 0.002)\n yield from bps.sleep(5)\n\n # transition to BL and open shutter\n yield from bps.abs_set(gov_rbt, \"BL\", wait=True)\n yield from bps.mv(sht.r, 0)\n\n yield from bps.abs_set(rot_aligner.cam_hi.cam_mode, \"beam_align\")\n\n # which direction, x pos. pitch beam outboard (-), y pos. 
pitch beam up (+)\n scan_uid = yield from bp.count([rot_aligner.cam_hi], 1)\n centroid_x, centroid_y = (\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output1.name][1],\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output2.name][1],\n )\n\n if np.isclose(0, centroid_x) or np.isclose(0, centroid_y):\n raise Exception(\"No centroid detected!\")\n\n yield from bps.abs_set(kbt.hor.delta_px, (centroid_x - 320))\n yield from bps.abs_set(kbt.ver.delta_px, -(centroid_y - 256))\n\n def lin_reg(independent, dependent, goal, **kwargs) -> float:\n b = dependent\n A = np.matrix([[pos, 1] for pos in independent])\n p = (\n np.linalg.inv(A.transpose() * A)\n * A.transpose()\n * np.matrix(b.to_numpy()).transpose()\n )\n best = (goal - p[1]) / p[0]\n return best\n\n for axis, signal, center in (\n kbt.hor,\n rot_aligner.cam_hi.cv1.outputs.output1,\n 320,\n ), (kbt.ver, rot_aligner.cam_hi.cv1.outputs.output2, 256):\n # skip if we are within 1 um\n if abs(axis.delta_px.get()) > 2:\n scan_uid = yield from rel_scan_no_reset(\n [rot_aligner.cam_hi],\n axis,\n 0,\n 0.4 * -(axis.delta_px.get() / abs(axis.delta_px.get())),\n 10,\n )\n scan_df = db[scan_uid].table()\n best_voltage = lin_reg(\n scan_df[axis.readback.name],\n scan_df[signal.name],\n center,\n )\n yield from bps.mv(axis, best_voltage)\n yield from bps.sleep(1)\n\n # close shutters and reset attenuators for manual viewing\n yield from bps.mv(sht.r, 20)", "def initialize_first_frame(self):\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Detect new features on the frist image.\r\n new_features = self.detector.detect(img)\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers = [], []\r\n response_inliers = []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # len(cam0_inliers) < max(5, 0.1 * len(new_features))\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1", "def test_apply_transform_single_track_no_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n 
self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)", "def get_motion(frame1k, frame2k, frame_count):\n frame1 = frame1k.copy()\n frame2 = frame2k.copy()\n\n global limb_coords, init_coords, num_blocks\n cv2.imwrite(\"thisImageAnalyse.png\", frame2)\n block_size = 3\n block_rad = int(block_size/2)\n\n def get_SSD():\n \"\"\" applies SSD formula to search area\n :return SSD value\"\"\"\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)\n\n # for each body part\n b = 0\n while b < 5:\n avg_x = 0.0\n avg_y = 0.0\n new_x = 0.0\n new_y = 0.0\n a = 0\n # for each block on body part (9 total)\n while a < num_blocks:\n found = False\n search_rad = 5\n while found is False:\n center_y1 = int(init_coords[b][a][0])\n center_x1 = int(init_coords[b][a][1])\n min_SSD = 999999\n # for pythagoras to ensure closest block gets picked when equality occurs of SSD value\n min_d = 999999\n # this finds the center of the block to compare\n for factor_y in range(-search_rad, search_rad + 1):\n center_y2 = center_y1 + block_size*factor_y\n y_dist = center_y1 - abs(center_y2)\n for factor_x in range(-search_rad, search_rad + 1):\n center_x2 = center_x1 + block_size*factor_x\n x_dist = center_x1 - abs(center_x2)\n # pythagoras\n d = math.sqrt((y_dist**2 + x_dist**2))\n if d < min_d:\n min_d = d\n\n SSD = get_SSD()\n if frame2[center_y2][center_x2][1] != 0 and frame2[center_y2][center_x2][2] != 0:\n found = True\n if SSD < min_SSD:\n min_SSD = SSD\n new_y = center_y2\n new_x = center_x2\n elif SSD == min_SSD and d < min_d:\n new_y = center_y2\n new_x = center_x2\n if found is False:\n # if no block is found repeat the search, increasing the search size by 1\n search_rad += 1\n # draw extracted vectors\n cv2.arrowedLine(frame1k, (int(center_x1), int(center_y1)), (int(new_x), int(new_y)), (150, 200, 30), 1, 4, 0, 0.3)\n avg_x += new_x\n avg_y += new_y\n init_coords[b][a][0] = new_y\n init_coords[b][a][1] = new_x\n a += 1\n cv2.imwrite('monkeyFrames/contrast_enhanced%d.png' % frame_count, frame1k)\n limb_coords[b][frame_count][0] = int(avg_y/num_blocks)\n limb_coords[b][frame_count][1] = int(avg_x/num_blocks)\n b += 1", "def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n 
dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if 
outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC 
= velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def camera_scan(h, cam, direction):\n if direction == \"right\":\n offset = -1.0\n elif direction == \"left\":\n offset = 1.0\n else:\n offset = 0.0\n\n\n cam.set_pan_tilt(offset * 1.3, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3)\n time.sleep(4)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)\n\n\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3 + 0.4)\n time.sleep(2)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)\n \n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3 - 0.4)\n time.sleep(2)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)", "def data_rearrange_case_one(self, tik_instance, ub_ori, ub_cast_fp16,\n ub_trans, ub_cast_int8,\n num_loop_time, loop_num, is_last):\n cast_repeat_time = tik_instance.Scalar(\"uint64\")\n cast_remainder = tik_instance.Scalar(\"uint64\")\n with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):\n if (self.src_shape[-2] % CUBE_SIZE) == 0:\n cast_repeat_time.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] // MAX_MASK)\n cast_remainder.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] % MAX_MASK)\n else:\n cast_repeat_time.set_as((self.src_shape[-2] % CUBE_SIZE) *\n loop_num * self.dst_shape[-1] //\n MAX_MASK)\n cast_remainder.set_as((self.src_shape[-2] % CUBE_SIZE) *\n loop_num * self.dst_shape[-1] %\n MAX_MASK)\n with tik_instance.else_scope():\n cast_repeat_time.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] // MAX_MASK)\n cast_remainder.set_as(loop_num * self.dst_shape[-1] *\n self.dst_shape[-2] % MAX_MASK)\n # cast the data from int8 to float16\n _cast_dtype(tik_instance, ub_cast_fp16, ub_ori, cast_repeat_time,\n cast_remainder, \"int8_2_float16\")\n\n scalar_zero = tik_instance.Scalar(dtype=\"float16\", init_value=0.0)\n with tik_instance.if_scope(is_last == 1):\n if self.src_shape[-1] % CUBE_SIZE_2 != 0:\n mask = 0\n for i, _ in enumerate(range(CUBE_SIZE_2 -\n self.src_shape[-1] % CUBE_SIZE_2)):\n mask += 2 ** (CUBE_SIZE_2 - 1 - i)\n\n tik_instance.vector_dup([0, mask],\n ub_cast_fp16[loop_num * CUBE_SIZE_2 -\n CUBE_SIZE_2],\n scalar_zero, CUBE_SIZE,\n self.cast_num_byte // 2,\n loop_num * CUBE_SIZE_2 //\n self.cast_num_data)\n with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):\n if (self.src_shape[-2] % CUBE_SIZE) != 0:\n dup_number = (CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE) * \\\n self.dst_shape[-1] * loop_num\n offset = (self.src_shape[-2] % 
CUBE_SIZE) * \\\n self.dst_shape[-1] * loop_num\n self.vector_dup_zero(tik_instance, ub_cast_fp16,\n dup_number, offset)\n with tik_instance.for_range(0, loop_num) as num_col_cube:\n tik_instance.vadds(CUBE_SIZE_2,\n ub_trans[num_col_cube *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n CUBE_SIZE_2 * num_col_cube],\n ub_cast_fp16[num_col_cube * CUBE_SIZE_2],\n scalar_zero, CUBE_SIZE,\n self.cast_num_byte // 2,\n self.cast_num_byte // 2,\n self.cast_num_byte,\n loop_num * self.dst_shape[-1] //\n self.cast_num_data)\n\n cast_repeat_time.set_as((CUBE_SIZE + 1) * loop_num *\n self.dst_shape[-1] // MAX_MASK)\n cast_remainder.set_as((CUBE_SIZE + 1) * loop_num * self.dst_shape[-1] %\n MAX_MASK)\n # cast the data from float16 to int8\n _cast_dtype(tik_instance, ub_cast_int8, ub_trans, cast_repeat_time,\n cast_remainder, \"float16_2_int8\")", "def test_transform_track_with_two_transforms(self):\n track = Track(artist='Artist', title='Title')\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(2,\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2',\n ))\n\n self.assertEqual(track.last_transform, 0)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 2)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def analyze(self, frame: np.ndarray, debug: bool) -> Tuple[float, float]:\n gate_center = self.output_class(250, 250)\n filtered_frame = self.combined_filter(frame, display_figs=False)\n filtered_frame_copies = [filtered_frame for _ in range(3)]\n stacked_filter_frames = np.concatenate(filtered_frame_copies, axis=2)\n mask = cv.inRange(\n stacked_filter_frames, np.array([100, 100, 100]), np.array([255, 255, 255])\n )\n contours = cv.findContours(mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)[-2]\n if len(contours) > 0:\n contours.sort(reverse=True, key=self.findStraightness)\n cnts = contours[:2]\n rects = [cv.minAreaRect(c) for c in cnts]\n centers = [np.array(r[0]) for r in rects]\n boxpts = [cv.boxPoints(r) for r in rects]\n box = [np.int0(b) for b in boxpts]\n for b in box:\n cv.drawContours(stacked_filter_frames, [b], 0, (0, 0, 255), 5)\n if len(centers) >= 2:\n gate_center = (centers[0] + centers[1]) * 0.5\n if self.__ema is None:\n self.__ema = gate_center\n else:\n self.__ema = (\n self.__alpha * gate_center + (1 - self.__alpha) * self.__ema\n )\n gate_center = (int(self.__ema[0]), int(self.__ema[1]))\n # TODO: clean this up via hyperparam or move to gate center algo\n # if len(self.__past_centers) < 15:\n # \tself.__past_centers += [gate_center]\n # else:\n # \tself.__past_centers.pop(0)\n # \tself.__past_centers += [gate_center]\n # gate_center = sum(self.__past_centers) / len(self.__past_centers)\n # gate_center = (int(gate_center[0]), int(gate_center[1]))\n cv.circle(stacked_filter_frames, gate_center, 10, (0, 255, 0), -1)\n\n if debug:\n return (gate_center[0], gate_center[1]), (frame, stacked_filter_frames)\n return (gate_center[0], gate_center[1])", "def _select_transform(self):\n for transform in self.transforms:\n if transform.applies is None or transform.applies(self.ti_dict) is True:\n self.transform = transform\n break\n else:\n raise RuntimeError('No transform found for TI data')", "def 
test_real_trace_transform(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=83)\n su4, _ = dec._u4_to_su4(u4)\n real_map = dec._real_trace_transform(su4)\n self.assertTrue(dec._cx2_test(real_map @ su4))", "def forward(self, image1, image2, intrinsic, posepred, insmap, initialdepth=None, iters=12):\n image2_org = torch.clone(image2)\n\n image1 = 2 * image1 - 1.0\n image2 = 2 * image2 - 1.0\n\n image1 = image1.contiguous()\n image2 = image2.contiguous()\n\n hdim = self.hidden_dim\n cdim = self.context_dim\n\n # run the feature network\n fmap1, fmap2 = self.fnet([image1, image2])\n\n bz, _, featureh, featurew = fmap1.shape\n device = fmap1.device\n self.init_sampling_pts(bz, featureh, featurew, device)\n \n fmap1 = fmap1.float()\n fmap2 = fmap2.float()\n corr_fn = CorrBlock(fmap1, fmap2, intrinsic, posepred, insmap, nedges=self.nedges, maxinsnum=self.args.maxinsnum)\n\n # run the context network\n cnet = self.cnet(image1)\n net, inp = torch.split(cnet, [hdim, cdim], dim=1)\n net = torch.tanh(net)\n inp = torch.relu(inp)\n\n bz, _, h, w = image1.shape\n if initialdepth is None:\n logdepth = self.initialize_logdepth(image1)\n else:\n logdepth = F.interpolate(torch.log(torch.clamp_min(initialdepth, min=1)), [int(h / 8), int(w / 8)], mode='nearest')\n\n depth_predictions = []\n logdepth_predictions = []\n local_sample_pts2ds = []\n delta_depths = []\n outputs = dict()\n for itr in range(iters):\n corr, local_sample_pts2d = corr_fn(logdepth, sampled_rld=self.sampled_rld, pts2ddict=self.pts2ddict) # index correlation volume\n\n net, up_mask, delta_depth = self.update_block(net, inp, corr, logdepth)\n\n # F(t+1) = F(t) + \\Delta(t)\n logdepth = logdepth + delta_depth\n depth = torch.exp(logdepth)\n\n depth_up = self.upsample_flow(depth, up_mask)\n\n if itr == iters - 1:\n flowpred, img1_recon, projMimg = self.depth2rgb(depth_up, image2_org, intrinsic, posepred, insmap)\n outputs['flowpred'] = flowpred\n outputs['img1_recon'] = img1_recon\n # with torch.no_grad():\n # outputs['orgflow'] = self.depth2flow(initialdepth, projMimg)\n \n depth_predictions.append(depth_up)\n logdepth_predictions.append(F.interpolate(logdepth, [h, w], mode='bilinear', align_corners=False))\n local_sample_pts2ds.append(local_sample_pts2d)\n delta_depths.append(delta_depth)\n\n outputs['depth_predictions'] = depth_predictions\n outputs['logdepth_predictions'] = logdepth_predictions\n outputs['local_sample_pts2ds'] = local_sample_pts2ds\n outputs['delta_depths'] = delta_depths\n return outputs", "def combine_par(output_dir): \n #start time\n start_time = time.time()\n \n # set input/output file paths\n infile0 = output_dir + 'TransformParameters.0.txt'\n infile1 = output_dir + 'TransformParameters.1.txt'\n outfile0 = output_dir +'TransformParameters.fwd.txt'\n outfile1 = output_dir +'TransformParameters.inv.txt'\n \n # define reference frame for registration\n ref = 0\n spacing = 1\n \n # Open parameter file 0 and search for GridSpacing and GridOrigin line\n text_filein0 = open( infile0, \"r\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n origin_str = line\n elif line.find( \"(GridSpacing \" ) == 0:\n spacing_str = line\n text_filein0.close()\n \n # Extract time point origin from line\n origin_split = origin_str.strip().split(' ')\n origin_split = origin_split[ len( origin_split ) - 1 ].split(')')\n old_origin = float( origin_split[ 0 ] )\n \n # Extract time point spacing from line\n spacing_split = spacing_str.strip().split(' ')\n spacing_split = 
spacing_split[ len( spacing_split ) - 1 ].split(')')\n old_spacing = float( spacing_split[ 0 ] )\n \n \n print(\"Original grid origin in time dimension: \" + str( old_origin ))\n print(\"Original grid spacing in time dimension: \" + str( old_spacing ))\n print(\"\")\n \n # Determine new grid origin\n new_origin = ref - ( spacing / old_spacing ) * ( ref - old_origin )\n print( \"New grid origin in time dimension: \" + str( new_origin ))\n \n # Recompose origin and spacing lines\n new_origin_string = origin_str.strip().split(' ')\n new_origin_string.pop()\n new_origin_string = \" \".join( new_origin_string ) + \" \" + str( new_origin ) + \")\\n\"\n new_spacing_string = spacing_str.strip().split(' ')\n new_spacing_string.pop()\n new_spacing_string = \" \".join( new_spacing_string ) + \" \" + str( spacing ) + \")\\n\"\n \n # Reopen text file, replace origin and spacing and write to output file 0\n text_filein0 = open( infile0, \"r\" )\n text_fileout0 = open( outfile0, \"w\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n # Write new origin line\n text_fileout0.write( new_origin_string )\n elif line.find( \"(GridSpacing \" ) == 0:\n # Write new spacing line\n text_fileout0.write( new_spacing_string )\n elif line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Remove initial transform\n text_fileout0.write( \"(InitialTransformParametersFileName \\\"NoInitialTransform\\\")\\n\" )\n else:\n # Write line read from input file (no change)\n text_fileout0.write( line )\n text_filein0.close()\n text_fileout0.close()\n \n # Open parameter file 1 and search for GridSize\n text_filein1 = open( infile1, \"r\" )\n for line in text_filein1:\n if line.find(\"(GridSize\") == 0:\n grid_str = line\n grid_split = grid_str.strip().split(' ')\n grid_split[-1] = grid_split[-1].replace(')','')\n grid_split = grid_split[1:]\n grid_float = [float(s) for s in grid_split]\n grid_all = int(grid_float[0] * grid_float[1] * grid_float[2] * grid_float[3])\n num_phase = int(grid_float[3])\n text_filein1.close()\n \n # Replace initial transform parameter filename\n text_filein1 = open( infile1, \"r\" )\n text_fileout1 = open( outfile1, \"w\" )\n for line in text_filein1:\n if line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Set initial transform filename\n text_fileout1.write( \"(InitialTransformParametersFileName \\\"\" + outfile0 + \"\\\")\\n\" )\n elif line.find(\"(TransformParameters \") == 0:\n # extract b-spline parameters, arrangment : x (Px*Py*Pz), y(Px*Py*Pz), z(Px*Py*Pz), t(Px*Py*Pz)\n transPar_str = line\n transPar_split = transPar_str.strip().split(' ')\n transPar_split[-1] = transPar_split[-1].replace(')','')\n transPar_split = transPar_split[1:]\n num_grid3d = int(grid_all / num_phase) \n str_seg = transPar_split[(ref*num_grid3d):((ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all+(ref*num_grid3d)): (grid_all + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*2+(ref*num_grid3d)): (grid_all*2 + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*3+(ref*num_grid3d)): (grid_all*3 + (ref+1)*num_grid3d)] * num_phase\n #str_seg = \"\"\n #str_seg = [str_seg + transPar_split[((ref*num_grid3d)+grid*i):((ref+1)*num_grid3d+grid*i)] * num_phase for i in range(4)]\n str_joined = ' '.join(str_seg)\n text_fileout1.write(\"(TransformParameters \" + str_joined + \")\\n\")\n else:\n # Write line read from input file (no change)\n text_fileout1.write( line )\n text_filein1.close()\n text_fileout1.close()\n \n # caclulate elapsed time\n end_time = 
time.time()\n elapsed_time = end_time - start_time\n print('combine_par done. elapsed time:', elapsed_time, 's')", "def post_process(y, number_of_mel_samples):\n # align input in a fixed (n_samples, n_prediction) shape, filling with NaNs if neccesary.\n time, aligned_y = np.asarray(VoiceActivationFeatureExtractor.frame_level_predict(y, number_of_mel_samples))\n # reduce n_samples, n_prediction to n_samples by mean\n reduced_y = FeatureExtractor.get_mean_voice_activation(aligned_y)\n y = reduced_y\n return time, y", "def compute_transform(self, image_pair, figure):\t\n\t\timage_pair = [int(image_pair[1]), int(image_pair[3])]\n\t\tindices = np.where(np.all(np.abs(self.Matches[\"image_pairs\"][0] - image_pair) == 0, axis=1)) \n\t\tfeature_pairs = self.Matches[\"image_match_pairs\"][0][indices[0][0]]\n\t\tfeature_pair_scores = self.Matches[\"image_feature_scores\"][0][indices[0][0]] \n\n\t\ti1_loc = self.Database[\"image_locs\"][np.where(np.array(self.Database[\"image_idx\"]) == image_pair[0])[0][0]]\n\t\ti2_loc = self.Database[\"image_locs\"][np.where(np.array(self.Database[\"image_idx\"]) == image_pair[1])[0][0]]\n\n\t\tself.image1 = cv2.cvtColor(extract_image(figure, i1_loc), cv2.COLOR_BGR2GRAY)\n\t\tself.image2 = cv2.cvtColor(extract_image(figure, i2_loc), cv2.COLOR_BGR2GRAY)\n\t\t# self.image1 = extract_image(figure, i1_loc)\n\t\t# self.image2 = extract_image(figure, i2_loc)\n\n\t\tself.match_alignment = {}; self.match_alignment_scores = {}\n\t\tfor pair, score in zip(feature_pairs, feature_pair_scores):\n\t\t\ti1 = self.Database[\"image_idx\"][pair[0]]\n\t\t\ti2 = self.Database[\"image_idx\"][pair[1]]\n\n\t\t\tif i1 == image_pair[0]:\n\t\t\t\ttarget = pair[0] \n\t\t\t\tstart = pair[1]\n\t\t\t\tif target not in self.match_alignment.keys():\n\t\t\t\t\tself.init_match_alignment(target)\n\t\t\telse:\n\t\t\t\ttarget = pair[1]\n\t\t\t\tstart = pair[0]\t\t\t\t\n\t\t\t\tif target not in self.match_alignment.keys():\n\t\t\t\t\tself.init_match_alignment(target)\n\n\t\t\t# get ellipse and convert to three cardinal points\n\t\t\ttarget_ellipse = self.Database[\"orientation\"][target]\n\t\t\tstart_ellipse = self.Database[\"orientation\"][start]\n\t\t\ttarget_points, target_params = utils.ellipse2points(target_ellipse)\n\t\t\tstart_points, start_params = utils.ellipse2points(start_ellipse)\n\n\t\t\t# compute angle between Major axes\n\t\t\trotation_angle = int(np.abs(target_params[0] - start_params[0]))\n\t\t\treflection_truth = target_params[1] * start_params[1]\n\t\t\tif reflection_truth > 0:\n\t\t\t\treflection_truth = \"No\"\n\t\t\telse:\n\t\t\t\treflection_truth = \"Yes\"\n\t\t\tresolution_perc = round(((target_params[2] / start_params[2]) - 1), 1)\n\n\t\t\t# extract features for histogram matching\n\t\t\ttarget_bloc = self.Database[\"blot_locs\"][target]\n\t\t\tstart_bloc = self.Database[\"blot_locs\"][start]\n\t\t\ttarget_feature = extract_image(self.image1, target_bloc, ex=5)\n\t\t\tfinal_feature = extract_image(self.image2, start_bloc, ex=5) \n\n\t\t\t#find affine transform to match\n\t\t\tif self.AFFINE:\n\t\t\t\tT = utils.compute_affine(start_points, target_points)\n\t\t\t\timage2_warped = cv2.warpAffine(self.image2.copy(), T, (self.image1.shape[1], self.image1.shape[0]))\n\t\t\t\tfinal_feature = extract_image(image2_warped, target_bloc, ex=5)\n\t\t\t\n\t\t\t# match hist\n\t\t\tif self.CONTRAST:\n\t\t\t\tfinal_feature = utils.histogram_match(final_feature, target_feature)\n\n\t\t\tself.match_alignment[target].append((final_feature, start, rotation_angle, reflection_truth, 
\\\n\t\t\t\t\t\t\t\t\t\t\t\t resolution_perc, score))\n\t\t\tself.match_alignment_scores[target].append(score)", "def Motion_estimate_compute_P_1block(ref0_frame, ref1_frame, target_block, xy0):\n\n block_size = len(target_block)\n x0, y0 = xy0\n \n err1=dist_sq(target_block,ref0_frame[x0:x0+block_size,y0:y0+block_size])\n err2=dist_sq(target_block,ref1_frame[x0:x0+block_size,y0:y0+block_size])\n min_err = min(err1,err2)\n min_err_ref= (min_err==err2)\n vx,vy = 0,0\n\n diam_coor_list = [np.zeros((8,2),dtype=int),np.zeros((4,2),dtype=int)]\n diam_list = [big_diam,small_diam]\n for diam_id in range(2):\n # Big then small diamond\n diam_coor = diam_coor_list[diam_id]\n diam = diam_list[diam_id]\n prev_min_err = min_err+1\n while prev_min_err>min_err:\n prev_min_err = min_err\n\n # Filter valid diamond coordinates\n diam_coor[:,0] = diam[:,0]+x0+vx\n diam_coor[:,1] = diam[:,1]+y0+vy\n in_range = (diam_coor[:,0]>=0)*(diam_coor[:,1]>=0)\n in_range*= (diam_coor[:,0]<(height-block_size+1))\n in_range*= (diam_coor[:,1]<(width -block_size+1))\n keep_diam_coor = diam_coor[in_range,:]\n \n for pxa,pya in keep_diam_coor:\n pxz,pyz = pxa+block_size,pya+block_size\n err0 = dist_sq(target_block,ref0_frame[pxa:pxz,pya:pyz])\n err1 = dist_sq(target_block,ref1_frame[pxa:pxz,pya:pyz])\n if err0<min_err:\n min_err = err0\n min_err_ref = 0\n vx,vy = pxa-x0,pya-y0\n if err1<min_err:\n min_err = err1\n min_err_ref = 1\n vx,vy = pxa-x0,pya-y0\n \n return min_err_ref,vx,vy", "def analyze(self, event):\n\n event = mappedEvent(event, mapname=self._branch_map)\n\n values = []\n ev = event.event\n\n values.append(self.GetValue(event, \"Lepton_pt[0]\") * math.cos(self.GetValue(event, \"Lepton_phi[0]\")))\n values.append(self.GetValue(event, \"Lepton_pt[0]\") * math.sin(self.GetValue(event, \"Lepton_phi[0]\")))\n values.append(self.GetValue(event, \"Lepton_pt[0]\") * math.sinh(self.GetValue(event, \"Lepton_eta[0]\")))\n values.append(self.GetValue(event, \"Lepton_pt[1]\") * math.cos(self.GetValue(event, \"Lepton_phi[1]\")))\n values.append(self.GetValue(event, \"Lepton_pt[1]\") * math.sin(self.GetValue(event, \"Lepton_phi[1]\")))\n values.append(self.GetValue(event, \"Lepton_pt[1]\") * math.sinh(self.GetValue(event, \"Lepton_eta[1]\")))\n\n if self.GetValue(event, \"nCleanJet\")>=1:\n values.append(self.GetValue(event, \"CleanJet_pt[0]\") * math.cos(self.GetValue(event, \"CleanJet_phi[0]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[0]\") * math.sin(self.GetValue(event, \"CleanJet_phi[0]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[0]\") * math.sinh(self.GetValue(event, \"CleanJet_eta[0]\")))\n else:\n values.append(0.0)\n values.append(0.0)\n values.append(0.0)\n if self.GetValue(event, \"nCleanJet\")>=2:\n values.append(self.GetValue(event, \"CleanJet_pt[1]\") * math.cos(self.GetValue(event, \"CleanJet_phi[1]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[1]\") * math.sin(self.GetValue(event, \"CleanJet_phi[1]\")))\n values.append(self.GetValue(event, \"CleanJet_pt[1]\") * math.sinh(self.GetValue(event, \"CleanJet_eta[1]\")))\n else:\n values.append(0.0)\n values.append(0.0)\n values.append(0.0)\n\n values.append(self.GetValue(event, \"PuppiMET_pt\") * math.cos(self.GetValue(event, \"PuppiMET_phi\")))\n values.append(self.GetValue(event, \"PuppiMET_pt\") * math.sin(self.GetValue(event, \"PuppiMET_phi\")))\n values.append(self.GetValue(event, \"dphilmet1\"))\n values.append(self.GetValue(event, \"dphilmet2\"))\n values.append(self.GetValue(event, \"mll\"))\n 
values.append(self.GetValue(event, \"mTi\"))\n values.append(self.GetValue(event, \"mth\"))\n values.append(self.GetValue(event, \"mtw1\"))\n values.append(self.GetValue(event, \"mtw2\"))\n values.append(self.GetValue(event, \"ht\"))\n values.append(self.GetValue(event, \"vht_pt\") * math.cos(self.GetValue(event, \"vht_phi\")))\n values.append(self.GetValue(event, \"vht_pt\") * math.sin(self.GetValue(event, \"vht_phi\")))\n\n\n values_stacked = np.hstack(values).reshape(1, len(values))\n values_preprocessed = self.preprocessing[ev % 2].transform(values_stacked)\n response = self.classifiers[ev % 2].predict(values_preprocessed)\n response = np.squeeze(response)\n\n self.out.fillBranch(\"DNN_mth\", response)\n\n return True", "def Reward_lin(self, trg, hyp, show = False):\n\n tar_len = trg.shape[1]\n hyp_len = hyp.shape[1]\n\n final_rew = -1*np.ones(hyp_len-1)\n\n len_temp = 0\n if tar_len > hyp_len:\n len_temp = hyp_len\n else:\n len_temp = tar_len\n hyp2com = np.zeros([1,tar_len])\n hyp2com[0 ,:len_temp] = hyp[0 ,:len_temp]\n\n equal = (trg.numpy() == hyp2com)\n\n #equal = np.invert(equal)*np.ones(equal.size)*0.2\n # ind1, ind2 = np.where(equal == False)\n\n\n # if len(ind1) != 0:\n # equal[ind1[0]:, ind2[0]:] = False\n\n decoded_valid_tar = self.model.trg_vocab.arrays_to_sentences(arrays=trg ,\n cut_at_eos=True)\n decoded_valid_hyp = self.model.trg_vocab.arrays_to_sentences(arrays=hyp ,\n cut_at_eos=True)\n\n if show:\n print('la lista trg-out decodificada: ', decoded_valid_tar)\n print('la lista hypotesis decodificada: ', decoded_valid_hyp)\n\n # evaluate with metric on each src, tar, and hypotesis\n join_char = \" \" if self.level in [\"word\", \"bpe\"] else \"\"\n valid_references = [join_char.join(t) for t in decoded_valid_tar]\n valid_hypotheses = [join_char.join(t) for t in decoded_valid_hyp]\n\n # post-process\n if self.level == \"bpe\":\n valid_references = [bpe_postprocess(v)\n for v in valid_references]\n valid_hypotheses = [bpe_postprocess(v) for\n v in valid_hypotheses]\n # if references are given, evaluate against them\n if valid_references:\n assert len(valid_hypotheses) == len(valid_references)\n\n current_valid_score = 0\n if self.eval_metric.lower() == 'bleu':\n # this version does not use any tokenization\n current_valid_score = bleu(valid_hypotheses, valid_references)\n elif self.eval_metric.lower() == 'chrf':\n current_valid_score = chrf(valid_hypotheses, valid_references)\n elif self.eval_metric.lower() == 'token_accuracy':\n current_valid_score = token_accuracy(\n valid_hypotheses, valid_references, level=self.level)\n elif self.eval_metric.lower() == 'sequence_accuracy':\n current_valid_score = sequence_accuracy(\n valid_hypotheses, valid_references)\n else:\n current_valid_score = -1\n\n k = sum(np.arange(tar_len))\n a_i = np.arange(1,tar_len)/k\n VSa_i = [sum(a_i[:i]) for i in np.arange(1,tar_len, dtype='int')]\n VSa_i = np.multiply(np.asanyarray(VSa_i)\n .reshape([1, tar_len-1]), equal).reshape([tar_len-1])\n\n final_rew[: len_temp-1] = np.multiply(VSa_i\n , current_valid_score)[: len_temp]\n \n if show:\n print('Reward is: ' , final_rew)\n print('sum: ', sum(final_rew))\n return final_rew", "def compute_single_transport_map(self, config):\n\n import gc\n gc.collect()\n\n t0 = config.pop('t0', None)\n t1 = config.pop('t1', None)\n if t0 is None or t1 is None:\n raise ValueError(\"config must have both t0 and t1, indicating target timepoints\")\n ds = self.matrix\n covariate = config.pop('covariate', None)\n if covariate is None:\n p0_indices = 
ds.obs[self.day_field] == float(t0)\n p1_indices = ds.obs[self.day_field] == float(t1)\n else:\n p0_indices = (ds.obs[self.day_field] == float(t0)) & (ds.obs[self.covariate_field] == covariate[0])\n p1_indices = (ds.obs[self.day_field] == float(t1)) & (ds.obs[self.covariate_field] == covariate[1])\n\n p0 = ds[p0_indices, :]\n p1 = ds[p1_indices, :]\n\n if p0.shape[0] == 0:\n logger.info('No cells at {}'.format(t0))\n return None\n if p1.shape[0] == 0:\n logger.info('No cells at {}'.format(t1))\n return None\n\n local_pca = config.pop('local_pca', None)\n eigenvals = None\n if local_pca is not None and local_pca > 0:\n # pca, mean = wot.ot.get_pca(local_pca, p0.X, p1.X)\n # p0_x = wot.ot.pca_transform(pca, mean, p0.X)\n # p1_x = wot.ot.pca_transform(pca, mean, p1.X)\n p0_x, p1_x, pca, mean = wot.ot.compute_pca(p0.X, p1.X, local_pca)\n eigenvals = np.diag(pca.singular_values_)\n else:\n p0_x = p0.X\n p1_x = p1.X\n\n C = OTModel.compute_default_cost_matrix(p0_x, p1_x, eigenvals)\n config['C'] = C\n delta_days = t1 - t0\n\n if self.cell_growth_rate_field in p0.obs.columns:\n config['G'] = np.power(p0.obs[self.cell_growth_rate_field].values, delta_days)\n else:\n config['G'] = np.ones(C.shape[0])\n tmap, learned_growth = wot.ot.compute_transport_matrix(solver=self.solver, **config)\n learned_growth.append(tmap.sum(axis=1))\n obs_growth = {}\n for i in range(len(learned_growth)):\n g = learned_growth[i]\n g = np.power(g, 1.0 / delta_days)\n obs_growth['g' + str(i)] = g\n obs = pd.DataFrame(index=p0.obs.index, data=obs_growth)\n return anndata.AnnData(tmap, obs, pd.DataFrame(index=p1.obs.index))", "def test_transform_track_with_two_transforms_with_gap_in_numbering_and_one_already_applied(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 2\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(3,\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2',\n ))\n\n self.assertEqual(track.last_transform, 2)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 3)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def getTiltedRotateShift(img1, img2, tiltdiff, angle=0, bin=1, msg=True):\n\n\t### untilt images by stretching and compressing\n\t# choose angle s/t compressFactor = 1/stretchFactor\n\t# this only works if one image is untilted (RCT) of both images are opposite tilt (OTR)\n\t#halftilt = abs(tiltdiff)/2.0\n\thalftiltrad = math.acos(math.sqrt(math.cos(abs(tiltdiff)/180.0*math.pi)))\n\t# go from zero tilt to half tilt\n\tcompressFactor = math.cos(halftiltrad)\n\t# go from max tilt to half tilt\n\tstretchFactor = math.cos(halftiltrad) / math.cos(abs(tiltdiff)/180.0*math.pi)\n\tif tiltdiff > 0:\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"compress image 1\")\n\t\tuntilt1 = transformImage(img1, compressFactor, angle)\n\t\tuntilt2 = transformImage(img2, stretchFactor, angle)\n\t\txfactor = compressFactor\n\telse:\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"stretch image 1\")\n\t\tuntilt1 = transformImage(img1, stretchFactor, angle)\n\t\tuntilt2 = transformImage(img2, compressFactor, angle)\n\t\txfactor = stretchFactor\n\n\t### filtering was done earlier\n\tfilt1 = 
untilt1\n\tfilt2 = untilt2\n\n\tif filt1.shape != filt2.shape:\n\t\tnewshape = ( max(filt1.shape[0],filt2.shape[0]), max(filt1.shape[1],filt2.shape[1]) )\n\t\tapDisplay.printMsg(\"Resizing images to: \"+str(newshape))\n\t\tfilt1 = apImage.frame_constant(filt1, newshape, filt1.mean())\n\t\tfilt2 = apImage.frame_constant(filt2, newshape, filt2.mean())\n\n\t### cross-correlate\n\tcc = correlator.cross_correlate(filt1, filt2, pad=True)\n\trad = min(cc.shape)/20.0\n\tcc = apImage.highPassFilter(cc, radius=rad)\n\tcc = apImage.normRange(cc)\n\tcc = blackEdges(cc)\n\tcc = apImage.normRange(cc)\n\tcc = blackEdges(cc)\n\tcc = apImage.normRange(cc)\n\tcc = apImage.lowPassFilter(cc, radius=10.0)\n\n\t#find peak\n\tpeakdict = peakfinder.findSubpixelPeak(cc, lpf=0)\n\t#import pprint\n\t#pprint.pprint(peak)\n\tpixpeak = peakdict['subpixel peak']\n\tif msg is True:\n\t\tapDisplay.printMsg(\"Pixel peak: \"+str(pixpeak))\n\t\tapImage.arrayToJpegPlusPeak(cc, \"guess-cross-ang\"+str(abs(angle))+\".jpg\", pixpeak)\n\n\trawpeak = numpy.array([pixpeak[1], pixpeak[0]]) #swap coord\n\tshift = numpy.asarray(correlator.wrap_coord(rawpeak, cc.shape))*bin\n\n\tif msg is True:\n\t\tapDisplay.printMsg(\"Found xy-shift btw two images\"\n\t\t\t+\";\\n\\t SNR= \"+str(round(peakdict['snr'],2))\n\t\t\t+\";\\n\\t halftilt= \"+str(round(halftiltrad*180/math.pi, 3))\n\t\t\t+\";\\n\\t compressFactor= \"+str(round(compressFactor, 3))\n\t\t\t+\";\\n\\t stretchFactor= \"+str(round(stretchFactor, 3))\n\t\t\t+\";\\n\\t xFactor= \"+str(round(xfactor, 3))\n\t\t\t+\";\\n\\t rawpeak= \"+str(numpy.around(rawpeak*bin, 1))\n\t\t\t+\";\\n\\t shift= \"+str(numpy.around(shift, 1))\n\t\t)\n\n\treturn shift, xfactor, peakdict['snr']", "def photometry(self, ctr1, ctr2):\n if not self.HAS_PIPECAL:\n log.warning('Photometry is not available. 
The '\n 'sofia_redux.calibration package is required.')\n return\n\n # reset photometry table if necessary\n if self.ptable is None:\n self.reset_ptable()\n\n # get photometry parameters\n param = self.phot_parameters\n\n # check for the current status of the viewer\n # (tiling, aligned by wcs)\n if self.run('tile', via='get') == 'yes':\n allframes = True\n frames = self.run('frame active', via='get').split()\n else:\n allframes = False\n frames = [self.run('frame', via='get')]\n if self.run('wcs align', via='get') == 'yes':\n cs = 'wcs'\n else:\n cs = 'image'\n\n # log input values\n log.info(f'Photometry at x={ctr1}, y={ctr2} (in {cs} coordinates)')\n log.info('Parameters:')\n log.info(f\" Model: {param['model']}\")\n log.info(f\" Window: {param['window']} {param['window_units']}\")\n log.info(f\" Starting FWHM: {param['fwhm']} {param['fwhm_units']}\")\n log.info(f\" Aperture: {param['psf_radius']} \"\n f\"{param['aperture_units']}\")\n log.info(f\" Background: radius {param['bg_inner']} \"\n f\"{param['aperture_units']}, width {param['bg_width']} \"\n f\"{param['aperture_units']}\")\n log.info('')\n\n for frame in frames:\n if allframes:\n log.debug('Selecting frame ' + frame)\n self.run('frame ' + frame)\n\n try:\n results = self.retrieve_data(ctr1, ctr2)\n except (ValueError, TypeError) as err:\n log.debug(f'Error in retrieving Frame {frame} data: {err}')\n continue\n ps = results['pix_scale']\n data = results['data']\n fulldata = results['fulldata']\n hwcs = results['wcs']\n wdw = results['window']\n xstart = results['xstart']\n ystart = results['ystart']\n xctr = results['xctr']\n yctr = results['yctr']\n filename = results['filename']\n\n log.info(f'Frame {frame}: {filename}')\n\n # check for reasonable data\n if np.sum(np.isfinite(data)) < 3:\n continue\n\n default_fwhm = param['fwhm']\n if param['fwhm_units'] == 'arcsec':\n default_fwhm /= ps\n try:\n psfr = float(param['psf_radius'])\n if param['aperture_units'] == 'arcsec':\n psfr /= ps\n except ValueError:\n # auto radius\n psfr = 2.15 * default_fwhm\n\n if (param['bg_inner'] is None\n or param['bg_width'] is None):\n do_bg = False\n skyrad = (0., 0.)\n else:\n do_bg = True\n try:\n bgrin = float(param['bg_inner'])\n if param['aperture_units'] == 'arcsec':\n bgrin /= ps\n except ValueError:\n bgrin = psfr + 0.2 * default_fwhm\n try:\n bgwid = float(param['bg_width'])\n if param['aperture_units'] == 'arcsec':\n bgwid /= ps\n bgrout = bgrin + bgwid\n except ValueError:\n bgrout = bgrin + 2.0 * default_fwhm\n\n if bgrout > bgrin:\n skyrad = (bgrin, bgrout)\n else:\n skyrad = (0., 0.)\n\n try:\n phot_par = pipecal_photometry(\n fulldata, np.full_like(fulldata, np.nan),\n srcpos=(xctr, yctr), fitsize=wdw, fwhm=default_fwhm,\n profile=param['model'], aprad=psfr,\n skyrad=skyrad, stamp_center=False, allow_badfit=True)\n except PipeCalError as err:\n log.warning(' Bad fit.')\n log.warning(f' {err}')\n continue\n\n peak, xcent, ycent, ra, dec, xfwhm, yfwhm, ellip, \\\n pa, pw_law, final_sum, bg_avg, bg_std = [np.nan] * 13\n bg_fit = 0.0\n for pp in phot_par:\n if pp['key'] == 'STPEAK':\n peak = pp['value'][0]\n elif pp['key'] == 'STCENTX':\n xcent = pp['value'][0]\n elif pp['key'] == 'STCENTY':\n ycent = pp['value'][0]\n elif pp['key'] == 'STFWHMX':\n xfwhm = pp['value'][0]\n elif pp['key'] == 'STFWHMY':\n yfwhm = pp['value'][0]\n elif pp['key'] == 'STANGLE':\n pa = pp['value'][0]\n elif pp['key'] == 'STPWLAW':\n pw_law = pp['value'][0]\n elif pp['key'] == 'STAPFLX':\n final_sum = pp['value'][0]\n elif pp['key'] == 'STAPSKY' and 
do_bg:\n bg_avg = pp['value'][0]\n elif pp['key'] == 'STAPSSTD' and do_bg:\n bg_std = pp['value']\n elif pp['key'] == 'STBKG':\n bg_fit = pp['value'][0]\n\n # check whether source is already in table\n limit = 2. * default_fwhm\n present = (int(frame) == self.ptable['Frame']) \\\n & (np.abs(self.ptable['X'] - (xcent + 1)) < limit) \\\n & (np.abs(self.ptable['Y'] - (ycent + 1)) < limit)\n if np.any(present):\n log.info(' Source already measured.')\n continue\n\n # check whether source is unreasonably large or small\n badfit = False\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n mfwhm = gmean([xfwhm, yfwhm])\n if np.isnan(mfwhm) or mfwhm > 20 or mfwhm < 1.0:\n log.warning(' Bad fit.')\n log.warning(' Calculated FWHM: {:.2f} pixels'.format(mfwhm))\n badfit = True\n mfwhm = np.nan\n ellip = np.nan\n pa = np.nan\n else:\n # calculate ellipticity and fix PA\n if xfwhm >= yfwhm:\n ellip = 1 - yfwhm / xfwhm\n else:\n ellip = 1 - xfwhm / yfwhm\n if pa <= 0:\n pa += 90\n else:\n pa -= 90\n\n # track flux by radial distance\n if param['show_plots']:\n y, x = np.mgrid[:wdw, :wdw]\n r = np.sqrt((x - xcent + xstart) ** 2\n + (y - ycent + ystart) ** 2)\n if badfit:\n moddata = None\n else:\n # get the equivalent 1D model from the profile fit\n # for plotting\n if param['model'] == 'gaussian':\n eqw = mfwhm * stats.gaussian_fwhm_to_sigma\n rmodel = modeling.models.Gaussian1D(peak, 0.0, eqw)\n else:\n n_1 = 1 / pw_law\n eqw = mfwhm / (2 * np.sqrt(2 ** n_1 - 1))\n rmodel = modeling.models.Moffat1D(\n peak, 0.0, eqw, pw_law)\n rmodel += modeling.models.Const1D(bg_fit)\n moddata = rmodel(r)\n\n # data for matplotlib viewer: primary is model;\n # scatter data and h/v lines are overplots\n rflat = r.ravel()\n dflat = data.ravel()\n sortidx = np.argsort(rflat)\n xdata = rflat[sortidx]\n overplots = [{'plot_type': 'scatter',\n 'args': [rflat, dflat],\n 'kwargs': {'marker': '*',\n 'c': dflat,\n 'label': 'Flux data'}},\n {'plot_type': 'hline',\n 'args': [0.0],\n 'kwargs': {'linestyle': ':',\n 'linewidth': 1,\n 'color': 'lightgray'}}]\n if moddata is not None:\n ydata = moddata.ravel()[sortidx]\n overplots.append({'plot_type': 'vline',\n 'args': [mfwhm / 2.0],\n 'kwargs': {\n 'linestyle': ':',\n 'linewidth': 1,\n 'color': '#ff7f0e',\n 'label': 'Fit HWHM'}})\n overplots.append({'plot_type': 'vline',\n 'args': [mfwhm],\n 'kwargs': {\n 'linestyle': ':',\n 'linewidth': 1,\n 'color': '#d62728',\n 'label': 'Fit FWHM'}})\n else:\n ydata = np.full_like(xdata, np.nan)\n\n title = f'Frame {frame}, x={xcent:.0f} y={ycent:.0f}'\n overplots.append({'plot_type': 'legend',\n 'args': []})\n plot_data = {'args': [xdata, ydata],\n 'kwargs': {\n 'title': title,\n 'xlabel': 'Distance (pixels)',\n 'ylabel': 'Flux'},\n 'plot_kwargs': {\n 'linestyle': '-',\n 'color': 'gray',\n 'label': f\"{param['model'].title()} profile\"},\n 'overplot': overplots}\n self.radial_data.append(plot_data)\n\n # add DS9 start index back into centroid and convert to RA/Dec\n xcent += 1\n ycent += 1\n if hwcs is not None:\n try:\n radec = hwcs.wcs_pix2world([[xcent, ycent, 1]], 1)\n except ValueError:\n try:\n radec = hwcs.wcs_pix2world([[xcent, ycent]], 1)\n except ValueError:\n radec = np.array([[None, None]])\n else:\n radec = np.array([[None, None]])\n\n # set region\n b0 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=green tag={{imexam}}'.format(xcent, ycent)\n self.run('regions', b0)\n b1 = 'circle({:f} {:f} {:f}) # ' \\\n 'color=green tag={{imexam}}'.format(xcent, ycent, psfr)\n self.run('regions', b1)\n if do_bg:\n b2 = 
'annulus({:f} {:f} {:f} {:f}) # ' \\\n 'color=red ' \\\n 'tag={{imexam}}'.format(xcent, ycent,\n skyrad[0], skyrad[1])\n self.run('regions', b2)\n\n self.ptable.add_row([frame, peak, xcent, ycent,\n radec[0, 0], radec[0, 1],\n mfwhm, mfwhm * ps, ellip, pa,\n final_sum, bg_avg, bg_std])\n\n self.ptable.sort(['Frame', 'Peak'])\n print_str = '\\n'.join(\n self.ptable.pformat(max_lines=-1, max_width=-1))\n log.info(f'\\nResults:\\n{print_str}\\n')", "def Trace1SL(self,Pts=(0,0.5),TOF_base=0.0,debug=0):\n \n #---------------Flow case determination------------------\n CaseID_a=0 #Normal Case\n CaseID_b=0 #Normal Case\n if (self.Qa0>0.00001 and self.Qa1<-0.00001): CaseID_a=1\n if (self.Qb0>0.00001 and self.Qb1<-0.00001): CaseID_b=1\n if (self.Qa0<-0.00001 and self.Qa1>0.00001): CaseID_a=2\n if (self.Qb0<-0.00001 and self.Qb1>0.00001): CaseID_b=2\n if (abs(self.c1)<0.00001): CaseID_a=3\n if (abs(self.c2)<0.00001): CaseID_b=3\n if (abs(self.Qa0)<0.00001 and abs(self.Qa1)<0.00001): CaseID_a=4\n if (abs(self.Qb0)<0.00001 and abs(self.Qb1)<0.00001): CaseID_b=4\n \n T=np.ones(4)*123e9 \n Qa,Qb=self.LinearFlux(Pts[0],Pts[1])\n #---------------Streamline Tracing------------------\n #1. Pesudo Time of flight T in 4 direction\n if (CaseID_a==0):\n T[0]=self.CalcT(self.c1,(Qa,self.Qa0))\n T[1]=self.CalcT(self.c1,(Qa,self.Qa1))\n if (CaseID_a==1):\n T[0]=T[1]=1.1e9 #Particle never leave in this direction\n if (CaseID_a==2):\n if(Qa>0.0001): #if Qa>0 leave on the right face\n T[0]=2.1e9\n T[1]=self.CalcT(self.c1,(Qa,self.Qa1))\n if(Qa<-0.0001): #if Qa<0 leave on the left face\n T[0]=self.CalcT(self.c1,(Qa,self.Qa0))\n T[1]=2.2e9\n if (abs(Qa)<0.00001): #particle in stagenent region, never leave in this direction\n T[0]=T[1]=0\n if (CaseID_a==3):\n T[0]=(0-Pts[0])/Qa\n T[1]=(1-Pts[0])/Qa\n if (CaseID_a==4):\n T[0]=T[1]=4.1e9\n \n if (CaseID_b==0):\n T[2]=self.CalcT(self.c2,(Qb,self.Qb0))\n T[3]=self.CalcT(self.c2,(Qb,self.Qb1))\n if (CaseID_b==1):\n T[2]=T[3]=1.1e9 #Particle never leave in this direction\n if (CaseID_b==2):\n if(Qb>0.0001): #if Qb>0 leave on the upper face\n T[2]=2.1e9\n T[3]=self.CalcT(self.c2,(Qb,self.Qb1))\n if(Qb<-0.0001): #if Qb<0 leave on the lower face\n T[2]=self.CalcT(self.c2,(Qb,self.Qb0))\n T[3]=2.2e9\n if (abs(Qb)<0.00001): #particle in stagenent region, never leave in this direction\n T[2]=T[3]=0\n if (CaseID_b==3):\n T[2]=(0-Pts[1])/Qb\n T[3]=(1-Pts[1])/Qb\n if (CaseID_b==4):\n T[2]=T[3]=4.1e9\n \n #Special treatment of dt<=0\n if (Pts[0]==0 and Qa>0.00001): T[0]=9.1e9\n if (Pts[0]==1 and Qa<-0.00001): T[1]=9.1e9\n if (Pts[1]==0 and Qb>0.00001): T[2]=9.1e9\n if (Pts[1]==1 and Qb<-0.00001): T[3]=9.1e9\n \n #Special treatment of intial point located on the no-flow boundary\n if (Pts[0]==0 and Qa==0): T[0]=0.0\n if (Pts[0]==1 and Qa==0): T[1]=0.0\n if (Pts[1]==0 and Qb==0): T[2]=0.0\n if (Pts[1]==1 and Qb==0): T[3]=0.0\n\n \n for i in range(len(T)):\n if T[i]<-0.000001:\n T[i]=9e9\n \n if (debug): print(\"Flow Case (x,y)-(alpha,beta)\",CaseID_a,CaseID_b)\n if (debug): print(\"Initial Point\",Pts)\n \n \n Te=min(T)\n self.Te=Te\n\n if (debug): print(T)\n\n #2. 
Calculate End point \n Pts_end=[0,0]\n Pts_end[0],Pts_end[1]=self.CalcPts(Te,Pts)\n \n if (debug): print(\"End Point\",Pts_end,'End Time',self.Te)\n if (debug): print(T)\n \n #3.Calculate the streamline nodes and TOF in both coordinates\n SL_T=np.linspace(0,self.Te,10)\n SL_tau=np.zeros(len(SL_T))+TOF_base\n SL_phy=np.zeros((len(SL_T),2))\n SL_trans=np.zeros((len(SL_T),2))\n \n for i in range(len(SL_T)):\n SL_trans[i,0],SL_trans[i,1]=self.CalcPts(SL_T[i],Pts)\n SL_phy[i,0],SL_phy[i,1]=self.Pts2Physic(Pts=(SL_trans[i,0],SL_trans[i,1]))\n SL_tau[i]+=self.T2Physic(SL_T[i],Pts)\n \n #fix the tiny error, the number of e-17\n if (SL_trans[i,0]<0.0000001): SL_trans[i,0]=0.0\n if (SL_trans[i,1]<0.0000001): SL_trans[i,1]=0.0\n if (SL_phy[i,0]<0.0000001): SL_phy[i,0]=0.0\n if (SL_phy[i,1]<0.0000001): SL_phy[i,1]=0.0\n \n #SL_tau=SL_tau-SL_tau[0]\n \n return self.Te,Pts_end,SL_trans,SL_phy,SL_T,SL_tau", "def test_2d_time_tran():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test_tp.ft2\")\n assert data.shape == (4096, 2048)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == -1525.10\n assert round(data[10,22],2) == 1731.94\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[253.90, -143.80])\n check_ppm_limits(dic,data,1,[174.84, 65.21])", "def apply_electronics_gain(full_frame, difference):\n #electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n #electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n electronics_gain_odd = [0.0601, 0.0596, 0.0604, 0.0605]\n electronics_gain_even = [0.0602, 0.0599, 0.0605, 0.0608]\n\n all_quads = []\n num_quads = full_frame.shape[0]\n for quads in range(0, num_quads):\n active_quad = full_frame[quads, :, :]\n if difference[quads] < 0: # Note: Difference is odd-even\n gain_even = 1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n elif difference[quads] > 0:\n gain_even = 1/electronics_gain_odd[quads]\n gain_odd = 1/electronics_gain_even[quads]\n gain_even = 1/electronics_gain_even[quads]\n gain_odd = 1/electronics_gain_odd[quads]\n spec_pix, spat_pix = active_quad.shape\n gain_applied_quad = np.array([[0]*spec_pix]*spat_pix)\n even_detector_active_quad = gain_even*active_quad[:, ::2]\n odd_detector_active_quad = gain_odd*active_quad[:, 1::2]\n\n gain_applied_quad = np.reshape(gain_applied_quad, (spec_pix, spat_pix))\n gain_applied_quad[:, ::2] = even_detector_active_quad\n gain_applied_quad[:, 1::2] = odd_detector_active_quad\n #print(np.max(gain_applied_quad))\n #cc\n all_quads.append(gain_applied_quad)\n #cc\n return np.array(all_quads)", "def analyse(self):\n self.__try_fitting()\n self.second.rotate()\n self.__try_fitting()", "def test_transform_track_change_conductor(self):\n track = Track(artist='Artist', album='Album', conductor='Conductor',\n tracknum=1, seconds=60)\n transform = Transform(1, cond_conductor=True, change_conductor=True,\n pattern_conductor='Conductor', to_conductor='Conductor 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.conductor, 'Conductor 2')\n self.assertEqual(track.transformed, True)", "def transform(self, previousimage):", "def formation(self, node1, node2):\n value1 = self.graph.node[node1]['opinion']\n value2 = self.graph.node[node2]['opinion']\n diff = abs(value1 - value2)\n if diff < self.threshold and diff > PRECISION:\n value_1 = value1 - self.param * (value1 - value2)\n value_2 = value2 - self.param * (value2 - value1)\n return value_1, value_2\n elif diff < PRECISION:\n 
return True, False\n else:\n return False, False", "def event_timex_analysis(event1, event2):\n tagged = tag(event1.text)\n base_time = event1.get_best_time()\n\n if base_time is not None:\n dt, trusted = base_time.to_datetime() # Get the datetime representation of the reference's best_time\n grounded_times = ground(tagged, dt) # Ground any timex tags to that time\n new_dates = [] # holds new dates constructed from the grounded datetimes\n\n for time in grounded_times:\n new_date = Date()\n if trusted['year']:\n new_date.year = time.year\n\n if trusted['month']:\n new_date.month = time.month\n\n if trusted['day']:\n new_date.day = time.day\n\n if trusted['hour']:\n new_date.hour = time.hour\n\n if trusted['minute']:\n new_date.minute = time.minute\n\n new_dates.append(new_date)\n\n if len(new_dates) == 0: # Nothing interesting found.\n return\n\n new_dates = sorted(new_dates, lambda x: x.precision(), reverse=True)\n best_date = new_dates[0]\n\n other_best_date = event2.get_best_time()\n if other_best_date is not None:\n if best_date.precision() > other_best_date.precision():\n event2.set_best_time(best_date)\n else:\n event2.set_best_time(best_date)", "def test_process_metadata_1(self):\n data_1 = ET.parse(\"data/metadata_1.xml\")\n data_2 = ET.parse(\"data/metadata_2.xml\")\n data_1_str = ET.tostring(data_1.getroot())\n data_2_str = ET.tostring(data_2.getroot())\n\n enu_T_unity= tesse_ros_bridge.enu_T_unity\n brh_T_blh = tesse_ros_bridge.brh_T_blh\n\n dict_1 = tesse_ros_bridge.utils.parse_metadata(data_1_str)\n dict_2 = tesse_ros_bridge.utils.parse_metadata(data_2_str)\n proc_1 = tesse_ros_bridge.utils.process_metadata(dict_1, 0, [0,0,0], np.identity(3))\n proc_2 = tesse_ros_bridge.utils.process_metadata(dict_2, dict_1['time'],\n proc_1['velocity'], np.identity(3))\n\n prev_enu_T_brh = proc_1['transform']\n enu_T_brh = proc_2['transform']\n prev_enu_T_brh[:,3] = enu_T_brh[:,3] = np.array([0,0,0,1])\n\n prev_unity_T_brh = brh_T_blh.dot(\n tf.transformations.quaternion_matrix(dict_1['quaternion']))\n unity_T_brh = brh_T_blh.dot(\n tf.transformations.quaternion_matrix(dict_2['quaternion']))\n\n dt = dict_2['time'] - dict_1['time']\n expected_ang_vel = Rotation.from_quat(\n tf.transformations.quaternion_from_matrix(np.transpose(\n prev_enu_T_brh).dot(enu_T_brh))).as_rotvec() / dt\n actual_ang_vel = proc_2['ang_vel']\n\n print \"\\nexpected ang_vel: \", expected_ang_vel\n print \"actual ang_vel: \", actual_ang_vel\n\n self.assertTrue(np.allclose(expected_ang_vel, actual_ang_vel))\n\n expected_accel = (proc_2['velocity'] - proc_1['velocity']) / \\\n (proc_2['time']-proc_1['time'])\n actual_accel = proc_2['acceleration']\n self.assertTrue(np.allclose(expected_accel, actual_accel))\n\n # TODO(marcus): add a test for angular rates in all three axes", "def track(self, old_frame, new_frame):\n \n global redetect\n self.old_points = np.reshape(self.old_points, (-1,1,2))\n \n # forward detection\n self.new_points, st, err = cv2.calcOpticalFlowPyrLK(old_frame, \n new_frame, \n self.old_points, \n None, \n **self.lk_params)\n # backward redetection\n old_points_recon, st, err = cv2.calcOpticalFlowPyrLK(new_frame, \n old_frame, \n self.new_points, \n None, \n **self.lk_params)\n \n # discard the points which have even a single pixel displacement\n # after the forward-backward error detection\n d = abs(self.old_points - old_points_recon).reshape(-1,2).max(-1)\n good_points = d < 1\n self.new_points = np.array([pt for pt in itertools.compress(self.new_points,\n good_points)])\n \n # at least two keypoints 
are neede for tracking\n if len(self.new_points.shape) < 2:\n redetect = True\n return (0,0,0,0)\n\n #self.remove_outliers()\n\n # update the new points\n self.old_points = self.new_points\n\n # get the updated bounding box\n x,y,w,h = cv2.boundingRect(self.new_points)\n self.bounding_box = (x,y,w,h)\n \n return (x,y,w,h)", "def test_converts_to_agisoft_and_back_exactly() -> None:\n # k[3:] must be zero\n cam = Camera(\n imgsz=(4288, 2848),\n f=(3100, 3200),\n c=(5, -4),\n k=(0.1, -0.05, 0.02),\n p=(0.03, 0.04),\n )\n xcam = Agisoft.from_camera(cam)\n residuals = Converter(xcam, cam).residuals()\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-11)\n cam2 = xcam.to_camera()\n np.testing.assert_equal(cam.to_array(), cam2.to_array())", "def set_thresh(self, t0=0.5, t1=None):\n self.t0 = t0\n self.t1 = t1", "def test_transform_track_with_two_transforms_undo(self):\n track = Track(artist='Artist', title='Title')\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(2,\n cond_artist=True, pattern_artist='Artist 2',\n change_artist=True, to_artist='Artist',\n ))\n\n self.assertEqual(track.last_transform, 0)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 2)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def apply_tracking1(td, alpha=0.98, threshold=-1):\n assert (alpha >= 0)\n assert (alpha <= 1)\n mix = 1 - alpha\n\n # with timer.Timer() as my_timer:\n track_x = center_x = td.width / 2\n track_y = center_y = td.height / 2\n threshold_sq = math.floor(center_y ** 2)\n\n if threshold > 0:\n threshold_sq = math.floor(threshold ** 2)\n\n copy = np.copy(td.data).view(np.recarray)\n for i in range(copy.size):\n datum = copy[i]\n y_val = datum.y\n x_val = datum.x\n distance = (track_x - x_val) ** 2 + (track_y - y_val) ** 2\n\n if distance <= threshold_sq:\n track_x = track_x * alpha + x_val * mix\n track_y = track_y * alpha + y_val * mix\n\n datum.y = round(y_val - track_y + center_y)\n datum.x = round(x_val - track_x + center_x)\n # print 'Applying tracker took %s seconds' % my_timer.secs\n # remove the events that are out of bounds\n return copy[(copy.x >= 0) & (copy.y >= 0) & (copy.x < td.width) & (\n copy.y < td.height)]", "def find_initial_position(img1, img2):\n # find points of interest in points\n img1_kp, img1_des = compute_orb(img1)\n img2_kp, img2_des = compute_orb(img2)\n\n # get closest 2 matches per point\n bf = cv2.BFMatcher(normType=cv2.NORM_HAMMING)\n matches = bf.knnMatch(img1_des, img2_des, k=2)\n\n good_matches = []\n pts1 = []\n pts2 = []\n # Lowe's ratio test\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good_matches.append(m)\n pts1.append(img1_kp[m.queryIdx].pt)\n pts2.append(img2_kp[m.trainIdx].pt)\n\n pts1 = np.float32(pts1)\n pts2 = np.float32(pts2)\n\n # essential matrix gives the motion of the points\n # to get motion of the camera, flip the inputs between pts1 and pts2\n essential_matrix, e_mask = cv2.findEssentialMat(pts2, pts1, intrinsic_camera_matrix)\n\n # select only inlier points as per the RANSAC method\n pts1 = pts1[e_mask.ravel() == 1]\n pts2 = pts2[e_mask.ravel() == 1]\n\n _, rotation, translation, mask, triangulated_points = cv2.recoverPose(essential_matrix, pts2, pts1, 
intrinsic_camera_matrix, distanceThresh=50)\n triangulated_points = np.asarray([np.divide(triangulated_points[0], triangulated_points[3]),\n np.divide(triangulated_points[1], triangulated_points[3]),\n np.divide(triangulated_points[2], triangulated_points[3])]).transpose()\n\n CAMERA_POSES.clear()\n CAMERA_POSES.append(np.hstack((np.identity(3), np.array([[0], [0], [0]]))))\n CAMERA_POSES.append(np.hstack((rotation, translation)))\n return rotation, translation, triangulated_points", "def check_transforms_match(self, transform: Mapping) -> None:\n xform_id = transform.get(TraceKeys.ID, \"\")\n if xform_id == id(self):\n return\n # TraceKeys.NONE to skip the id check\n if xform_id == TraceKeys.NONE:\n return\n xform_name = transform.get(TraceKeys.CLASS_NAME, \"\")\n warning_msg = transform.get(TraceKeys.EXTRA_INFO, {}).get(\"warn\")\n if warning_msg:\n warnings.warn(warning_msg)\n # basic check if multiprocessing uses 'spawn' (objects get recreated so don't have same ID)\n if torch.multiprocessing.get_start_method() in (\"spawn\", None) and xform_name == self.__class__.__name__:\n return\n raise RuntimeError(\n f\"Error {self.__class__.__name__} getting the most recently \"\n f\"applied invertible transform {xform_name} {xform_id} != {id(self)}.\"\n )", "def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n tel_azimuth = {}\n tel_altitude = {}\n\n #source = EventSource(filename, max_events=1)\n #subarray = source.subarray\n calib = CameraCalibrator(subarray)\n fit = HillasReconstructor(subarray)\n\n #for event in source:\n\n calib(event)\n\n hillas_dict = {}\n for tel_id, dl1 in event.dl1.tel.items():\n\n geom = subarray.tel[tel_id].camera.geometry\n tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth\n tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude\n\n mask = tailcuts_clean(\n geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0\n )\n\n dl1.parameters = ImageParametersContainer()\n\n try:\n moments = hillas_parameters(geom[mask], dl1.image[mask])\n hillas_dict[tel_id] = moments\n dl1.parameters.hillas = moments\n except HillasParameterizationError:\n dl1.parameters.hillas = HillasParametersContainer()\n continue\n\n # copy event container to modify it\n event_copy = deepcopy(event)\n # overwrite all image parameters but the last one with dummy ones\n for tel_id in list(event_copy.dl1.tel.keys())[:-1]:\n event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()\n fit(event_copy)\n assert event_copy.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to 0\n event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to NaN\n event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False", "def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 
0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters", "def process_scan(self, msg):\n if len(msg.ranges) <= 330:\n # throw out scans that don't have more than 90% of the data\n return\n # get pose according to the odometry\n p = PoseStamped(header=Header(stamp=msg.header.stamp, frame_id=\"base_link\"), pose=Pose())\n self.odom_pose = self.tf_listener.transformPose(\"odom\", p)\n self.base_pose = self.tf_listener.transformPose(\"base_laser_link\", p)\n # convert the odom pose to the tuple (x,y,theta)\n self.odom_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.odom_pose.pose)\n #(-0.0069918, 0.000338577, 0.048387097)\n #(1.0208817, 0.04827240, 0.048387)\n self.base_pose = OccupancyGridMapper.convert_pose_to_xy_and_theta(self.base_pose.pose)\n for i in range(len(msg.ranges)):\n if 0.0 < msg.ranges[i] < 5.0: #for any reding within 5 meters\n #Using the pose and the measurement nd the angle, find it in the world\n map_x = self.odom_pose[0] + msg.ranges[i] * cos(i * pi / 180.0 + self.odom_pose[2])\n map_y = self.odom_pose[1] + msg.ranges[i] * -sin(i * pi / 180.0 + self.odom_pose[2])\n\n #Relate that map measure with a place in the picture\n x_detect = int((map_x - self.origin[0]) / self.resolution)\n y_detect = int((map_y - self.origin[1]) / self.resolution)\n\n\n #Determine how to mark the location in the map, along with the stuff inbetween\n u = (map_x - self.odom_pose[0], map_y - self.odom_pose[1])\n magnitude = sqrt(u[0] ** 2 + u[1] ** 2)\n n_steps = max([1, int(ceil(magnitude / self.resolution))])\n u_step = (u[0] / (n_steps - 1), u[1] / (n_steps - 1))\n marked = set()\n for i in range(n_steps):\n curr_x = self.odom_pose[0] + i * u_step[0]\n curr_y = self.odom_pose[1] + i * u_step[1]\n if not (self.is_in_map(curr_x, curr_y)):\n break\n\n x_ind = int((curr_x - self.origin[0]) / self.resolution)\n y_ind = int((curr_y - self.origin[1]) / self.resolution)\n if x_ind == x_detect and y_ind == y_detect:\n break\n if not ((x_ind, y_ind) in marked):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_ind, y_ind] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_miss\n marked.add((x_ind, y_ind))\n if self.is_in_map(map_x, map_y):\n # odds ratio is updated according to the inverse sensor model\n self.odds_ratios[x_detect, y_detect] *= self.p_occ / (1 - self.p_occ) * self.odds_ratio_hit\n\n self.seq += 1\n # to save time, only publish the map every 10 scans that we process\n if self.seq % 10 == 0:\n # make occupancy grid\n map = OccupancyGrid()\n map.header.seq = self.seq\n self.seq += 1\n map.header.stamp = msg.header.stamp\n map.header.frame_id = \"map\" # the name of the coordinate frame of the map\n map.info.origin.position.x = self.origin[0]\n map.info.origin.position.y = self.origin[1]\n map.info.width = self.n\n map.info.height = self.n\n map.info.resolution = self.resolution\n map.data = [0] * self.n ** 2 # map.data stores the n by n grid in 
row-major order\n for i in range(self.n):\n for j in range(self.n):\n idx = i + self.n * j # this implements row major order\n if self.odds_ratios[i, j] < 1 / 5.0: # consider a cell free if odds ratio is low enough\n map.data[idx] = 0\n elif self.odds_ratios[i, j] > 5.0: # consider a cell occupied if odds ratio is high enough\n map.data[idx] = 100\n else: # otherwise cell is unknown\n map.data[idx] = -1\n self.pub.publish(map)\n\n # create the image from the probabilities so we can visualize using opencv\n im = np.zeros((self.odds_ratios.shape[0], self.odds_ratios.shape[1], 3))\n for i in range(im.shape[0]):\n for j in range(im.shape[1]):\n if self.odds_ratios[i, j] < 1 / 5.0:\n im[i, j, :] = 1.0\n elif self.odds_ratios[i, j] > 5.0:\n im[i, j, :] = 0.0\n else:\n im[i, j, :] = 0.5\n\n # compute the index of the odometry pose so we can mark it with a circle\n x_odom_index = int((self.odom_pose[0] - self.origin[0]) / self.resolution)\n y_odom_index = int((self.odom_pose[1] - self.origin[1]) / self.resolution)\n\n x_base_index = int((self.base_pose[0] - self.origin[0] - 1) / self.resolution)\n y_base_index = int((self.base_pose[1] - self.origin[1]) / self.resolution)\n\n\n # computer the ball locations so we can mark with a colored circle\n #TODO Track and relate the robot's angle pose for accuracy\n\n if self.depth_red > 0:\n self.y_camera_red = int(x_odom_index - self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_red = int(y_odom_index - self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n real_red_y = self.depth_red * cos(self.angle_diff_red + pi - self.odom_pose[2])\n real_red_x = self.depth_red * sin(self.angle_diff_red + pi - self.odom_pose[2])\n\n self.rcoor_pub.publish(Vector3(-real_red_x, -real_red_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_red, self.y_camera_red), 1, self.red)\n\n if self.depth_blue > 0:\n self.y_camera_blue = int(x_odom_index - self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_blue = int(y_odom_index - self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n real_blue_y = self.depth_blue * cos(self.angle_diff_blue + pi - self.odom_pose[2])\n real_blue_x = self.depth_blue * sin(self.angle_diff_blue + pi - self.odom_pose[2])\n\n self.bcoor_pub.publish(Vector3(-real_blue_x, -real_blue_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_blue, self.y_camera_blue), 1, self.blue)\n\n if self.depth_green > 0:\n self.y_camera_green = int(x_odom_index - self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_green = int(y_odom_index - self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_green, self.y_camera_green), 1, self.green)\n \n real_green_y = self.depth_green * cos(self.angle_diff_green + pi - self.odom_pose[2])\n real_green_x = self.depth_green * sin(self.angle_diff_green + pi - self.odom_pose[2])\n\n self.gcoor_pub.publish(Vector3(-real_green_x, -real_green_y/2, 0))\n\n if self.depth_yellow > 0:\n self.y_camera_yellow = int(x_odom_index - self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])/self.resolution)\n self.x_camera_yellow = int(y_odom_index - self.depth_yellow * sin(self.angle_diff_yellow + pi - 
self.odom_pose[2])/self.resolution)\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n \n real_yellow_y = self.depth_yellow * cos(self.angle_diff_yellow + pi - self.odom_pose[2])\n real_yellow_x = self.depth_yellow * sin(self.angle_diff_yellow + pi - self.odom_pose[2])\n\n self.ycoor_pub.publish(Vector3(-real_yellow_x, -real_yellow_y/2, 0))\n else:\n cv2.circle(im, (self.x_camera_yellow, self.y_camera_yellow), 1, self.yellow)\n\n # draw the robot\n cv2.circle(im, (y_odom_index, x_odom_index), 2, (255, 0, 0))\n \n # display the image resized\n cv2.imshow(\"map\", cv2.resize(im, (500, 500)))\n cv2.waitKey(20)", "def track_features(self):\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = []\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). 
Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r" ]
[ "0.53131735", "0.5240588", "0.5225582", "0.52202463", "0.5204122", "0.51401055", "0.51253384", "0.51180834", "0.51146686", "0.50602734", "0.5022961", "0.49833623", "0.49649775", "0.4957402", "0.4932199", "0.49016243", "0.48917276", "0.4890399", "0.48004982", "0.47914508", "0.47794846", "0.476652", "0.47421205", "0.47079486", "0.46961775", "0.46942806", "0.46724653", "0.46724087", "0.46602553", "0.4656398", "0.46510798", "0.464796", "0.46444303", "0.4643163", "0.4640168", "0.46350947", "0.46327144", "0.4624977", "0.46129036", "0.4602981", "0.46013036", "0.45913747", "0.45882022", "0.45858482", "0.45836663", "0.45790875", "0.457478", "0.45725125", "0.4571185", "0.45699725", "0.4566002", "0.4563572", "0.45615342", "0.4561188", "0.45547462", "0.45492846", "0.4544768", "0.452316", "0.45212463", "0.45188147", "0.45159554", "0.45153546", "0.45138156", "0.45086026", "0.45045447", "0.45002237", "0.4498711", "0.44937363", "0.44933143", "0.44885778", "0.4479424", "0.44738466", "0.446982", "0.44644588", "0.44631767", "0.4460425", "0.44588155", "0.44578552", "0.44552317", "0.44550452", "0.44546857", "0.44527137", "0.44506225", "0.4446778", "0.4444932", "0.4443548", "0.4443022", "0.44417387", "0.44407263", "0.44392833", "0.44378963", "0.4434138", "0.44301802", "0.44243175", "0.4424187", "0.44212496", "0.44203928", "0.44180274", "0.44107977", "0.44103035" ]
0.44811964
70
Define the series and frame for the target epi for motion correction. This is done by first creating a dictionary indexed by the time delay
def _SetBaseEpi(self):
    tinfo = {}
    for entry in self.entry_map['epi']:
        info = self.info[entry]
        if self.info[entry]['fmap_entry'] is None:
            tgt = info['anat_tgt']
        else:
            tgt = info['fmap_entry']
        tgt_time = self.info[tgt]['acqtime']
        plane = info['plane']
        if not tinfo.has_key(plane):
            tinfo[plane] = {}
        tdiff = abs(info['acqtime'] - tgt_time)
        tinfo[plane][tdiff] = (entry, 'start')
        tdiff = abs(info['acqtime'] + info['TR']*info['tdim']/1000 - tgt_time)
        tinfo[plane][tdiff] = (entry, 'end')
    bases = {}
    for plane in tinfo.keys():
        tdiffs = tinfo[plane].keys()
        tdiffs.sort()
        bases[plane] = tinfo[plane][tdiffs[0]]
    for epi in self.entry_map['epi']:
        plane = self.info[epi]['plane']
        base_entry, base = bases[plane]
        self.info[epi]['base_entry'] = base_entry
        self.info[epi]['base'] = base
        self.info[epi]['basefile'] = '%s'%(self.info[base_entry]['imgfile'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[:self.plant.dof],\n meas_vel=self.x[self.plant.dof:],\n meas_tau=np.zeros(self.plant.dof),\n meas_time=self.t)\n else:\n tau = np.zeros(self.plant.n_actuators)\n self.step(tau, dt, integrator=integrator)\n ee_pos = self.plant.forward_kinematics(self.x[:self.plant.dof])\n ee_pos.insert(0, self.plant.base)\n ani_plot_counter = 0\n for link in range(self.plant.n_links):\n self.animation_plots[ani_plot_counter].set_data(\n [ee_pos[link][0], ee_pos[link+1][0]],\n [ee_pos[link][1], ee_pos[link+1][1]])\n ani_plot_counter += 1\n self.animation_plots[ani_plot_counter].set_data(ee_pos[link+1][0],\n ee_pos[link+1][1])\n ani_plot_counter += 1\n\n set_arrow_properties(self.tau_arrowarcs[link],\n self.tau_arrowheads[link],\n float(np.squeeze(tau)),\n ee_pos[link][0],\n ee_pos[link][1])\n t = float(self.animation_plots[ani_plot_counter].get_text()[4:])\n t = round(t+dt, 3)\n self.animation_plots[ani_plot_counter].set_text(f\"t = {t}\")\n\n # if the animation runs slower than real time\n # the time display will be red\n if time.time() - t0 > dt:\n self.animation_plots[ani_plot_counter].set_color(\"red\")\n else:\n self.animation_plots[ani_plot_counter].set_color(\"black\")\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def PMTandPiezoPlot(datadir,run,event,gain): \n en = event\n mu = gain\n e = sbc.DataHandling.GetSBCEvent.GetEvent(datadir+'/'+run,en)\n print(e[\"fastDAQ\"].keys())\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n dcam = np.diff(cgate)\n \n p0=e[\"fastDAQ\"][\"Piezo1\"]\n p1 = e[\"fastDAQ\"][\"Piezo2\"]\n fdt = e[\"fastDAQ\"][\"time\"]\n runreconpath = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/%s/\"%run\n pmtdiffs = []\n diffs = []\n \n camOnTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] < -0.5]\n camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]\n print(len(camOnTimes))\n print(len(camOffTimes))\n \n acousticfilename = runreconpath+\"AcousticAnalysis_%s.bin\"%run\n a = sbc.DataHandling.ReadBinary.ReadBlock(acousticfilename)\n bubt0 = a[\"bubble_t0\"]\n \n pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n tracetimes = pmttracetime - pmtalign\n at0 = bubt0[en,0]\n at0_1 = bubt0[en,1]\n \n allxyzfname = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/SimpleXYZ_all.bin\"\n xyzf = sbc.DataHandling.ReadBinary.ReadBlock(allxyzfname)\n indices = [i for i,x in enumerate(xyzf[\"runid\"]) if str(x[0])+\"_\"+str(x[1]) == run]\n xyz_reconstructed = True\n if len(indices) > 0:\n runposreco = {\"ev\":[xyzf[\"ev\"][indices]],\"x\":[xyzf[\"bubX\"][indices]],\n \"y\":[xyzf[\"bubY\"][indices]],\"z\":[xyzf[\"bubZ\"][indices]]}\n z = runposreco[\"z\"][0][int(int(en))]\n else:\n print(\"no handscan?\")\n z = 1.5\n xyz_reconstructed = False\n lag_expected = (-23.387649*z - 261.020495)*1e-6 # fit from other analysis\n t0_expected_p0 = at0 + lag_expected\n t0_expected_p1 = at0_1 + lag_expected\n \n i=0\n candidates = []\n candidate_times=[]\n for t in (tracetimes-at0):\n \n if t<0.2 and t>-0.2:\n lastCamOff = 0\n for k in range(len(camOffTimes)):\n if t+at0 > camOffTimes[k]:\n lastCamOff = camOffTimes[k]\n elif t+at0 < camOffTimes[k]:\n break\n if 
t+at0-lastCamOff > 25e-6:\n \n pmtdiffs.append(t)\n trace = np.fabs(e[\"PMTtraces\"][\"traces\"][i][0])\n if max(trace) == 128:\n trace = pi.stitchTraces(trace,np.fabs(e[\"PMTtraces\"][\"traces\"][i][1]))\n dt = e[\"PMTtraces\"][\"dt\"][i][0]\n #baseline = np.mean(trace[0:50])\n #trace = trace - baseline\n [phe,n,totInt,pktimes] = pi.SBC_pulse_integrator_bressler(trace,dt)\n \n if phe != None:\n phe /= mu\n candidates.append(phe)\n candidate_times.append(t)\n i+=1\n candidate_phe = 0\n the_index = 0\n i=0\n near_trace_indices = []\n for t in candidate_times:\n if t > -500e-6 and t <0:\n near_trace_indices.append(list(tracetimes-at0).index(t))\n if candidates[i]>candidate_phe:\n candidate_phe = candidates[i]\n the_index = i\n i+=1\n \n if len(candidates) != 0:\n if max(candidates)>0:\n diffs.append(candidate_times[candidates.index(max(candidates))])\n fig,ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.plot(fdt,p0,'b',alpha=0.6, label = 'piezo 0')\n ax1.plot(fdt,p1,'k',alpha=0.2, label= 'piezo 1')\n for i in range(len(candidates)):\n if i == the_index:\n ax2.plot([candidate_times[i]+at0,candidate_times[i]+at0],[0,candidates[i]],'r',lw=4)\n else:\n ax2.plot([candidate_times[i]+at0,candidate_times[i]+at0],[0,candidates[i]],'y',lw=4)\n #ax2.plot([min(candidate_times),max(candidate_times)],[0,0],linewidth=2)\n ax1.plot([at0,at0],[-0.5,0.5],'b',linewidth=2, label = 'acoustic t0, p0')\n ax1.plot([at0_1,at0_1],[-0.5,0.5],'k',linewidth=2, label = 'acoustic t0, p1')\n \"\"\"\n if xyz_reconstructed:\n ax1.plot([t0_expected_p0,t0_expected_p0],[-0.5,0.5],'b:',linewidth=2, label = 'expected PMT t0, p0')\n ax1.plot([t0_expected_p1,t0_expected_p1],[-0.5,0.5],'k:',linewidth=2, label = 'expected PMT t0, p1')\n else:\n ax1.plot([t0_expected_p0,t0_expected_p0],[-0.5,0.5],'b:',linewidth=2, label = 'expected PMT t0, p0, center of chamber')\n ax1.plot([t0_expected_p1,t0_expected_p1],[-0.5,0.5],'k:',linewidth=2, label = 'expected PMT t0, p1, center of chamber')\n \"\"\"\n ax1.plot(fdt,cgate,'c')\n ax1.plot(fdt[:-1],dcam,'m')\n ax2.set_ylabel('pmt signal (phe)',fontsize=20)\n ax1.set_xlabel('time (s)',fontsize=20)\n ax1.set_ylabel('Acoustic signa(V)',fontsize=20)\n ax1.set_ylim([min(p1),max(p1)])\n ax2.set_xlim([-0.1,0.1])\n #ax2.set_ylim([0,5])\n ax1.legend()\n plt.show\n \n for j in near_trace_indices:\n trace = e[\"PMTtraces\"][\"traces\"][j][0]\n dt = e[\"PMTtraces\"][\"dt\"]\n dt_tr = dt[j][0]\n tPMT = np.arange(len(trace))*dt_tr\n plt.figure()\n plt.plot(tPMT,trace)\n plt.xlabel(\"t (s)\")\n plt.ylabel(\"PMT signal\")\n plt.show\n \n plt.figure()\n plt.plot(e[\"fastDAQ\"][\"time\"],e[\"fastDAQ\"][\"VetoCoinc\"])\n plt.ylabel(\"Veto Coincidence signal\",fontsize=18)\n plt.xlabel(\"time (s)\")\n plt.show", "def fill_dict(self):\n image_time = (self.nl_image - 1) * (self.tcycle * self.dec)\n slc_dict = default_slc_dict()\n ts = self.time_start\n sod = _dt.timedelta(hours=ts.hour, minutes=ts.minute,\n seconds=ts.second, microseconds=ts.microsecond).total_seconds()\n st0 = sod + self.nl_acc * self.tcycle * self.dec + \\\n (self.dec / 2.0) * self.tcycle # include time to center of decimation window\n az_step = self.ang_per_tcycle * self.dec\n prf = abs(1.0 / (self.tcycle * self.dec))\n seq = self.TX_RX_SEQ\n GPRI_TX_z = self.mapping_dict['TX_' + seq[0] + \"_position\"]\n GPRI_RX_z = self.mapping_dict['RX_' + seq[1] + seq[3] + \"_position\"]\n fadc = C / (2. 
* self.rps)\n # Antenna elevation angle\n ant_elev = _np.deg2rad(self.antenna_elevation)\n # Compute antenna position\n rx1_coord = [0., 0., 0.]\n rx2_coord = [0., 0., 0.]\n tx_coord = [0., 0., 0.]\n #\n # Topsome receiver\n rx1_coord[0] = xoff + ant_radius * _np.cos(\n ant_elev) # local coordinates of the tower: x,y,z, boresight is along +X axis, +Z is up\n rx1_coord[1] = 0.0 # +Y is to the right when looking in the direction of +X\n rx1_coord[2] = GPRI_RX_z + ant_radius * _np.sin(\n ant_elev) # up is Z, all antennas have the same elevation angle!\n # Bottomsome receiver\n rx2_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n rx2_coord[1] = 0.0\n rx2_coord[2] = GPRI_RX_z + ant_radius * _np.sin(ant_elev)\n tx_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n tx_coord[1] = 0.0\n tx_coord[2] = GPRI_TX_z + ant_radius * _np.sin(ant_elev)\n chan_name = 'CH1 lower' if seq[3] == 'l' else 'CH2 upper'\n slc_dict['title'] = str(ts) + ' ' + chan_name\n slc_dict['date'] = self.time_start.date()\n slc_dict['start_time'] = st0\n slc_dict['center_time'] = st0 + image_time / 2\n slc_dict['end_time'] = st0 + image_time\n slc_dict['range_samples'] = self.ns_out\n slc_dict['azimuth_lines'] = self.nl_tot_dec - 2 * self.nl_acc\n slc_dict['range_pixel_spacing'] = self.rps\n slc_dict['azimuth_line_time'] = self.tcycle * self.dec\n slc_dict['near_range_slc'] = self.rmin\n slc_dict['center_range_slc'] = (self.rmin + self.rmax) / 2\n slc_dict['far_range_slc'] = self.rmax\n slc_dict['radar_frequency'] = self.RF_center_freq\n slc_dict['adc_sampling_rate'] = fadc\n slc_dict['prf'] = prf\n slc_dict['chirp_bandwidth'] = self.RF_freq_max - self.RF_freq_min\n slc_dict['receiver_gain'] = 60 - self.IMA_atten_dB\n slc_dict['GPRI_TX_mode'] = self.TX_mode\n slc_dict['GPRI_TX_antenna'] = seq[0]\n slc_dict.add_parameter('GPRI_RX_antennas', seq[1] + seq[3])\n slc_dict['GPRI_tx_coord'] = [tx_coord[0], tx_coord[1], tx_coord[2]]\n slc_dict['GPRI_rx1_coord'] = [rx1_coord[0], rx1_coord[1], rx1_coord[2]]\n slc_dict['GPRI_rx2_coord'] = [rx2_coord[0], rx2_coord[1], rx2_coord[2]]\n slc_dict['GPRI_az_start_angle'] = self.az_start\n slc_dict['GPRI_az_angle_step'] = az_step\n slc_dict['GPRI_ant_elev_angle'] = self.antenna_elevation\n slc_dict['GPRI_ref_north'] = self.geographic_coordinates[0]\n slc_dict['GPRI_ref_east'] = self.geographic_coordinates[1]\n slc_dict['GPRI_ref_alt'] = self.geographic_coordinates[2]\n slc_dict['GPRI_geoid'] = self.geographic_coordinates[3]\n return slc_dict", "def tomoScanWithFrames(description, inBeamPosition, outOfBeamPosition, exposureTime=1., start=0., stop=180., step=0.1, darkFieldInterval=0, flatFieldInterval=0,\n imagesPerDark=10, imagesPerFlat=10, optimizeBeamInterval=0, pattern=\"default\", nframes=1, tomoRotationAxis=0, addNXEntry=True, autoAnalyse=True, additionalScannables=[]):\n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n try:\n darkFieldInterval = int(darkFieldInterval)\n flatFieldInterval = int(flatFieldInterval)\n optimizeBeamInterval = int(optimizeBeamInterval)\n \n image_key_frame = 3\n nframes = int(nframes)\n if nframes < 1:\n nframes = 1\n \n jns = beamline_parameters.JythonNameSpaceMapping(InterfaceProvider.getJythonNamespace())\n tomography_theta = jns.tomography_theta\n if tomography_theta is None:\n raise NameError(\"tomography_theta is not defined in Jython namespace\")\n tomography_shutter = jns.tomography_shutter\n if tomography_shutter is None:\n raise NameError(\"tomography_shutter is not defined in Jython namespace\")\n tomography_translation = 
jns.tomography_translation\n if tomography_translation is None:\n raise NameError(\"tomography_translation is not defined in Jython namespace\")\n \n tomography_detector = jns.tomography_detector\n if tomography_detector is None:\n raise NameError(\"tomography_detector is not defined in Jython namespace\")\n\n tomography_optimizer = jns.tomography_optimizer\n if tomography_optimizer is None:\n raise NameError(\"tomography_optimizer is not defined in Jython namespace\")\n\n tomography_time = jns.tomography_time\n if tomography_time is None:\n raise NameError(\"tomography_time is not defined in Jython namespace\")\n \n tomography_beammonitor = jns.tomography_beammonitor\n if tomography_beammonitor is None:\n raise NameError(\"tomography_beammonitor is not defined in Jython namespace\")\n \n tomography_camera_stage = jns.tomography_camera_stage\n if tomography_camera_stage is None:\n raise NameError(\"tomography_camera_stage is not defined in Jython namespace\")\n \n tomography_sample_stage = jns.tomography_sample_stage\n if tomography_sample_stage is None:\n raise NameError(\"tomography_sample_stage is not defined in Jython namespace\")\n \n tomo_additional_scannables = jns.tomography_additional_scannables\n if tomo_additional_scannables is None:\n raise NameError(\"tomo_additional_scannables is not defined in Jython namespace\")\n \n index = SimpleScannable()\n index.setCurrentPosition(0.0)\n index.setInputNames([\"imageNumber\"])\n index.setName(\"imageNumber\")\n index.configure()\n \n image_key = SimpleScannable()\n image_key.setCurrentPosition(0.0)\n image_key.setInputNames([\"image_key\"])\n image_key.setName(\"image_key\")\n image_key.configure()\n\n tomoScanDevice = make_tomoScanDevice(tomography_theta, tomography_shutter,\n tomography_translation, tomography_optimizer, image_key, index)\n\n# return tomoScanDevice\n #generate list of positions\n numberSteps = ScannableUtils.getNumberSteps(tomography_theta, start, stop, step)\n theta_points = []\n theta_points.append(start)\n previousPoint = start\n for i in range(numberSteps):\n nextPoint = ScannableUtils.calculateNextPoint(previousPoint, step);\n theta_points.append(nextPoint)\n previousPoint = nextPoint\n \n #generateScanPoints\n optimizeBeamNo = 0\n optimizeBeamYes = 1\n shutterOpen = 1\n shutterClosed = 0\n shutterNoChange = 2\n scan_points = []\n theta_pos = theta_points[0]\n index = 0\n #Added shutterNoChange state for the shutter. 
The scan points are added using the (pseudo) ternary operator, \n #if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1\n \n for i in range(imagesPerFlat): \n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1\n for frm in range(nframes): \n scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project if frm==0 else image_key_frame, index)) #first\n index = index + 1 \n imageSinceDark = 1\n imageSinceFlat = 1\n optimizeBeam = 0\n for i in range(numberSteps):\n theta_pos = theta_points[i + 1]\n for frm in range(nframes):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project if frm==0 else image_key_frame, index))#main image\n index = index + 1 \n \n imageSinceFlat = imageSinceFlat + 1\n if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))\n index = index + 1 \n imageSinceFlat = 0\n \n imageSinceDark = imageSinceDark + 1\n if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))\n index = index + 1 \n imageSinceDark = 0\n\n optimizeBeam = optimizeBeam + 1\n if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))\n index = index + 1 \n optimizeBeam = 0\n \n #add dark and flat only if not done in last steps\n if imageSinceFlat != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1\n if imageSinceDark != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1 \n# scan_points1 = generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, darkFieldInterval, flatFieldInterval,\n# imagesPerDark, imagesPerFlat, optimizeBeamInterval, pattern=pattern)\n# if pattern == 'default' or pattern == 'DFPFD':\n# i = 0\n# for pt1 in scan_points1:\n# pt = scan_points[i]\n# if pt1 != pt:\n# print \"Mismatch - please tell Kaz about your scan and its arguments!\"\n# print \"i = \", i\n# print \"pt = \", pt\n# print \"pt1 = \", pt1\n# i += 1\n #return None\n positionProvider = tomoScan_positions(start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat, \\\n inBeamPosition, outOfBeamPosition, optimizeBeamInterval, scan_points) \n scan_args = [tomoScanDevice, positionProvider, tomography_time, tomography_beammonitor, tomography_detector, exposureTime, tomography_camera_stage, tomography_sample_stage]\n #scan_args.append(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n 
#meta_add(\"RotationCoord_as_list\", [tomoRotationAxis])\n meta_add(\"approxCOR\", tomoRotationAxis)\n for scannable in additionalScannables:\n scan_args.append(scannable)\n for scannable in tomo_additional_scannables:\n scan_args.append(scannable)\n ''' setting the description provided as the title'''\n if not description == None: \n setTitle(description)\n else :\n setTitle(\"undefined\")\n \n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n if not dataFormat == \"NexusDataWriter\":\n handle_messages.simpleLog(\"Data format inconsistent. Setting 'gda.data.scan.datawriter.dataFormat' to 'NexusDataWriter'\")\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", \"NexusDataWriter\")\n scanObject = createConcurrentScan(scan_args)\n if addNXEntry:\n addNXTomoSubentry(scanObject, tomography_detector.name, tomography_theta.name)\n scanObject.runScan()\n if autoAnalyse:\n lsdp=jns.lastScanDataPoint()\n OSCommandRunner.runNoWait([\"/dls_sw/apps/tomopy/tomopy/bin/gda/tomo_at_scan_end_kz\", lsdp.currentFilename], OSCommandRunner.LOGOPTION.ALWAYS, None)\n return scanObject;\n except InterruptedException:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"User interrupted the scan\", exceptionType, exception, traceback, False)\n raise InterruptedException(\"User interrupted the scan\")\n except:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"Error during tomography scan\", exceptionType, exception, traceback, False)\n raise Exception(\"Error during tomography scan\", exception)\n finally:\n handle_messages.simpleLog(\"Data Format reset to the original setting: \" + dataFormat)\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", dataFormat)", "def interpret_parameters(self) :\n\n if hasattr(self,'exposure_schedule') and self.exposure_schedule is not None :\n if isinstance(self.exposure_schedule,float) :\n self.exposure_schedule = [np.repeat(self.exposure_schedule,24)]\n\n elif isinstance(self.exposure_schedule,int) :\n temp = self.exposure_schedule\n self.exposure_schedule = [np.zeros(24)]\n self.exposure_schedule[0][temp] = 1\n\n elif isinstance(self.exposure_schedule,dict) :\n temp = self.exposure_schedule\n self.exposure_schedule = [np.zeros(24)]\n for x in temp.items() :\n self.exposure_schedule[0][int(x[0])] = x[1] \n\n elif isinstance(self.exposure_schedule,np.ndarray) :\n if len(np.shape(self.exposure_schedule)) == 1 and np.shape(self.exposure_schedule)[0] == 24 :\n self.exposure_schedule = [self.exposure_schedule]\n elif len(np.shape(self.exposure_schedule)) == 2 and np.shape(self.exposure_schedule)[1] == 24 :\n # split an array of multiple schedules into a list of single schedule arrays\n self.exposure_schedule = np.split(self.exposure_schedule,np.shape(self.exposure_schedule)[0])\n else :\n raise ValueError(\"Exposure schedule not a comprehensible numpy array, \" +\n \"must be length 24 in first or second dimension\")\n\n elif isinstance(self.exposure_schedule,list) :\n if len(self.exposure_schedule) == 24 and all(isinstance(x,(int,float)) for x in self.exposure_schedule) :\n self.exposure_schedule = [np.array(self.exposure_schedule)]\n \n for i in range(len(self.exposure_schedule)) :\n if isinstance(self.exposure_schedule[i],float) :\n self.exposure_schedule[i] = np.repeat(self.exposure_schedule[i],24)\n\n elif isinstance(self.exposure_schedule[i],int) :\n temp = self.exposure_schedule[i]\n self.exposure_schedule[i] = np.zeros(24)\n self.exposure_schedule[i][temp] = 1\n\n 
elif isinstance(self.exposure_schedule[i],dict) :\n temp = self.exposure_schedule[i]\n self.exposure_schedule[i] = np.zeros(24)\n for x in temp.items() :\n self.exposure_schedule[i][int(x[0])] = x[1] \n\n elif isinstance(self.exposure_schedule[i],np.ndarray) :\n if not (len(np.shape(self.exposure_schedule[i])) == 1 \n and np.shape(self.exposure_schedule[i])[0] == 24 ):\n raise ValueError(\"Exposure schedule list contains an incomprehensible entry, \" + \n \"a numpy array that is not length 24\")\n \n elif isinstance(self.exposure_schedule[i],list) :\n if len(self.exposure_schedule[i]) == 24 :\n self.exposure_schedule[i] = np.array(self.exposure_schedule[i])\n else :\n raise ValueError(\"Exposure schedule list contains an incomprehensible entry, \" + \n \"a list that is not length 24\")\n \n else :\n raise TypeError(\"Exposure schedule list contains an incomprehensible entry\")\n\n else :\n raise TypeError(\"Exposure schedule must be a list of length-24 numpy arrays or similar\")\n ###################################################################################################### \n if hasattr(self,'year_selection') and self.year_selection is not None :\n if isinstance(self.year_selection,int) :\n if self.year_selection==0:\n self.year_selection = [np.array([x]) for x in self.dataset_years]\n else:\n self.year_selection = [np.array([self.year_selection])]\n elif isinstance(self.year_selection,np.ndarray) :\n if len(np.shape(self.year_selection)) == 1 :\n self.year_selection = [self.year_selection]\n else :\n raise ValueError(\"Year selection should be a list of numpy arrays, \" +\n \"provided numpy array has incomprehensible shape\")\n elif isinstance(self.year_selection,list) :\n if all([isinstance(x,int) for x in self.year_selection]) and all(x!=0 for x in self.year_selection) :\n self.year_selection = [np.array(self.year_selection)]\n else :\n i=0\n for k in range(len(self.year_selection)) :\n if isinstance(self.year_selection[i],int) :\n if self.year_selection[i] == 0 :\n temp = self.year_selection[0:i] + [np.array([x]) for x in self.dataset_years]\n if i != len(self.year_selection)-1 : \n temp = temp + self.year_selection[i+1:]\n self.year_selection = temp\n i = i + len(self.dataset_years) - 1\n else :\n self.year_selection[i] = np.array([self.year_selection[i]])\n elif isinstance(self.year_selection[i],list) :\n self.year_selection[i] = np.array(self.year_selection[i])\n elif not isinstance(self.year_selection[i],np.ndarray) :\n raise TypeError(\"Year selection list must contain ints, lists, or numpy arrays\")\n i=i+1\n else :\n raise TypeError(\"Year selection must be an int, numpy array, or list of numpy arrays\")\n\n for i in range(len(self.year_selection)) :\n if all(self.year_selection[i] == 0) :\n self.year_selection[i] = np.array(self.dataset_years)\n #####################################################################################################\n if hasattr(self,'units') and self.units is not None :\n if isinstance(self.units,str) :\n self.units = [self.units]\n elif isinstance(self.units,list) :\n if not all(isinstance(x,str) for x in self.units) :\n raise TypeError(\"Units input must be a list of strings\")\n else :\n raise TypeError(\"Units input must be a list of strings\")\n\n for i in range(len(self.units)) :\n if not isinstance(self.units[i],str) :\n raise TypeError(\"Units input must be a list of strings\")\n if self.units[i] not in [\"SED\",\"UVIh\",\"UVI\",\"J m-2\",\"W m-2\",\"mW m-2\"] :\n raise ValueError(\"Units input must be list of accepted unit 
strings, \" +\n \"those being SED, UVIh, J m-2, UVI, W m-2, or mW m-2\")\n\n\n if hasattr(self,'bin_width') :\n if self.bin_width is None :\n self.bin_width = []\n for unit in self.units :\n self.bin_width.append({\n \"SED\" : 0.1, \n \"J m-2\" : 10, \n \"UVI\" : 0.1, \n \"W m-2\" : 0.0025, \n \"mW m-2\" : 2.5\n }[unit])\n elif isinstance(self.bin_width,(int,float)) :\n self.bin_width = [self.bin_width]\n\n\n return self", "def sample_trajectory(self, env, animate_this_episode, is_evaluation):\n # Using current task with meta inside\n env.reset_task(is_evaluation=is_evaluation)\n stats = []\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n ep_steps = 0\n steps = 0\n\n num_samples = max(self.history, self.max_path_length + 1)\n meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))\n rewards = []\n\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.1)\n\n if ep_steps == 0:\n ob = env.reset()\n # first meta ob has only the observation\n # set a, r, d to zero, construct first meta observation in meta_obs\n # YOUR CODE HERE\n ac = np.zeros(self.ac_dim); rew = np.zeros(self.reward_dim); done = np.zeros(self.terminal_dim)\n meta_obs[steps, :] = np.concatenate((ob, ac, rew, done))\n steps += 1\n\n # index into the meta_obs array to get the window that ends with the current timestep\n # please name the windowed observation `in_` for compatibilty with the code that adds to the replay buffer (lines 418, 420)\n # YOUR CODE HERE\n # padding for input obs size\n sample_action_in_ = meta_obs[steps-self.history:steps, :] if steps>=self.history else np.squeeze(np.concatenate(([meta_obs[0,:], ] * (self.history - steps), meta_obs[:steps, :]), axis=0))\n # need to clear hidden size, in order to avoid previous hidden state as it may be generated by the other totally different task (env setting may be changed)\n hidden = np.zeros((1, self.gru_size), dtype=np.float32)\n\n # get action from the policy\n # YOUR CODE HERE\n # Tensor(\"ob:0\", shape=(?, 1, 10), dtype=float32)\n # print(self.sy_ob_no)\n # Tensor(\"hidden:0\", shape=(?, 32), dtype=float32)\n # print(self.sy_hidden)\n ac = self.sess.run(self.sy_sampled_ac, feed_dict={\n self.sy_ob_no: sample_action_in_.reshape(-1, self.history, self.meta_ob_dim),\n self.sy_hidden: hidden,\n })\n assert len(ac) == 1\n ac = ac[0]\n\n # step the environment\n # YOUR CODE HERE\n ob, rew, done, _= env.step(ac)\n\n ep_steps += 1\n\n done = bool(done) or ep_steps == self.max_path_length\n # construct the meta-observation and add it to meta_obs\n # YOUR CODE HERE\n meta_obs[steps, :] = np.concatenate((ob, ac, [rew], [done]))\n\n rewards.append(rew)\n steps += 1\n\n in_ = meta_obs[steps, :]\n # add sample to replay buffer\n if is_evaluation:\n self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n else:\n self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)\n\n # start new episode\n if done:\n # compute stats over trajectory\n s = dict()\n s['rewards']= rewards[-ep_steps:]\n s['ep_len'] = ep_steps\n stats.append(s)\n ep_steps = 0\n\n if steps >= num_samples:\n break\n\n return steps, stats", "def extract_trans_history(self, mode=\"GO\", fps=30, max_frames=None, post_frames=0, verbose=False) -> dict:\n dataset = self.dataset\n assert mode in [\"GO\", \"STOP\"], \"Transition type should be STOP or GO\"\n ids = list(dataset.keys())\n samples = 
{}\n j = 0\n step = 30 // fps\n assert isinstance(step, int)\n for idx in ids:\n vid_id = copy.deepcopy(dataset[idx]['video_number'])\n frames = copy.deepcopy(dataset[idx]['frames'])\n bbox = copy.deepcopy(dataset[idx]['bbox'])\n action = copy.deepcopy(dataset[idx]['action'])\n cross = copy.deepcopy(dataset[idx]['cross'])\n behavior = copy.deepcopy(dataset[idx]['behavior'])\n traffic_light = copy.deepcopy(dataset[idx]['traffic_light'])\n attributes = copy.deepcopy(dataset[idx]['attributes'])\n next_transition = copy.deepcopy(dataset[idx][\"next_transition\"])\n for i in range(len(frames)):\n key = None\n old_id = None\n d1 = min(i, 5)\n d2 = min(len(frames) - i - 1, 5)\n if mode == \"GO\":\n if next_transition[i] == 0 and action[i] == 1 and action[i - d1] == 0 and action[i + d2] == 1:\n j += 1\n new_id = \"{:04d}\".format(j) + \"_\" + self.name\n key = \"JG_\" + new_id\n old_id = idx\n ae = np.array(action[i::-step])\n ce = np.array(np.nonzero(ae == 1))\n d_pre = ce[0][1] - 1 if ce.size > 1 else len(ae) - 1\n ap = np.array(action[i::step])\n cp = np.array(np.nonzero(ap == 0))\n d_pos = cp[0][0] if cp.size > 0 else len(ap)\n if mode == \"STOP\":\n if next_transition[i] == 0 and action[i] == 0 and action[i - d1] == 1 and action[i + d2] == 0:\n j += 1\n new_id = \"{:04d}\".format(j) + \"_\" + self.name\n key = \"JS_\" + new_id\n old_id = idx\n ae = np.array(action[i::-step])\n ce = np.array(np.nonzero(ae == 0))\n d_pre = ce[0][1] - 1 if ce.size > 1 else len(ae) - 1\n ap = np.array(action[i::step])\n cp = np.array(np.nonzero(ap == 1))\n d_pos = cp[0][0] if cp.size > 0 else len(ap)\n if key is not None:\n if max_frames is None:\n t = None\n else:\n t = i - max_frames * step if (i - max_frames * step >= 0) else None\n i = i + min(post_frames, d_pos) * step\n samples[key] = {}\n samples[key][\"source\"] = \"JAAD\"\n samples[key][\"old_id\"] = old_id\n samples[key]['video_number'] = vid_id\n samples[key]['frame'] = frames[i:t:-step]\n samples[key]['frame'].reverse()\n samples[key]['bbox'] = bbox[i:t:-step]\n samples[key]['bbox'].reverse()\n samples[key]['action'] = action[i:t:-step]\n samples[key]['action'].reverse()\n samples[key]['cross'] = cross[i:t:-step]\n samples[key]['cross'].reverse()\n samples[key]['behavior'] = behavior[i:t:-step]\n samples[key]['behavior'].reverse()\n samples[key]['traffic_light'] = traffic_light[i:t:-step]\n samples[key]['traffic_light'].reverse()\n samples[key]['attributes'] = attributes\n samples[key]['pre_state'] = d_pre\n samples[key]['post_state'] = d_pos\n samples[key]['type'] = mode\n samples[key]['fps'] = fps\n if verbose:\n keys = list(samples.keys())\n pids = []\n num_frames = 0\n for k in keys:\n pids.append(samples[k]['old_id'])\n num_frames += len(samples[k]['frame'])\n print(f\"Extract {len(pids)} {mode} history samples from {self.name} dataset in JAAD ,\")\n print(f\"samples contain {len(set(pids))} unique pedestrians and {num_frames} frames.\")\n\n return samples", "def next_target(self, old_sInd, modes):\n\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n Comp = self.Completeness\n TL = self.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n \n # create DRM\n DRM = {}\n \n # allocate settling time + overhead time\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy() + Obs.settlingTime + modes[0]['syst']['ohTime']\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy() + Obs.settlingTime + modes[0]['syst']['ohTime']\n\n # look for available targets\n # 1. 
initialize arrays\n slewTimes = np.zeros(TL.nStars)*u.d\n fZs = np.zeros(TL.nStars)/u.arcsec**2\n dV = np.zeros(TL.nStars)*u.m/u.s\n intTimes = np.zeros(TL.nStars)*u.d\n obsTimes = np.zeros([2,TL.nStars])*u.d\n sInds = np.arange(TL.nStars)\n \n # 2. find spacecraft orbital START positions (if occulter, positions \n # differ for each star) and filter out unavailable targets \n sd = None\n if OS.haveOcculter == True:\n sd = Obs.star_angularSep(TL, old_sInd, sInds, tmpCurrentTimeAbs)\n obsTimes = Obs.calculate_observableTimes(TL,sInds,tmpCurrentTimeAbs,self.koMap,self.koTimes,modes[0])\n slewTimes = Obs.calculate_slewTimes(TL, old_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs) \n \n # 2.1 filter out totTimes > integration cutoff\n if len(sInds.tolist()) > 0:\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\n \n # start times, including slew times\n startTimes = tmpCurrentTimeAbs.copy() + slewTimes\n startTimesNorm = tmpCurrentTimeNorm.copy() + slewTimes\n\n # 2.5 Filter stars not observable at startTimes\n try:\n koTimeInd = np.where(np.round(startTimes[0].value)-self.koTimes.value==0)[0][0] # find indice where koTime is startTime[0]\n sInds = sInds[np.where(np.transpose(self.koMap)[koTimeInd].astype(bool)[sInds])[0]]# filters inds by koMap #verified against v1.35\n except:#If there are no target stars to observe \n sInds = np.asarray([],dtype=int)\n \n # 3. filter out all previously (more-)visited targets, unless in \n if len(sInds.tolist()) > 0:\n sInds = self.revisitFilter(sInds, tmpCurrentTimeNorm)\n\n # 4.1 calculate integration times for ALL preselected targets\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, modes[0])\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife)#Maximum intTime allowed\n\n if len(sInds.tolist()) > 0:\n if OS.haveOcculter == True and old_sInd is not None:\n sInds,slewTimes[sInds],intTimes[sInds],dV[sInds] = self.refineOcculterSlews(old_sInd, sInds, slewTimes, obsTimes, sd, mode) \n endTimes = tmpCurrentTimeAbs.copy() + intTimes + slewTimes\n else: \n intTimes[sInds] = self.calc_targ_intTime(sInds, startTimes[sInds], modes[0])\n sInds = sInds[np.where(intTimes[sInds] <= maxIntTime)] # Filters targets exceeding end of OB\n endTimes = startTimes + intTimes\n \n if maxIntTime.value <= 0:\n sInds = np.asarray([],dtype=int)\n\n # 5.1 TODO Add filter to filter out stars entering and exiting keepout between startTimes and endTimes\n \n # 5.2 find spacecraft orbital END positions (for each candidate target), \n # and filter out unavailable targets\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try: # endTimes may exist past koTimes so we have an exception to hand this case\n koTimeInd = np.where(np.round(endTimes[0].value)-self.koTimes.value==0)[0][0]#koTimeInd[0][0] # find indice where koTime is endTime[0]\n sInds = sInds[np.where(np.transpose(self.koMap)[koTimeInd].astype(bool)[sInds])[0]]# filters inds by koMap #verified against v1.35\n except:\n sInds = np.asarray([],dtype=int)\n\n # 6. choose best target from remaining\n if len(sInds.tolist()) > 0:\n # choose sInd of next target\n sInd, waitTime = self.choose_next_target(old_sInd, sInds, slewTimes, intTimes[sInds])\n \n if sInd == None and waitTime is not None:#Should Choose Next Target decide there are no stars it wishes to observe at this time.\n self.vprint('There are no stars Choose Next Target would like to Observe. 
Waiting %dd'%waitTime.value)\n return DRM, None, None, waitTime, None\n elif sInd == None and waitTime == None:\n self.vprint('There are no stars Choose Next Target would like to Observe and waitTime is None')\n return DRM, None, None, waitTime, None\n # store selected star integration time\n det_mode = copy.deepcopy(modes[0])\n if self.WAint[sInd] > modes[1]['IWA'] and self.WAint[sInd] < modes[1]['OWA']:\n det_mode['BW'] = det_mode['BW'] + modes[1]['BW']\n det_mode['OWA'] = modes[1]['OWA']\n det_mode['inst']['sread'] = det_mode['inst']['sread'] + modes[1]['inst']['sread']\n det_mode['inst']['idark'] = det_mode['inst']['idark'] + modes[1]['inst']['idark']\n det_mode['inst']['CIC'] = det_mode['inst']['CIC'] + modes[1]['inst']['CIC']\n det_mode['syst']['optics'] = np.mean((det_mode['syst']['optics'], modes[1]['syst']['optics']))\n det_mode['instName'] = 'combined'\n intTime = self.calc_targ_intTime(sInd, startTimes[sInd], det_mode)[0]\n else:\n intTime = intTimes[sInd]\n \n # if no observable target, advanceTime to next Observable Target\n else:\n self.vprint('No Observable Targets at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\n return DRM, None, None, None, None\n \n # update visited list for selected star\n self.starVisits[sInd] += 1\n # store normalized start time for future completeness update\n self.lastObsTimes[sInd] = startTimesNorm[sInd]\n \n # populate DRM with occulter related values\n if OS.haveOcculter:\n DRM = Obs.log_occulterResults(DRM,slewTimes[sInd],sInd,sd[sInd],dV[sInd])\n return DRM, sInd, intTime, waitTime, det_mode\n\n return DRM, sInd, intTime, waitTime, det_mode", "def test_single_ended_trans_att_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"ambient\": [slice(0.52 * cable_len, 0.58 * cable_len)],\n \"cold\": [\n slice(0.125 * 
cable_len, 0.25 * cable_len),\n slice(0.65 * cable_len, 0.70 * cable_len),\n ],\n \"warm\": [slice(0.25 * cable_len, 0.375 * cable_len)],\n }\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # test `trans_att` related functions\n # Clear out old results\n ds_test.set_trans_att([])\n\n assert ds_test.trans_att.size == 0, \"clear out trans_att config\"\n\n del_keys = []\n for k, v in ds_test.data_vars.items():\n if \"trans_att\" in v.dims:\n del_keys.append(k)\n\n assert len(del_keys) == 0, \"clear out trans_att config\"\n\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing alpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )", "def calc_frame_time(instrument, aperture, xdim, ydim, amps):\n instrument = instrument.lower()\n if instrument == \"nircam\":\n xs = xdim\n ys = ydim\n colpad = 12\n\n # Fullframe\n if amps == 4:\n rowpad = 1\n fullpad = 1\n else:\n # All subarrays\n rowpad = 2\n fullpad = 0\n if ((xdim <= 8) & (ydim <= 8)):\n # The smallest subarray\n rowpad = 3\n\n elif instrument == \"niriss\":\n xs = ydim\n ys = xdim\n colpad = 12\n\n # Fullframe\n if amps == 4:\n rowpad = 1\n fullpad = 1\n else:\n rowpad = 2\n fullpad = 0\n\n elif instrument == 'fgs':\n xs = ydim\n ys = xdim\n colpad = 6\n if 'acq1' in aperture.lower():\n colpad = 12\n rowpad = 1\n if amps == 4:\n fullpad = 1\n else:\n fullpad = 0\n\n return ((1.0 * xs / amps + colpad) * (ys + rowpad) + fullpad) * 1.e-5", "def 
_electron_multiplier(self, hdr):\n d = {}\n d['em yield'], d['em background'], d['em deadtime'] = \\\n unpack(self._bo + 'd 2i', hdr.read(16))\n return d", "def TwoStage(Ref,Q,Te,Tc,DTsh,DTsc,eta_oi,f_p,Tsat_ic,DTsh_ic,Ts_Ph='Ph',prints=False,skipPlot=False,axis=None,**kwargs):\n\n warnings.warn(\"This function has been deprecated. PLease consider converting it to an object inheriting from \\\"BaseCycle\\\".\",DeprecationWarning)\n\n T=np.zeros((8))\n h=np.zeros_like(T)\n p=np.zeros_like(T)\n s=np.zeros_like(T)\n rho=np.zeros_like(T)\n T[0]=np.NAN\n s[0]=np.NAN\n T[1]=Te+DTsh\n pe=PropsSI('P','T',Te,'Q',1.0,Ref)\n pc=PropsSI('P','T',Tc,'Q',1.0,Ref)\n pic=PropsSI('P','T',Tsat_ic,'Q',1.0,Ref)\n Tbubble_c=PropsSI('T','P',pc,'Q',0,Ref)\n Tbubble_e=PropsSI('T','P',pe,'Q',0,Ref)\n\n h[1]=PropsSI('H','T',T[1],'P',pe,Ref)\n s[1]=PropsSI('S','T',T[1],'P',pe,Ref)\n rho[1]=PropsSI('D','T',T[1],'P',pe,Ref)\n T[5]=Tbubble_c-DTsc\n h[5]=PropsSI('H','T',T[5],'P',pc,Ref)\n s[5]=PropsSI('S','T',T[5],'P',pc,Ref)\n rho[5]=PropsSI('D','T',T[5],'P',pc,Ref)\n mdot=Q/(h[1]-h[5])\n\n rho1=PropsSI('D','T',T[1],'P',pe,Ref)\n h2s=PropsSI('H','S',s[1],'P',pic,Ref)\n Wdot1=mdot*(h2s-h[1])/eta_oi\n h[2]=h[1]+(1-f_p)*Wdot1/mdot\n T[2]=PropsSI('T','H',h[2],'P',pic,Ref)\n s[2]=PropsSI('S','T',T[2],'P',pic,Ref)\n rho[2]=PropsSI('D','T',T[2],'P',pic,Ref)\n T[3]=288\n p[3]=pic\n h[3]=PropsSI('H','T',T[3],'P',pic,Ref)\n s[3]=PropsSI('S','T',T[3],'P',pic,Ref)\n rho[3]=PropsSI('D','T',T[3],'P',pic,Ref)\n rho3=PropsSI('D','T',T[3],'P',pic,Ref)\n h4s=PropsSI('H','T',s[3],'P',pc,Ref)\n Wdot2=mdot*(h4s-h[3])/eta_oi\n h[4]=h[3]+(1-f_p)*Wdot2/mdot\n T[4]=PropsSI('T','H',h[4],'P',pc,Ref)\n s[4]=PropsSI('S','T',T[4],'P',pc,Ref)\n rho[4]=PropsSI('D','T',T[4],'P',pc,Ref)\n\n sbubble_e=PropsSI('S','T',Tbubble_e,'Q',0,Ref)\n sbubble_c=PropsSI('S','T',Tbubble_c,'Q',0,Ref)\n sdew_e=PropsSI('S','T',Te,'Q',1,Ref)\n sdew_c=PropsSI('S','T',Tc,'Q',1,Ref)\n\n hsatL=PropsSI('H','T',Tbubble_e,'Q',0,Ref)\n hsatV=PropsSI('H','T',Te,'Q',1,Ref)\n ssatL=PropsSI('S','T',Tbubble_e,'Q',0,Ref)\n ssatV=PropsSI('S','T',Te,'Q',1,Ref)\n vsatL=1/PropsSI('D','T',Tbubble_e,'Q',0,Ref)\n vsatV=1/PropsSI('D','T',Te,'Q',1,Ref)\n x=(h[5]-hsatL)/(hsatV-hsatL)\n s[6]=x*ssatV+(1-x)*ssatL\n T[6]=x*Te+(1-x)*Tbubble_e\n rho[6]=1.0/(x*vsatV+(1-x)*vsatL)\n\n h[6]=h[5]\n h[7]=h[1]\n s[7]=s[1]\n T[7]=T[1]\n p=[np.nan,pe,pic,pic,pc,pc,pe,pe]\n COP=Q/(Wdot1+Wdot2)\n RE=h[1]-h[6]\n\n if prints==True:\n print('x5:',x)\n print('COP:', COP)\n print('COPH', (Q+Wdot1+Wdot2)/(Wdot1+Wdot2))\n print(T[2]-273.15,T[4]-273.15,p[2]/p[1],p[4]/p[3])\n print(mdot,mdot*(h[4]-h[5]),pic)\n print('Vdot1',mdot/rho1,'Vdisp',mdot/rho1/(3500/60.)*1e6/0.7)\n print('Vdot2',mdot/rho3,'Vdisp',mdot/rho3/(3500/60.)*1e6/0.7)\n print(mdot*(h[4]-h[5]),Tc-273.15)\n for i in range(1,len(T)-1):\n print('%d & %g & %g & %g & %g & %g \\\\\\\\' %(i,T[i]-273.15,p[i],h[i],s[i],rho[i]))\n else:\n print(Tsat_ic,COP)\n\n if skipPlot==False:\n if axis==None:\n ax=matplotlib.pyplot.gca()\n else:\n ax=axis\n if Ts_Ph in ['ph','Ph']:\n ax.plot(h,p)\n elif Ts_Ph in ['Ts','ts']:\n s_copy=s.copy()\n T_copy=T.copy()\n for i in range(1,len(s)-1):\n ax.plot(s[i],T[i],'bo',mfc='b',mec='b')\n dT=[0,-5,5,-20,5,5,5]\n ds=[0,0.05,0,0,0,0,0]\n ax.text(s[i]+ds[i],T[i]+dT[i],str(i))\n\n s=list(s)\n T=list(T)\n s.insert(7,sdew_e)\n T.insert(7,Te)\n s.insert(5,sbubble_c)\n T.insert(5,Tbubble_c)\n s.insert(5,sdew_c)\n T.insert(5,Tc)\n\n ax.plot(s,T)\n s=s_copy\n T=T_copy\n else:\n raise TypeError('Type of Ts_Ph invalid')\n return COP", "def 
__init__(self,\n debug=False,\n urdf_version=None,\n control_time_step=0.005,\n action_repeat=5,\n control_latency=0,\n pd_latency=0,\n on_rack=False,\n motor_kp=1.0,\n motor_kd=0.02,\n render=False,\n num_steps_to_log=2000,\n env_randomizer=None,\n log_path=None,\n signal_type='ik',\n target_position=None,\n backwards=None,\n gait_type='trot',\n terrain_type='plane',\n terrain_id='plane',\n mark='base',\n ):\n self.phase = 0\n\n self._gait_type = gait_type \n # for observation space bounding \n self.max_speed = 1.0\n self.min_speed = 0.5 # change back to 0.2 for OLD TD3 model evaluation\n \n self.min_side_speed = 0.0\n self.max_side_speed = 0.0\n\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des = [self.speed, self.side_speed]\n\n # Initialization variables for periodic reward sum composition\n self.theta_FL = phase_constants.PHASE_VALS[self._gait_type]['front_left']\n self.theta_FR = phase_constants.PHASE_VALS[self._gait_type]['front_right']\n self.theta_RL = phase_constants.PHASE_VALS[self._gait_type]['rear_left']\n self.theta_RR = phase_constants.PHASE_VALS[self._gait_type]['rear_right']\n\n self.min_swing_ratio = 0.6\n self.max_swing_ratio = 0.8\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n super(rexPeriodicRewardEnv,\n self).__init__(urdf_version=urdf_version,\n accurate_motor_model_enabled=True,\n motor_overheat_protection=False,\n motor_kp=motor_kp,\n motor_kd=motor_kd,\n remove_default_joint_damping=False,\n control_latency=control_latency,\n pd_latency=pd_latency,\n on_rack=on_rack,\n render=render,\n num_steps_to_log=num_steps_to_log,\n env_randomizer=env_randomizer,\n log_path=log_path,\n control_time_step=control_time_step,\n action_repeat=action_repeat,\n target_position=target_position,\n signal_type=signal_type,\n backwards=backwards,\n debug=debug,\n terrain_id=terrain_id,\n terrain_type=terrain_type,\n mark=mark,\n ratio=self.ratio,\n forward_reward_cap=5\n )\n\n self.height_des = 0.206 # this is init standing height for rex\n\n self.cycle_complete = 0\n self.cycle_len = 1000 # this is L\n \n # vonmises variables\n self.kappa = phase_constants.VON_MISES_KAPPA\n\n rex_joints = p.getNumJoints(bodyUniqueId=self.rex.quadruped)\n link_name_to_ID = {}\n for i in range(rex_joints):\n name = p.getJointInfo(self.rex.quadruped, i)[12].decode('UTF-8')\n link_name_to_ID[name] = i\n\n self.link_name_to_ID = link_name_to_ID\n self.toe_pos_last = { 'front_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_left_toe_link'])[0],\n 'front_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_right_toe_link'])[0],\n 'rear_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_left_toe_link'])[0],\n 'rear_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_right_toe_link'])[0]\n\n } \n\n print('Using Periodic Reward Composition Rex Environment')", "def config_independent_frames(self):\n return {'standard': 'dispname','bias': None, 'dark': None}", "def set_exposure_times(self, exposure_time=None, duration=None,\n start_time=None, mid_time=None, end_time=None):\n import time, datetime\n # Modified Julian date of the \"zero epoch\" of the time library (1/1/70)\n MJD_ZEROPOINT = 40587.0\n # Number of seconds per day.\n SECONDS_PER_DAY = 86400.0\n if hasattr(self, 'meta') and hasattr(self.meta, 'exposure'):\n if exposure_time is not None:\n 
self.meta.exposure.exposure_time = exposure_time\n if duration is not None:\n self.meta.exposure.duration = duration\n elif exposure_time is not None:\n self.meta.exposure.duration = exposure_time\n \n if start_time == 'NOW':\n start_time = MJD_ZEROPOINT + (time.time()/SECONDS_PER_DAY)\n if start_time is not None:\n self.meta.exposure.start_time = float(start_time)\n \n if mid_time == 'NOW':\n mid_time = MJD_ZEROPOINT + (time.time()/SECONDS_PER_DAY)\n if mid_time is not None:\n self.meta.exposure.mid_time = float(mid_time)\n \n if end_time == 'NOW':\n end_time = time.time()\n elif self.meta.exposure.start_time is not None and \\\n self.meta.exposure.duration is not None and end_time is None:\n # Set the end time to start_time + duration\n end_time = self.meta.exposure.start_time + \\\n (self.meta.exposure.duration/SECONDS_PER_DAY)\n if end_time is not None:\n self.meta.exposure.end_time = float(end_time)\n else:\n strg = \"Exposure metadata attributes missing from data model\"\n raise AttributeError(strg)", "def setup(self, tau1=None, tau2=None, template_tmax=None, dt=None, \n delay=0.0, sign=1, eventstartthr=None, risepower=4., min_event_amplitude=2.0):\n assert sign in [-1, 1] # must be selective, positive or negative events only\n self.sign = sign\n self.taus = [tau1, tau2]\n self.dt = 1/20000.\n self.template_tmax = template_tmax\n self.idelay = int(delay/dt) # points delay in template with zeros\n self.template = None # reset the template if needed.\n self.eventstartthr = eventstartthr\n self.risepower = risepower\n self.min_event_amplitude = min_event_amplitude", "def muontrg_efficiencies():\r\n# to be updated with new numbers:\r\n ejpsi_trg = Jpsi_trg\r\n ejpsi_trg.add_relative_error(0.03) # TISTOS Justine 140711\r\n ebsmm_trg.add_relative_error(0.02) # Extra lines Justine 140711\r\n ebsmm_trg = BmmE_trg\r\n ebsmm_trg.add_relative_error(0.03) # TISTOS Justine 140711\r\n ebsmm_trg.add_relative_error(0.025) # Extra lines Justine 120711\r\n return ejpsi_trg,ebsmm_trg", "def __init__(self):\n super().__init__()\n self.dynamic = True # from base class, indicates time-dependence is handled internally\n self.numBins = None # integer number of bins to use in creating the duration curve. 
TODO default?\n self.targets = None # list of strings, variables to apply postprocessor to", "def setup(self, tau1=None, tau2=None, template_tmax=None, dt=None, \n delay=0.0, sign=1, eventstartthr=None, risepower=4., min_event_amplitude=2.0):\n assert sign in [-1, 1]\n self.sign = sign\n self.taus = [tau1, tau2]\n self.dt = dt\n self.template_tmax = template_tmax\n self.idelay = int(delay/dt) # points delay in template with zeros\n self.template = None # reset the template if needed.\n self.eventstartthr = eventstartthr\n self.risepower = risepower\n self.min_event_amplitude = min_event_amplitude", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def delay(self):\r\n p_shape = self.phase.shape[:-1]\r\n delay = np.zeros(self.phase.shape)\r\n for i in range(p_shape[0]):\r\n for j in range(p_shape[1]):\r\n this_phase = self.phase[i, j]\r\n #If requested, unwrap the phases:\r\n if self._unwrap_phases:\r\n this_phase = tsu.unwrap_phases(this_phase)\r\n\r\n delay[i, j] = this_phase / (2 * np.pi * self.frequencies)\r\n\r\n return delay", "def timestep(self, simsystem, osc, obs):\n pass", "def animate_configuration(self, fps=30, **kwargs):\n\n if self.config_plot_update_func is None:\n msg = ('No ploting update function has been assigned to '\n 'config_plot_update_func.')\n raise ValueError(msg)\n\n kwargs.pop('interval', None) # ignore the user's supplied interval\n try:\n sample_rate = int(1.0 / np.diff(self.result.index).mean())\n except AttributeError:\n msg = (\"No trajectory has been computed yet, so the animation \"\n \"can't run. 
Run one of the response functions.\")\n raise AttributeError(msg)\n\n fps = int(fps)\n if sample_rate != fps:\n trajectories, interval = self._resample_trajectories(fps)\n else:\n trajectories, interval = self.result, 1000 / sample_rate\n\n # TODO : Could be:\n # axes, *objs_to_modify = ..\n # try:\n # fig = axes.figure\n # except AttributeError:\n # fig = axes[0].figure\n try:\n fig, *objs_to_modify = self.plot_configuration()\n except TypeError:\n msg = ('The configuration plot function does not return any objects'\n ' to modify in the update function.')\n raise ValueError(msg)\n\n def gen_frame(row_tuple, pop_list):\n time = row_tuple[0]\n row = row_tuple[1]\n # Don't mutate the orginal list.\n pop_list = pop_list.copy()\n args = []\n for k in getfullargspec(self.config_plot_update_func).args:\n if k == 'time':\n args.append(time)\n elif k == 'time__hist':\n args.append(trajectories[:time].index)\n elif k == 'time__futr':\n args.append(trajectories[time:].index)\n elif k.endswith('__hist'):\n args.append(trajectories[k[:-6]][:time])\n elif k.endswith('__futr'):\n args.append(trajectories[k[:-6]][time:])\n elif k in trajectories: # constants, coordinates, measurements\n args.append(row[k])\n elif k in self.constants:\n args.append(self.constants[k])\n else: # must be matplotlib object\n # TODO : This last part is quite fragile. It requires these\n # remaining args to be in the same order as the returned\n # tuple from the plot function and there is no way to know\n # if these are the correct objects to append if the order\n # isn't correct.\n args.append(pop_list.pop(0))\n self.config_plot_update_func(*args)\n\n # TODO : Run this with the initial conditions so that errors will\n # propogate before the animation is run.\n # NOTE : This is useful to uncomment in debugging because the errors\n # push to the top if in the FuncAnimation.\n #gen_frame((1.0, self.result.iloc[0]), list(objs_to_modify))\n\n # NOTE : If the iterrows() generator is not converted to a list then\n # matplotlib will throw a StopIteration error when the animation\n # reaches the last frame instead of repeating. This causes headaches in\n # the notebook and elsewhere. 
See issue #39 in the resonance repo.\n return animation.FuncAnimation(fig, gen_frame,\n fargs=(objs_to_modify, ),\n frames=list(trajectories.iterrows()),\n interval=interval,\n **kwargs)", "def __init__(self, markers):\n self.markers = markers\n self.last_time = None # Used to keep track of time between measurements \n self.Q_t = np.eye(2)\n self.R_t = np.eye(3)\n # YOUR CODE HERE", "def time_history_animation(self, frame_step=1, magnification=1):\n import matplotlib.pyplot as plt\n import matplotlib.animation as ani\n\n \"\"\"Retrieve maximum displacement for axis limits\"\"\"\n max_list = [max(map(abs, item)) * magnification for item in self.displacement]\n\n \"\"\"Start figure for animation\"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n \"\"\"Define the rectangles that represent the DOFs\"\"\"\n rectangle = []\n for i in range(len(self.coordinates)):\n rectangle.append(plt.Rectangle((self.coordinates[i][0],\n self.coordinates[i][1]),\n self.size[i][0], self.size[i][1], alpha=0.5))\n\n \"\"\"Init function for animation draws the frame, so that blip can be used and the animation runs faster\"\"\"\n\n def init():\n for i in range(len(self.coordinates)):\n ax.add_patch(rectangle[i])\n plt.axis('auto')\n plt.xlim([-max(max_list) + min(self.coordinates[:][0]),\n max(max_list) + max([item[0] for item in self.coordinates]) + max(self.size[:][0])])\n return rectangle\n\n \"\"\"Animation function: only the coordinates of the rectangles are updated here\"\"\"\n\n def motion(t_step):\n for i in range(len(self.coordinates)):\n rectangle[i].set_xy((float(self.coordinates[i][0]\n + self.displacement[i][t_step * frame_step] * magnification),\n float(self.coordinates[i][1])))\n return rectangle\n\n \"\"\"Animation function: inter gives the time delay between frames in milli seconds\"\"\"\n inter = int(1000 * self.dt * frame_step)\n self.anim = ani.FuncAnimation(fig,\n motion,\n init_func=init,\n interval=inter,\n blit=True)\n\n motion(int(len(self.displacement) / frame_step))\n plt.show()", "def test_double_ended_two_matching_sections_and_two_asym_atts():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 5\n time = np.arange(nt)\n nx_per_sec = 4\n nx = nx_per_sec * 9\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = 4.0 + np.cos(time) * 4\n ts_warm = 20.0 + -np.sin(time) * 4\n ts_ground = np.linspace(1, 9, num=nx_per_sec)\n\n C_p = 1324 # 1/2 * E0 * v * K_+/lam_+^4\n eta_pf = np.cos(time) / 10 + 1 # eta_+ (gain factor forward channel)\n eta_pb = np.sin(time) / 10 + 1 # eta_- (gain factor backward channel)\n C_m = 5000.0\n eta_mf = np.cos(time + np.pi / 8) / 10 + 1\n eta_mb = np.sin(time + np.pi / 8) / 10 + 1\n dalpha_r = 0.005284\n dalpha_m = 0.004961\n dalpha_p = 0.005607\n gamma = 482.6\n talph_fw = 0.95\n talph_bw = 0.85\n\n temp_real_kelvin = np.zeros((len(x), nt)) + 273.15\n temp_real_kelvin[:nx_per_sec] += ts_cold[None]\n temp_real_kelvin[nx_per_sec : 2 * nx_per_sec] += ts_warm[None]\n temp_real_kelvin[2 * nx_per_sec : 3 * nx_per_sec] += ts_ground[:, None]\n temp_real_kelvin[3 * nx_per_sec : 4 * nx_per_sec] += ts_ground[::-1, None]\n temp_real_kelvin[5 * nx_per_sec : 6 * nx_per_sec] += ts_ground[:, None] + 5\n temp_real_kelvin[6 * nx_per_sec : 7 * nx_per_sec] += ts_ground[:, None] + 5\n temp_real_kelvin[7 * nx_per_sec : 8 * nx_per_sec] += ts_warm[None]\n temp_real_kelvin[8 * nx_per_sec : 9 * nx_per_sec] += ts_cold[None]\n\n temp_real_celsius = temp_real_kelvin - 273.15\n\n st = (\n eta_pf[None]\n * C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, 
None])\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n st[3 * nx_per_sec :] *= talph_fw\n st[6 * nx_per_sec :] *= talph_fw\n ast = (\n eta_mf[None]\n * C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst = (\n eta_pb[None]\n * C_p\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_p * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst[: 3 * nx_per_sec] *= talph_bw\n rst[: 6 * nx_per_sec] *= talph_bw\n rast = (\n eta_mb[None]\n * C_m\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_m * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n\n ds = DataStore(\n {\n \"TMPR\": ([\"x\", \"time\"], temp_real_celsius),\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, x[nx_per_sec - 1])],\n \"warm\": [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])],\n }\n ms = [\n (\n slice(x[2 * nx_per_sec], x[3 * nx_per_sec - 1]),\n slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),\n True,\n ),\n (\n slice(x[5 * nx_per_sec], x[6 * nx_per_sec - 1]),\n slice(x[6 * nx_per_sec], x[7 * nx_per_sec - 1]),\n False,\n ),\n ]\n\n ds.calibration_double_ended(\n sections=sections,\n st_var=0.5,\n ast_var=0.5,\n rst_var=0.1,\n rast_var=0.1,\n method=\"wls\",\n solver=\"sparse\",\n trans_att=[x[3 * nx_per_sec], x[6 * nx_per_sec]],\n matching_sections=ms,\n )\n\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)\n pass", "def energy_to_time(energy_ev=[], delay_us=np.NaN, source_to_detector_cm=np.NaN):\n # delay values is normal 2.99 us with NONE actual MCP delay settings\n energy_mev = energy_ev * 1000\n time_tot_us = np.sqrt(81.787 / energy_mev) * source_to_detector_cm / 0.3956\n time_record_us = time_tot_us - delay_us\n time_record_ns = time_record_us * 1000\n time_record_s = time_record_us / 1e6\n return time_record_us", "def setup(self, tau1=None, tau2=None, template_tmax=0.05, dt=None, \n delay=0.0, sign=1, eventstartthr=None, risepower=4.0, min_event_amplitude=2.0):\n assert sign in [-1, 1]\n self.sign = sign\n self.taus = [tau1, tau2]\n self.dt = dt\n self.template_tmax = template_tmax\n self.idelay = int(delay/dt) # points delay in template with zeros\n self.template = None # reset the template if needed.\n self.eventstartthr = eventstartthr\n self.risepower = risepower\n self.min_event_amplitude = min_event_amplitude", "def GetEpiAcqTimes(self, series):\n# Find minimum and maximum start times for each acquistion in series.\n self.epi_times = {}\n for entry in self.entry_map['epi']:\n# Loop through each file in this series.\n if self.info[entry]['series'] == series and \\\n self.info[entry]['tdim'] > 2:\n# Relate each entry to its time of acquisition.\n self.epi_times[self.info[entry]['acqtime']] = entry", "def run():\n step = 0\n o2r = 4 #orange to red delay time\n r2g = 2 #red to green delay time\n A_4235 = 0\n B_4235 = 
1\n C_4235 = 2\n AB1_4235 = 3\n AB2_4235 = 4\n AC1_4235 = 5\n AC2_4235 = 6\n BA1_4235 = 7\n BA2_4235 = 8\n BC1_4235 = 9\n BC2_4235 = 10\n CA1_4235 = 11\n CA2_4235 = 12\n CB1_4235 = 13\n CB2_4235 = 14\n A_4219 = 0\n B_4219 = 1\n C_4219 = 2\n D_4219 = 3\n E_4219 = 4\n F_4219 = 5\n G_4219 = 6\n AB1_4219 = 7\n AB2_4219 = 8\n AC1_4219 = 9\n AC2_4219 = 10\n AD1_4219 = 11\n AD2_4219 = 12\n AE1_4219 = 13\n AE2_4219 = 14\n AF1_4219 = 16\n AF2_4219 = 17\n AG1_4219 = 18\n AG2_4219 = 19\n BA1_4219 = 20\n BA2_4219 = 21\n BC1_4219 = 22\n BC2_4219 = 23\n BD1_4219 = 24\n BD2_4219 = 25\n BE1_4219 = 26\n BE2_4219 = 27\n BF1_4219 = 28\n BF2_4219 = 29\n BG1_4219 = 30\n BG2_4219 = 31\n CA1_4219 = 32\n CA2_4219 = 33\n CB1_4219 = 34\n CB2_4219 = 35\n CD1_4219 = 36\n CD2_4219 = 37\n CE1_4219 = 38\n CE2_4219 = 39\n CF1_4219 = 40\n CF2_4219 = 41\n CG1_4219 = 42\n CG2_4219 = 43\n DA1_4219 = 44\n DA2_4219 = 45\n DB1_4219 = 46\n DB2_4219 = 47\n DC1_4219 = 48\n DC2_4219 = 49\n DE1_4219 = 50\n DE2_4219 = 51\n DF1_4219 = 52\n DF2_4219 = 53\n DG1_4219 = 54\n DG2_4219 = 55\n EA1_4219 = 56\n EA2_4219 = 57\n EB1_4219 = 58\n EB2_4219 = 59\n EC1_4219 = 60\n EC2_4219 = 61\n ED1_4219 = 62\n ED2_4219 = 63\n EF1_4219 = 64\n EF2_4219 = 65\n EG1_4219 = 66\n EG2_4219 = 67\n FA1_4219 = 68\n FA2_4219 = 69\n FB1_4219 = 70\n FB2_4219 = 71\n FC1_4219 = 72\n FC2_4219 = 73\n FD1_4219 = 74\n FD2_4219 = 75\n FE1_4219 = 76\n FE2_4219 = 77\n FG1_4219 = 78\n FG2_4219 = 79\n GA1_4219 = 80\n GA2_4219 = 81\n GB1_4219 = 82\n GB2_4219 = 83\n GC1_4219 = 84\n GC2_4219 = 85\n GD1_4219 = 86\n GD2_4219 = 87\n GE1_4219 = 88\n GE2_4219 = 89\n GF1_4219 = 90\n GF2_4219 = 91\n A_4220 = 0\n B_4220 = 1\n C_4220 = 2\n D_4220 = 3\n E_4220 = 4\n AB1_4220 = 5\n AB2_4220 = 6\n AC1_4220 = 7\n AC2_4220 = 8\n AD1_4220 = 9\n AD2_4220 = 10\n AE1_4220 = 11\n AE2_4220 = 12\n BA1_4220 = 13\n BA2_4220 = 14\n BC1_4220 = 15\n BC2_4220 = 16\n BD1_4220 = 17\n BD2_4220 = 18\n BE1_4220 = 19\n BE2_4220 = 20\n CA1_4220 = 21\n CA2_4220 = 22\n CB1_4220 = 23\n CB2_4220 = 24\n CD1_4220 = 25\n CD2_4220 = 26\n CE1_4220 = 27\n CE2_4220 = 28\n DA1_4220 = 29\n DA2_4220 = 30\n DB1_4220 = 31\n DB2_4220 = 32\n DC1_4220 = 33\n DC2_4220 = 34\n DE1_4220 = 35\n DE2_4220 = 36\n EA1_4220 = 37\n EA2_4220 = 38\n EB1_4220 = 39\n EB2_4220 = 40\n EC1_4220 = 41\n EC2_4220 = 42\n ED1_4220 = 43\n ED2_4220 = 44\n A_4221 = 0\n B_4221 = 1\n C_4221 = 2\n D_4221 = 3\n E_4221 = 4\n F_4221 = 5\n AB1_4221 = 6\n AB2_4221 = 7\n AC1_4221 = 8\n AC2_4221 = 9\n AD1_4221 = 10\n AD2_4221 = 11\n AE1_4221 = 12\n AE2_4221 = 13\n AF1_4221 = 14\n AF2_4221 = 15\n BA1_4221 = 16\n BA2_4221 = 17\n BC1_4221 = 18\n BC2_4221 = 19\n BD1_4221 = 20\n BD2_4221 = 21\n BE1_4221 = 22\n BE2_4221 = 23\n BF1_4221 = 24\n BF2_4221 = 25\n CA1_4221 = 26\n CA2_4221 = 27\n CB1_4221 = 28\n CB2_4221 = 29\n CD1_4221 = 30\n CD2_4221 = 31\n CE1_4221 = 32\n CE2_4221 = 33\n CF1_4221 = 34\n CF2_4221 = 35\n DA1_4221 = 36\n DA2_4221 = 37\n DB1_4221 = 38\n DB2_4221 = 39\n DC1_4221 = 40\n DC2_4221 = 41\n DE1_4221 = 42\n DE2_4221 = 43\n DF1_4221 = 44\n DF2_4221 = 45\n EA1_4221 = 46\n EA2_4221 = 47\n EB1_4221 = 48\n EB2_4221 = 49\n EC1_4221 = 50\n EC2_4221 = 51\n ED1_4221 = 52\n ED2_4221 = 53\n EF1_4221 = 54\n EF2_4221 = 55\n FA1_4221 = 56\n FA2_4221 = 57\n FB1_4221 = 58\n FB2_4221 = 59\n FC1_4221 = 60\n FC2_4221 = 61\n FD1_4221 = 62\n FD2_4221 = 63\n FE1_4221 = 64\n FE2_4221 = 65\n \n #while traci.simulation.getMinExpectedNumber() > 0:\n while step < 600:\n traci.simulationStep()\n if step == 0:\n 
traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",6)\n if step == 6:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 10:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 12:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",75)\n if step == 87:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 91:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 93:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 108:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 112:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 114:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",12)\n if step == 126:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 130:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 132:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",32)\n if step == 164:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 168:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",50)\n if step == 220:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 224:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 226:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n 
traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 241:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 245:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 247:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",14)\n if step == 261:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 265:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 267:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",13)\n if step == 280:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 284:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 286:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",74)\n if step == 360:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 364:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 366:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 381:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 385:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 387:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",10)\n if step == 397:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 401:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 403:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",16)\n if step == 419:\n 
traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 423:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 425:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",74)\n if step == 499:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 503:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 505:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 520:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 524:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 526:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",8)\n if step == 534:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 538:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 540:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",16)\n if step == 556:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 560:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 562:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",38)\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 6)\n if step == 6:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 10:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 12:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 20)\n if step == 32:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 36:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 38:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 43:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 47:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 49:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 31)\n if step == 80:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 84:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 86:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 12)\n if step == 98:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 102:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 104:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 29)\n if step == 
133:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 137:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 139:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 20)\n if step == 159:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 163:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 165:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 174:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 176:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 38)\n if step == 214:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 218:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 220:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 13)\n if step == 233:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 237:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 
GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 239:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 34)\n if step == 273:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 277:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 279:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 22)\n if step == 301:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 305:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 307:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 312:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 316:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 318:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 31)\n if step == 349:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 353:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 355:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 18)\n if step == 373:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 377:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 379:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 36)\n if step == 415:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 419:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 421:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 19)\n if step == 440:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 444:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 446:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 451:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 455:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 457:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 29)\n if step == 486:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 
o2r)\n if step == 490:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 492:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 18)\n if step == 510:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 514:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 516:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 41)\n if step == 557:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 561:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 563:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 19)\n if step == 582:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 586:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 588:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 8)\n if step == 596:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 0:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 17)\n if step == 17:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 21:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 
23:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 9)\n if step == 32:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 36:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 38:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 67)\n if step == 105:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 109:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 111:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 130:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 134:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 136:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 16)\n if step == 152:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 156:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 158:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 11)\n if step == 169:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 173:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 175:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 63)\n if step == 238:\n traci.trafficlight.setPhase(\"gneJ41\", AD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 242:\n traci.trafficlight.setPhase(\"gneJ41\", AD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 244:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 13)\n if step == 257:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 261:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 263:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 9)\n if step == 272:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 276:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 278:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 95)\n if step == 373:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 377:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 379:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 398:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n 
traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 402:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 404:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 24)\n if step == 428:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 432:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 434:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 13)\n if step == 447:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 451:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 453:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 56)\n if step == 509:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 513:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 515:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 534:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 538:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 540:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 22)\n if step == 562:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 566:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 568:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 20)\n if step == 588:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 592:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 594:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 6)\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 9:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 13:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 15:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 34:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 
38:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 40:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 20)\n if step == 60:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 64:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 66:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 24)\n if step == 90:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 94:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 96:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 105:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 109:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 111:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 130:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 134:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 136:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 30)\n if step == 166:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 172:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 28)\n if step == 200:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", 
DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 204:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 206:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 225:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 229:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 231:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 8)\n if step == 239:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 243:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 245:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 32)\n if step == 277:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 281:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 283:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 27)\n if step == 310:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 314:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 316:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 15)\n if step == 331:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 335:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 337:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 14)\n if step == 351:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 355:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 357:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 22)\n if step == 379:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 383:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 385:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 24)\n if step == 409:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 413:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 415:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 11)\n if step == 426:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 430:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 432:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 14)\n if step == 446:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 450:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 452:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 30)\n if step == 482:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 486:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 488:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 26)\n if step == 514:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 518:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 520:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 8)\n if step == 528:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 532:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 534:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 18)\n if step == 552:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 556:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 558:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 577:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 581:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 583:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 592:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 596:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 598:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 2)\n\n step += 1\n\n traci.close()\n sys.stdout.flush()", "def ptc_acquisition(self, explow=0.1, exphigh=2, expdelta=0.1, laserchannel = 2, lasercurrent=45.0):\n\n #\n self.laser.select(laserchannel)\n 
self.laser.setCurrent(laserchannel, lasercurrent)\n self.laser.enable()\n\n #self.powerup_CCD()\n self.reb.set_testtype('PTC')\n\n #self.DKD.setup_current_measurements(DKD_range)\n self.PhD.setup_current_measurements(2e-8)\n\n # Create the logging summary file\n summaryfile = os.path.join(eodir, 'summary.log')\n f = open(summaryfile, 'a')\n\n print >>f, \"# power\\t exposure time\\t file name\"\n\n effpow = self.laser.getPower(laserchannel)\n # First take bias frames\n self.log(\"Taking bias\")\n m = self.execute_reb_sequence('ClearBias', 0, 20, True )\n #to have only useful channels:\n fname = \"%s_ptc_bias_%s.fits\" % (serno, self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, 0, fname\n\n for t in np.arange(explow, exphigh+expdelta, expdelta):\n # pair of flats\n for numpair in [1, 2]:\n effpow = self.laser.getPower(laserchannel)\n m = self.execute_reb_sequence('Acquisition', t)\n #to have only useful channels:\n fname = \"%s_ptc_flat%d_%05d_%s.fits\" % (serno, numpair, int(t*100), self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, t, fname\n\n f.close()\n\n # Shutting down (not the lamp by default)\n self.laser.disable()\n #self.shutdown_CCD()\n # p = self.reb.start_waiting_sequence()", "def create_event_dur_score(self):\n for inst in self.instruments:\n #[rest/midipitch, dur, vel]\n inst_score=[]\n running_clock = 0\n for n, note in enumerate(inst.notes):\n freq = mp_to_adjusted_freq(note[0], self.ratios)\n if type(freq) != int: freq = np.asscalar(freq)\n if type(note[0]) != int: inst.notes[n][0] = np.asscalar(note[0])\n if type(note[1]) != int: inst.notes[n][1] = np.asscalar(note[1])\n if type(note[2]) != int: inst.notes[n][2] = np.asscalar(note[2])\n # if type(note[3]) != int: inst.notes[n][3] = np.asscalar(note[3])\n if note[1] != running_clock:\n inst_score.append(['Rest()', note[1] - running_clock, 0])\n inst_score.append([freq, note[2], note[3]])\n running_clock = note[1] + note[2]\n inst.event_dur_score = inst_score", "def apply_parameters(self):\n n_bins = int(self.record_length / self.bin_width)\n time_bins = self.bin_width * np.arange(n_bins)\n \n\n self.tau = np.arange(self.tau_start, self.tau_end+self.tau_delta, self.tau_delta)\n sequence = self.generate_sequence()\n self.n_laser = find_laser_pulses(sequence)\n \n FC.Configure(self.record_length, self.bin_width, self.n_laser)\n\n if self.keep_data and sequence == self.sequence and np.all(time_bins == self.time_bins): # if the sequence and time_bins are the same as previous, keep existing data\n self.old_count_data = self.count_data.copy()\n else:\n self.old_count_data = np.zeros_like(FC.GetData())\n \n self.sequence = sequence\n self.time_bins = time_bins\n self.n_bins = n_bins\n \n self.MW_source = {'mw':mw, 'mw2':mw2}[self.mw_source]", "def make_harmonic(self): \n function_values_and_time = {'t': [], 'h_t':[]}\n values_of_t = np.arange(self._t_start, self._t_stop+self._step_size, self._step_size) #for-loop instead of a lamda expression for easier interpretation\n for index, t in enumerate(values_of_t):\n function_value_at_t = 3*math.pi*math.exp(-(self.lambda_method(t)))\n if index > 1 and self._periodicity is None:\n self.find_period(function_values_and_time['h_t'][0],function_values_and_time['h_t'][1], \\\n function_value_at_t, 
function_values_and_time['h_t'][index-1],t)\n #self.check_global_minima(function_value_at_t, t, index)\n function_values_and_time['t'].append(t)\n function_values_and_time['h_t'].append(function_value_at_t)\n if self._periodicity is None: # This is necessary bcs we might look at an interval that is less than a period.\n self._periodicity = index\n return {'step_size': self._step_size, 'period': self._periodicity, 'data':function_values_and_time}", "def config_independent_frames(self):\n return {'standard': 'dispname', 'bias': None, 'dark': None}", "def __init__(self):\n super(ExponentialOutputs, self).__init__()\n #dictionary of time, outputs\n self.out_pop_time_series = []", "def _create_trace(self):\n\t\tself.trace=algebraic_dict(self.N_time_steps,self.N_actions)", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = 
matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def get_timings(self):\n exp=lib.is_Exposure_d8(self.hcam,7)*1E-3\n frame_rate=lib.is_SetFrameRate(self.hcam,0x8000)\n return self.AcqTimes(exp,1./frame_rate)", "def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()", "def __set_defaults_to_runtime_variables(self) -> None:\n self.current_time_in_eighths = N_EIGHTHS_PER_MEASURE\n self.current_measure_durations = []\n self.past_movements = []\n self.current_motion_start_element = self.counterpoint[0]\n self.is_last_element_consonant = True", "def get_beat_data(data,columns,seconds='last',starteqend=False,preloadduration=0.3):\r\n if ispanda(data) and isastr(columns[0]):\r\n time=data.as_matrix(['X'])\r\n data=data.as_matrix(columns)\r\n elif isnparray(data) and isanum(columns[0]):\r\n data=np.array(data)\r\n time=data[:,[0]].copy()\r\n data=data[:,columns].copy()\r\n else:\r\n return None\r\n time=time.transpose()[0]\r\n if str(seconds)=='last':\r\n seconds=[time[-1]-1,time[-1]]\r\n elif isanum(seconds):\r\n if len(np.array([seconds]))>1:\r\n pass\r\n #now check if we wanted a beat#\r\n elif isinteger(seconds):\r\n pld=preloadduration\r\n seconds=[seconds-1+pld,seconds+pld]\r\n else:\r\n return None\r\n #start row(sr) and end row (er)\r\n sr=np.argmin(abs(time-seconds[0]))\r\n er=np.argmin(abs(time-seconds[-1]))\r\n data=data[sr:er+1]\r\n timeabs=time[sr:er+1].copy()\r\n timerel=timeabs-timeabs[0]\r\n if starteqend:\r\n if timerel[-1]<0.05:\r\n return None\r\n #fifty ms row\r\n fiftymsr=np.argmin(abs(timerel-0.05))\r\n #time as a column again, multiplied horizontally to reach the desired dim\r\n t=np.array([timerel[:fiftymsr+1]]).T \r\n t=np.tile(t,(1,data.shape[1]))\r\n #vf/vi (relationship between final value (vf) and initial value (vi))\r\n vf_vi=np.tile(data[-1]/data[0],(fiftymsr+1,1))\r\n adj_mat=(1-vf_vi)/0.05*t+vf_vi\r\n data[:fiftymsr+1,:]=data[:fiftymsr+1,:]*adj_mat\r\n return data,np.array([timeabs]).T,np.array([timerel]).T", "def eta(self):\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n if self._is_ts:\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n data = self.data[i]\r\n u = np.unique(self.events[i])\r\n event_types = u[np.unique(self.events[i]) != 0]\r\n h[i] = np.empty((event_types.shape[0], self.len_et),\r\n dtype=complex)\r\n\r\n # This offset is used to pull the event indices below, but we\r\n # have to broadcast it so the shape of the resulting idx+offset\r\n # operation below gives us the (nevents, len_et) array we want,\r\n # per channel.\r\n offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n # Loop over event types\r\n for e_idx 
in range(event_types.shape[0]):\r\n idx = np.where(self.events[i] == event_types[e_idx])[0]\r\n event_trig = data[idx + offset]\r\n #Correct baseline by removing the first point in the series\r\n #for each channel:\r\n if self._correct_baseline:\r\n event_trig -= event_trig[0]\r\n\r\n h[i][e_idx] = np.mean(event_trig, -1)\r\n\r\n #In case the input events are an Events:\r\n else:\r\n #Get the indices necessary for extraction of the eta:\r\n add_offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n\r\n idx = (self.events.time / self.sampling_interval).astype(int)\r\n\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n #If this is a list with one element:\r\n if self._len_h == 1:\r\n event_trig = self.data[0][idx + add_offset]\r\n #Otherwise, you need to index straight into the underlying data\r\n #array:\r\n else:\r\n event_trig = self.data.data[i][idx + add_offset]\r\n\r\n h[i] = np.mean(event_trig, -1)\r\n\r\n h = np.array(h).squeeze()\r\n return ts.TimeSeries(data=h,\r\n sampling_interval=self.sampling_interval,\r\n t0=self.offset * self.sampling_interval,\r\n time_unit=self.time_unit)", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def delay(self):\r\n return self.relative_phases / (2 * np.pi * self.frequencies)", "def delay(self):\r\n return self.relative_phases / (2 * np.pi * self.frequencies)", "def h_obs(self, cur_time, e_id):\n end_time = cur_time\n\n y_f = np.matrix(np.zeros((self.dim_obs, 1), float))\n\n # the predicted value should be the aggregated value during the evolution process.\n # the forecast flow\n if 'flow' in self.y_index.keys():\n for s_id in self.y_index['flow'].keys():\n\n # if it is on freeway, use self.f_flow\n if s_id in self.sensors['freeway']:\n cell_id = self.sensors[s_id]['cell']\n\n # forecast flow data during time interval\n # avoid doing matrix transformation to speed code up.\n # current length of data\n len_window = len(self.__f_flow['data'][e_id])\n\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n\n # get the index of the data instances to aggregate\n index_start = np.searchsorted(self.__f_flow['time'][0:len_window], start_time, side='right')\n index_end = np.searchsorted(self.__f_flow['time'][0:len_window], end_time, side='left')\n\n # time_index = (start_time < self.__f_flow['time'][0:len_window]) & \\\n # (self.__f_flow['time'][0:len_window]<= end_time)\n # # get the index\n # ele_index = [i for i,b in enumerate(time_index) if b]\n\n flow_sum = 0.0\n flow_num = 0\n for i in range(index_start, index_end + 1):\n flow_sum += self.__f_flow['data'][e_id][i][cell_id]\n flow_num += 1\n\n # tmp_flow = np.matrix( self.__f_flow['data'][e_id] ).T\n # data_interval = tmp_flow[cell_id, time_index]\n\n # save the average data as the forecast data\n if flow_num != 0:\n y_f[self.y_index['flow'][s_id]] = flow_sum / flow_num\n else:\n raise Exception('Error: take mean over empty array.')\n\n # if it is on onramp, use est_state_all[ x_index['onramp'][cell] ]\n elif s_id in self.sensors['onramp']:\n cell_id = self.sensors[s_id]['cell']\n # we can just reuse the same time grid to find the index in est_state_all\n # __f_flow['time'] include all th time points. 
Extract the times point till now\n\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n time_index = (start_time < self.__f_flow['time']) & (self.__f_flow['time'] <= end_time)\n\n # forecast onramp data during the time interval as the mean estimated value\n # TODO, it should be depending on each ensemble, but here we are just using the mean estimated state.\n data_interval = self.est_state_all[self.x_index['onramp'][cell_id], time_index]\n\n # save the average data as the forecast data\n y_f[self.y_index['flow'][s_id]] = data_interval.mean(1)\n\n # if it is on onramp, use est_state_all[ x_index['onramp'][cell] ]\n elif s_id in self.sensors['offramp']:\n cell_id = self.sensors[s_id]['cell']\n # we can just reuse the same time grid to find the index in est_state_all\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n time_index = (start_time < self.__f_flow['time']) & (self.__f_flow['time'] <= end_time)\n\n # forecast onramp data during the time interval\n # TODO, it should be depending on each ensemble\n data_interval = self.est_state_all[self.x_index['offramp'][cell_id], time_index]\n\n # save the average data as the forecast data\n y_f[self.y_index['flow'][s_id]] = data_interval.mean(1)\n\n else:\n raise Exception('Error: Flow sensors must locate on freeway grid or on/off ramps.')\n\n # the forecast velocity\n if 'speed' in self.y_index.keys():\n for s_id in self.y_index['speed'].keys():\n # the speed sensor must be on the freeway\n if s_id in self.sensors['freeway']:\n cell_id = self.sensors[s_id]['cell']\n\n len_window = len(self.__f_speed['data'][e_id])\n\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n\n # get the index of the data instances to aggregate\n index_start = np.searchsorted(self.__f_speed['time'][0:len_window], start_time, side='right')\n index_end = np.searchsorted(self.__f_speed['time'][0:len_window], end_time, side='left')\n\n # time_index = (start_time < self.__f_speed['time'][0:len_window]) & \\\n # (self.__f_speed['time'][0:len_window]<= end_time)\n # ele_index = [i for i,b in enumerate(time_index) if b]\n\n speed_sum = 0.0\n speed_num = 0\n for i in range(index_start, index_end + 1):\n speed_sum += self.__f_speed['data'][e_id][i][cell_id]\n speed_num += 1\n\n # get the data during the interval\n # tmp_speed = np.matrix( self.__f_speed['data'][e_id] ).T\n # len_window = tmp_speed.shape[1]\n # data_interval = tmp_speed[cell_id, time_index]\n\n # the forecast velocity is the average value\n if speed_num != 0:\n y_f[self.y_index['speed'][s_id]] = speed_sum / speed_num\n else:\n raise Exception('Error: take mean over empty array')\n\n else:\n raise Exception('Error: Speed sensors must locate on the freeway grid.')\n\n # the forecast travel time\n # TODO: need to update the bluetooth observation equation, now it is an average of the snap shot travel time during the interval\n if 'travel_time' in self.y_index.keys():\n for s_id in self.y_index['travel_time'].keys():\n\n start_cell, end_cell = self.sensors[s_id]['cell']\n # still use the __f_flow to get the time grid index\n start_time = end_time - self.sensors[s_id]['aggregation_sec']\n time_index = (start_time < self.__f_flow['time']) & (self.__f_flow['time'] <= end_time)\n\n # get the traffic density data from the interval\n data_interval = self.est_state_all[\n self.x_index['density'][start_cell]: self.x_index['density'][start_cell],\n time_index]\n\n travel_time = []\n speed_cells = []\n # compute a snap-shot travel time using each density profile\n for j in range(0, 
data_interval.shape[1]):\n for i in range(0, data_interval.shape[0]):\n cell_id = start_cell + i\n vel = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n data_interval[i, j])\n if vel == 0:\n raise Exception('Error: Got negative speed.')\n else:\n speed_cells.append(float(vel))\n\n speed_cells = np.array(speed_cells)\n\n # get the length of cells\n L_cells = self.len_cells[start_cell: end_cell]\n\n # append this snap shot travel time\n travel_time.append(np.sum(L_cells / speed_cells))\n\n # compute the mean travel time as the forecast travel time\n travel_time = np.array(travel_time)\n y_f[self.y_index['travel_time'][s_id]] = travel_time.mean()\n\n return y_f", "def captured_signal(waveform, shift, p):\n return time_varying_delay(waveform, shift, p)", "def on_timer(self, event):\n \n o = Unicorn()\n data = o.get_data(rt)\n k = len(data[0])\n y[:, :-k] = y[:, k:]\n y[:, -k:] = remap((data), -40, 40, -1, 1 ) \n t2 = _thread.start_new_thread(printT, ())\n #y2 = np.array([lfilter(b, a, y[i]) for i in range(17)])\n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()", "def __init__(self, trap=2.5*10**16, Keq=1.0*10**17,\n EHdecay=1.0*10**-10, Etrap=2.0*10**-10, FHloss=8.0*10**-12,\n G3decay = 0, step=200*ps, pretime=2, reprate=80000000,\n verbose=False, trackQ=False, scalar=1, Gdecay=0, GHdecay=0,\n tolerance=0.005, G2decay=0. ,Gescape=1., Gform=1., G3loss=0.):\n # Some other variables used\n self.tolerance = tolerance\n self.scalar = scalar\n self.verbose = verbose\n self.reprate = reprate\n self.duration = 1.00 / reprate\n self.step = step\n self.steps = int(self.duration / self.step)\n self.powers = []\n self.pretime = pretime\n # Variables which hold state densities\n self.exciton = []\n self.hole = []\n self.electron = []\n self.trap = (trap) # Total number of traps\n self.filled = [] # Filled traps\n self.signal = []\n self.xsignal = []\n self.ehsignal = []\n self.xloss = []\n self.tloss = []\n self.pulses = []\n self.qk = []\n self.trackQ = trackQ\n # Rate and equilibrium constants, corrected for time step size\n self.Keq = Gescape/Gform # Equilibrium constant for X<-->e+h\n self.EHdecay = (EHdecay * step) # e+h->ground\n self.Etrap = (Etrap * step) # e+trap->filled\n self.FHloss = (FHloss * step) # filled+h->ground\n self.Gdecay = Gdecay * step\n self.G2decay = G2decay * step\n self.G3decay = G3decay * step\n self.GHdecay = GHdecay * step\n self.Gescape = Gescape * step\n self.G3loss = G3loss * step\n self.Gform = Gform * step", "def generate_signals(self):\n signals = {}\n \n\n # Create the set of short and long exponential moving averages over the \n # respective periods\n signals['short'] = self.bars.ewm(span = self.short_window , min_periods=self.long_window-1).mean()\n signals['long'] = self.bars.ewm(span = self.long_window , min_periods=self.long_window-1).mean()\n signals['MACD'] = signals['short'] - signals['long']\n signals['MACDsign'] = signals['MACD'].ewm(span = self.signal_window , min_periods=self.long_window-1).mean()\n signals['MACDdiff'] = signals['MACD'] - signals['MACDsign']\n\n \n return signals", "def _decoding_step_time_signal(self, target_embeds, decode_loop_step):\n # TODO(hongkuny): migrate to keras bert and design a module to handle this.\n output = target_embeds\n if self.embedding_postprocessor.use_position_embeddings:\n position_embeddings = tf.gather(\n self.embedding_postprocessor.position_embeddings, [decode_loop_step])\n # Broadcasts to all 
sequences inside a batch.\n output += position_embeddings\n\n output = self.embedding_postprocessor.output_layer_norm(output)\n output = self.embedding_postprocessor.output_dropout(output)\n return output", "def __init__(self, time_window_ps, center_wavelength_nm, power,frep_MHz = 100., NPTS = 2**10,\n power_is_avg = False,\n fileloc = '',\n flip_phase = True):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n try:\n self.fileloc = fileloc\n # make sure we weren't passed mks units\n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 )\n self.set_time_window_ps(time_window_ps)\n self.set_center_wavelength_nm(center_wavelength_nm) # reference wavelength (nm) \n \n # power -> EPP\n if power_is_avg:\n power = power / self.frep_mks\n \n # Read in retrieved FROG trace\n frog_data = np.genfromtxt(self.fileloc)\n wavelengths = frog_data[:,0]# (nm)\n intensity = frog_data[:,1]# (arb. units)\n phase = frog_data[:,2]# (radians)\n\n if flip_phase:\n phase = -1 * phase\n \n pulse_envelope = interp1d(wavelengths, intensity, kind='linear',\n bounds_error=False,fill_value=0)\n phase_envelope = interp1d(wavelengths, phase, kind='linear', \n bounds_error=False,fill_value=0)\n \n gridded_intensity = pulse_envelope(self.wl_nm)\n gridded_phase = phase_envelope(self.wl_nm)\n\n # Calculate time domain complex electric field A\n self.set_AW(gridded_intensity*np.exp(1j*gridded_phase))\n # Calculate normalization factor to achieve requested \n # pulse energy\n e_scale = np.sqrt(power / self.calc_epp() )\n self.set_AT(self.AT * e_scale )\n\n except IOError:\n print ('File not found.' )", "def generate_exptime_table(self, ):\n\n # Perform calculation for all stars in biased sample\n Ndraw = self.NBIAS\n\n np.random.seed(seed=None)\n\n # Allocate memory for exposure times\n t_tots = np.zeros(Ndraw)\n tpbpcs = []\n pct_obs_iwas = []\n lammax_obs_iwas = []\n specs = []\n\n \"\"\"\n Calculate the exposure times and spectra in each bandpass for each\n star in biased sample\n \"\"\"\n\n # Loop over stars in this sample\n for i in range(Ndraw):\n #print(\"HIP %i, %.2f pc, %s \" %(hip[i], dist[i], stype[i]))\n\n # Set system parameters for this star\n self.prep_ith_star(i)\n\n # Calculate the time to observe the complete spectrum\n t_tots[i], tpbpc, spectrum, iwa = self.complete_spectrum_time()\n\n tpbpcs.append(tpbpc)\n pct_obs_iwas.append(iwa[0])\n specs.append(spectrum)\n\n # Calculate channel widths\n deltas = []\n for channel in CHANNELS:\n l = default_luvoir(channel=channel)\n deltas.append(l.lammax - l.lammin)\n self.deltas = np.array(deltas)\n\n # Calculate channel fractional completeness\n self.channel_weights = (self.deltas / np.sum(self.deltas))\n\n # Calculate completeness for each star in sample\n self.completeness = np.sum(np.array(pct_obs_iwas) * self.channel_weights, axis = 1)\n\n \"\"\"\n Make a Lookup Table of Exposure times for each star in sample\n \"\"\"\n\n tpbpcs_rect = [] # Time per bandpass\n tpcs_rect = [] # Time per channel\n\n # Loop over all the stars in sample\n for idrew in range(self.NBIAS):\n\n tpbpcs_rect.append([])\n tpcs_rect.append([])\n bp_names = []\n bp_chan = []\n\n # Loop over all the LUVOIR channels\n for ichan in range(len(CHANNELS)):\n\n tpcs_rect[idrew].append(0.0)\n\n # Loop over all the bands in this channel\n for iband in range(len(tpbpcs[0][ichan])):\n\n bp_names.append(\"%s %i\" %(CHANNELS[ichan], iband+1))\n bp_chan.append(ichan)\n tpbpcs_rect[idrew].append(tpbpcs[idrew][ichan][iband])\n tpcs_rect[idrew][ichan] += tpbpcs[idrew][ichan][iband]\n\n # 
Make np arrays\n tpbpcs_rect = np.array(tpbpcs_rect)\n tpcs_rect = np.array(tpcs_rect)\n bp_names = np.array(bp_names)\n bp_chan = np.array(bp_chan)\n\n # Make infs --> nans\n infmask = ~np.isfinite(tpbpcs_rect)\n tpbpcs_rect[infmask] = np.nan\n infmask = ~np.isfinite(tpcs_rect)\n tpcs_rect[infmask] = np.nan\n\n # Set attributes\n self.tpbpcs_rect = tpbpcs_rect\n self.tpcs_rect = tpcs_rect\n self.bp_names = bp_names\n self.bp_chan = bp_chan\n\n \"\"\"\n New completeness calculations\n \"\"\"\n\n bandpasses = []\n\n # Loop over telescope channels\n for j, channel in enumerate(CHANNELS):\n\n # Channel dependent bandwidth?\n if type(self.bandwidth) is float:\n bandwidth = self.bandwidth\n else:\n assert len(self.bandwidth) == len(CHANNELS)\n bandwidth = self.bandwidth[j]\n\n # Get the channel specific telescope parameters\n luvoir = default_luvoir(channel=channel)\n self.cn.telescope = luvoir\n\n # Calculate the bandpass edges\n edges = calculate_bandpass_edges(luvoir.lammin, luvoir.lammax, bandwidth = bandwidth)\n\n # Calculate the number of bandpasses\n Nbands = len(edges) - 1\n\n # Loop over bandpasses\n for i in range(Nbands):\n\n # Get the max, min, and middle wavelenths for this bandpass\n lammin = edges[i]\n lammax = edges[i+1]\n\n bandpasses.append([lammin, lammax])\n\n bandpasses = np.array(bandpasses)\n lmin, lmax = np.min(np.hstack(bandpasses)), np.max(np.hstack(bandpasses))\n\n # Fractional completeness of each bandpass\n bp_frac = ((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin)) / np.sum((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin))\n\n # Completeness by target\n tot_completeness = np.sum(np.isfinite(self.tpbpcs_rect) * bp_frac, axis=1)\n\n # Fraction of stars in biased sample that can completely observe each bandpass\n frac_bias_bp = np.sum(np.isfinite(tpbpcs_rect)*1.0, axis=0) / self.NBIAS\n\n # Set attributes\n self.bandpasses = bandpasses\n self.bp_frac = bp_frac\n self.tot_completeness = tot_completeness\n self.frac_bias_bp = frac_bias_bp\n\n self._make_pandas_table()\n\n return", "def get_es(self):\n\t\tif not self.data_p.has_key(\"delta\"):\n\t\t\tself.getdelta()\n\t\tself.data_p[\"endsimmer\"] = self.data_p[\"tau_nuc\"]/self.data_p[\"tau_cdyn\"]*self.data_p[\"delta\"]**0.5", "def post_process(self):\n\t\ti_s = 0\n\t\ti_e = 0\n\t\tif self.trans_t_dict[0][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[0][1] = self.fake_start_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[0][1] = self.noise_itv[0][1] # start_offset\n\t\t\tself.trans_t_dict[0][2] = 0.1\n\t\tif self.trans_t_dict[len(self.trans_t_dict)-1][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.fake_end_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.noise_itv[-1][0] # end_offset\n\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][2] = 0.1\n\n\t\twhile i_s < len(self.trans_t_dict):\n\t\t\twhile i_s < len(self.trans_t_dict) and self.trans_t_dict[i_s][1] != 0:\n\t\t\t\ti_s += 1\n\t\t\tif i_s == len(self.trans_t_dict):\n\t\t\t\ti_e = len(self.trans_t_dict)\n\t\t\tif i_s < len(self.trans_t_dict):\n\t\t\t\ti_s -= 1\n\t\t\t\ti_e = i_s + 1\n\t\t\t\twhile i_e < len(self.trans_t_dict) and self.trans_t_dict[i_e][1] == 0:\n\t\t\t\t\ti_e += 1\n\t\t\t\tif i_e == len(self.trans_t_dict):\n\t\t\t\t\tbreak\n\n\t\t\t\t# incorperate the noise inverval\n\t\t\t\ts_time = self.trans_t_dict[i_s][1]\n\t\t\t\te_time = self.trans_t_dict[i_e][1]\n\t\t\t\t\"\"\"\n\t\t\t\tfor ts in self.noise_itv:\n\t\t\t\t\tif len(ts) 
== 2:\t\t\t\t\t\t\n\t\t\t\t\t\ttime1 = ts[0]\n\t\t\t\t\t\ttime2 = ts[1]\n\t\t\t\t\t\tif s_time < time1 and time2 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time1)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttime0 = ts[0]\n\t\t\t\t\t\tif s_time < time0 and time0 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time0)\n\t\t\t\t\"\"\"\n\t\t\t\tchar_len = 0\n\t\t\t\tfor i in range(i_s, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i][0])\n\t\t\t\t# ratio = float(self.trans_t_dict[i_e][1]-self.trans_t_dict[i_s][1]) / float(char_len)\n\t\t\t\tratio = float(e_time - s_time) / float(char_len)\n\t\t\t\tchar_len = 0\n\t\t\t\t# s_time = self.trans_t_dict[i_s][1]\n\t\t\t\tfor i in range(i_s+1, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i-1][0])\n\t\t\t\t\tself.trans_t_dict[i][1] = s_time + char_len * ratio\n\t\t\t\t\tself.trans_t_dict[i][2] = len(self.trans_t_dict[i][0]) * ratio\n\t\t\ti_s = i_e", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def __init__(\n self, OUTPUT_DIREC, name=\"forgot_name_TC_Animator\", move_with=True, **kwargs\n ):\n self.co, self.sy = ip.read_in(OUTPUT_DIREC, name)\n # set up 
figure and animation\n self.move_with = move_with\n self.name = name\n self.algorithm_title = None\n self.algorithm_text = None\n self.timestep_text = None\n self.length_softening_distance = None\n # replot Energies etc.\n with plt.style.context((\"fivethirtyeight\")): # plot the normal things\n plt.clf()\n plot.plot_energy(self.co, self.sy)\n plot.plot_angular_momentum(self.co, self.sy)\n plot.plot_multi_AM(self.co, self.sy)\n with plt.style.context((\"dark_background\")):\n self.fig = plt.figure() # the main animation figure\n self.ax = self.fig.add_subplot(\n 111, aspect=\"equal\", autoscale_on=False, xlim=(-20, 20), ylim=(-20, 20)\n ) # ax has all these useful attributes\n # self.ax.grid() # Add gridlines\n self.no_particles = len(self.sy.coordinate_grid[0, :, 0])\n if self.no_particles < 10: # Deal with multi body animate test case\n (self.line,) = self.ax.plot(\n [], [], \"wo\", markersize=1, label=\"Test Masses\"\n )\n (self.galactic_centre,) = self.ax.plot(\n [0], [0], \"wo\", markersize=1, label=\"Galaxy A\"\n )\n (self.impactor,) = self.ax.plot(\n [0], [0], \"wo\", markersize=1, label=\"Galaxy B\"\n )\n else:\n (self.line,) = self.ax.plot(\n [], [], \"wo\", markersize=0.5, label=\"Test Masses\"\n )\n (self.galactic_centre,) = self.ax.plot(\n [0], [0], color=\"red\", marker=\"+\", label=\"Galaxy A\"\n )\n (self.impactor,) = self.ax.plot(\n [0], [0], color=\"green\", marker=\"+\", label=\"Galaxy B\"\n )\n if self.no_particles > 10:\n if self.co.halo:\n if self.move_with:\n print(\"I want to plot a Halo.\")\n print(\"May have plotted Halo if needed\")\n\n if move_with: # In the non inertial coordinate case\n plt.xlabel(r\"$X^{\\prime}$\")\n plt.ylabel(r\"$Y^{\\prime}$\")\n else:\n plt.xlabel(r\"$X$\")\n plt.ylabel(r\"$Y$\")\n\n self.add_details() # Comment out line if this is TMI\n\n # Add some text outputs in suitable areas of the figure\n self.time_text = self.ax.text(1.01, 0.95, \"\", transform=self.ax.transAxes)\n self.KE_text = self.ax.text(1.01, 0.88, \"\", transform=self.ax.transAxes)\n self.GPE_text = self.ax.text(1.01, 0.81, \"\", transform=self.ax.transAxes)\n self.energy_text = self.ax.text(1.01, 0.74, \"\", transform=self.ax.transAxes)\n plt.tight_layout() # This supposedly makes stops the label from falling off.\n\n print(\"Timer is \" + str(len(self.sy.short_timer)) + \" Long\")\n self.dt = (\n self.sy.timer[1] - self.sy.timer[0]\n ) # read the time step directly from the timer file", "def get_duration_steps(self):\n return {\n # acc. to ATV-A 121 chap. 5.2 (till 2012)\n ATV: (60 * 3, 60 * 48),\n # acc. to DWA-A 531 chap. 5.2.1\n DWA_adv: (60 * 3, 60 * 24),\n # acc. to DWA-A 531 chap. 
5.2.1\n DWA: (60, 60 * 12)\n }[self.worksheet]", "def params(kernels, time, target, target_frame, observer, corr):\n return {\n 'kernels': kernels,\n 'times': time,\n 'target': target,\n 'target_frame': target_frame,\n 'observer': observer,\n 'aberration_correction': corr,\n }", "def sceneSetup(self, st_seq, cond_seq, st_arr, targ_arr, current_step, pred_h):\r\n obs_n = self.test_data.data_obj.obs_n\r\n\r\n start_step = current_step - 19\r\n end_step = int(current_step + pred_h/0.1)\r\n\r\n bc_der_i = (targ_arr[current_step, :]-targ_arr[current_step-1, :])*10\r\n st_seq_i = st_seq[start_step, -obs_n:,:] # -obs_n will allow for variable observation length\r\n\r\n\r\n cond_seq_i = [cond_seq[n][start_step,:,:] for n in range(5)]\r\n history_i = targ_arr[start_step:current_step+1, :]\r\n targ_i = targ_arr[current_step:end_step+1, :]\r\n st_i = st_arr[current_step:end_step+1, :]\r\n return st_seq_i, cond_seq_i, bc_der_i, history_i, st_i, targ_i", "def tm_delay(t2pulsar, tm_params_orig, new_params, plot=True):\n residuals = np.longdouble(t2pulsar.residuals().copy())\n # grab original timing model parameters and errors in dictionary\n orig_params = {}\n tm_params_rescaled = {}\n error_pos = {}\n for tm_scaled_key, tm_scaled_val in new_params.items():\n if \"DMX\" in tm_scaled_key:\n tm_param = \"_\".join(tm_scaled_key.split(\"_\")[-2:])\n else:\n tm_param = tm_scaled_key.split(\"_\")[-1]\n\n if tm_param == \"COSI\":\n orig_params[\"SINI\"] = np.longdouble(tm_params_orig[\"SINI\"][0])\n else:\n orig_params[tm_param] = np.longdouble(tm_params_orig[tm_param][0])\n\n if \"physical\" in tm_params_orig[tm_param]:\n # User defined priors are assumed to not be scaled\n if tm_param == \"COSI\":\n # Switch for sampling in COSI, but using SINI in libstempo\n tm_params_rescaled[\"SINI\"] = np.longdouble(\n np.sqrt(1 - tm_scaled_val**2)\n )\n else:\n tm_params_rescaled[tm_param] = np.longdouble(tm_scaled_val)\n else:\n if tm_param == \"COSI\":\n # Switch for sampling in COSI, but using SINI in libstempo\n rescaled_COSI = np.longdouble(\n tm_scaled_val * tm_params_orig[tm_param][1]\n + tm_params_orig[tm_param][0]\n )\n tm_params_rescaled[\"SINI\"] = np.longdouble(\n np.sqrt(1 - rescaled_COSI**2)\n )\n # print(\"Rescaled COSI used to find SINI\", np.longdouble(rescaled_COSI))\n # print(\"rescaled SINI\", tm_params_rescaled[\"SINI\"])\n else:\n tm_params_rescaled[tm_param] = np.longdouble(\n tm_scaled_val * tm_params_orig[tm_param][1]\n + tm_params_orig[tm_param][0]\n )\n\n # set to new values\n # print(tm_params_rescaled)\n # TODO: Find a way to not do this every likelihood call bc it doesn't change and it is in enterprise.psr._isort\n # Sort residuals by toa to match with get_detres() call\n isort = np.argsort(t2pulsar.toas(), kind=\"mergesort\")\n t2pulsar.vals(tm_params_rescaled)\n new_res = np.longdouble(t2pulsar.residuals().copy())\n\n # remeber to set values back to originals\n t2pulsar.vals(orig_params)\n if plot:\n plotres(t2pulsar, new_res, residuals, tm_params_rescaled)\n else:\n # Return the time-series for the pulsar\n return new_res[isort], residuals[isort]\n # return -(new_res[isort] - residuals[isort])", "def get_exposure_times(self):\n exposure_time = self.meta.exposure.exposure_time\n duration = self.meta.exposure.duration\n start_time = self.meta.exposure.start_time\n mid_time = self.meta.exposure.mid_time\n end_time = self.meta.exposure.end_time\n return (exposure_time, duration, start_time, mid_time, end_time)", "def generate_singlesine(time = 0, samples_nb = 1000, rep_frequency = 10 , 
pulse_frequency = 50, amplitude = 1 , edge = 1, phase_offset = 0, noise = 0):\r\n\r\n\tif edge not in [0,1]:\r\n\t\tprint(colorama.Back.RED + colorama.Style.BRIGHT + \"ERROR: invalid phase (either 0 for a rising or a 1 for a falling edge) , exit.\"+ colorama.Style.NORMAL + colorama.Back.RESET)\r\n\t\t# Return code for error (empty input file):\r\n\t\tsys.exit(10)\r\n\r\n\r\n\t#Creating empty lists for t and y\r\n\tt = np.zeros(samples_nb)\r\n\r\n\tif noise == 0:\r\n\t\ty = np.zeros(samples_nb)\r\n\telse:\r\n\t\ty = np.random.normal(0, noise, samples_nb)\r\n\r\n\t#Determining the interval limits of t\r\n\tt_limit =1/float(rep_frequency*2)\r\n\r\n\t#Updating the t interval\r\n\tt = np.arange(-samples_nb/2,samples_nb/2)/float(samples_nb*rep_frequency) + 1/float(samples_nb*rep_frequency)\r\n\r\n\r\n\t#calculating the time_shift\r\n\t#delta_t = phase_offset/(2*np.pi*pulse_frequency)\r\n\tdelta_t = phase_offset/(2*np.pi*rep_frequency)\r\n\r\n\t#Setting the pulse amplitude\r\n\ta_pulse = amplitude\r\n\tif edge == 1:\r\n\t\ta_pulse *= -1\r\n\r\n\t#Calculating the pulse limits\r\n\tp_limit = 1/float(2*pulse_frequency)\r\n\tp_interval = list ([-p_limit,p_limit])\r\n\r\n\r\n\tfor n in range (0,len(t)) :\r\n\t\tif (t[n] + delta_t) > p_interval[0] and (t[n] + delta_t) <= p_interval[1]:\r\n\t\t\ty[n] += a_pulse * np.sin(2*np.pi*pulse_frequency*(t[n]+delta_t))\r\n\r\n\r\n\r\n\t#plt.plot(t,y)\r\n\t#plt.show()\r\n\r\n\tresult = {}\r\n\tresult ['time'] = time\r\n\tresult ['t'] = t\r\n\tresult ['y'] = y\r\n\r\n\treturn result", "def _nextAnimFrame(step=0):\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! 
It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure", "def determine_exposure_time(cn, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate varies SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. 
continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def cycle_phase_plot(data_file, period, ref_mjd=58369.30, save=False,\n show=False, min_freq=200, max_freq=2500, nbins=40, obs_t=False):\n\n burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)\n\n try:\n frb = data_file.split('/')[-1].split('_')[0]\n except:\n frb = 'FRB'\n # Defining phase and cycle\n l = {}\n c = {}\n for k in burst_dict:\n l[k] = get_phase(burst_dict[k], period, ref_mjd=ref_mjd)\n c[k] = get_cycle(burst_dict[k], period, ref_mjd=ref_mjd)\n n_cycles = int(max([m for k in c.keys() for m in c[k]]))+1\n\n obs_start_phases = {}\n obs_start_cycles = {}\n obs_duration_phase = {}\n burst_per_phase_dict = {}\n phase_lst = []\n for i,k in enumerate(burst_dict.keys()):\n obs_start_phases[k] = get_phase(obs_startmjds_dict[k], period,\n ref_mjd=ref_mjd)\n obs_start_cycles[k] = get_cycle(obs_startmjds_dict[k], period,\n ref_mjd=ref_mjd)\n obs_duration_phase[k] = np.array(obs_duration_dict[k])/(24*period)\n phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,\n ref_mjd=ref_mjd)))\n 
burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],\n bins=nbins, range=(0,1))\n\n phase_tot = [p for l in phase_lst for p in l]\n phase_tot.sort()\n burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))\n\n flat_phase_lst = [p for phase in phase_lst for p in phase]\n print(flat_phase_lst)\n print(\"Phase min: {:.2f}\".format(min(flat_phase_lst)))\n print(\"Phase max: {:.2f}\".format(max(flat_phase_lst)))\n print(\"Reference MJD: {}\".format(ref_mjd))\n\n # Defining duty cycle\n frequency_hours = '%fH' % (24 * period)\n t = Time(ref_mjd, format='mjd')\n t0 = t+((period/2)*u.day)\n tf = datetime.datetime.now()\n\n t0_low = t+((period/2)*u.day) - (2.6 * u.day)\n t0_high = t+((period/2)*u.day) + (2.6 * u.day)\n\n df_period = [t0]\n df_duty_low = [t0_low]\n df_duty_high = [t0_high]\n t_activity, t_low, t_high = t0, t0_low, t0_high\n while t_activity < tf:\n t_activity += period\n t_low += period\n t_high += period\n df_period.append(t_activity)\n df_duty_low.append(t_low)\n df_duty_high.append(t_high)\n\n n_periods = len(df_period)\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n burst_hist_colors = []\n obs_hist_colors = {}\n for i,k in enumerate(obs_duration_dict.keys()):\n freq = np.log10(fcen_dict[k])\n col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n # c = i/len(obs_duration_dict.keys())\n color = cm(col)\n if k in burst_dict.keys():\n burst_hist_colors.append(color)\n obs_hist_colors[k] = color\n\n # PLOTTING\n fig = plt.figure(figsize=(6,10))\n gs = gridspec.GridSpec(2,1, hspace=0.01, height_ratios=[1,3])\n\n # Cycle vs. phase\n ax1 = fig.add_subplot(gs[1, 0]) #ax[0]\n for i,k in enumerate(burst_dict.keys()):\n ax1.scatter(l[k], c[k], color=obs_hist_colors[k],\n edgecolors='k', linewidth=0.5, label=k, zorder=10)\n ax1.hlines(range(n_cycles), [0 for i in range(n_cycles)],\n [1 for i in range(n_cycles)], linestyles='-', alpha=0.1, zorder=0)\n\n # # Observation times\n if obs_t:\n for i, k in enumerate(obs_duration_dict.keys()):\n obs_patches = []\n for j,s in enumerate(obs_start_phases[k]):\n obs = Rectangle((s, obs_start_cycles[k][j]-0.5),\n obs_duration_phase[k][j], 1)\n obs_patches.append(obs)\n pc = PatchCollection(obs_patches, facecolor=obs_hist_colors[k],\n alpha=0.5, edgecolor=obs_hist_colors[k], label=k, zorder=5)\n ax1.add_collection(pc)\n\n ax1.text(0.05, 0.95, \"P = {0} days\".format(period),\n transform=ax1.transAxes, verticalalignment='top', fontsize=14)\n\n ax1.set_xlabel('Phase')\n ax1.set_ylabel('Cycle')\n ax1.set_xlim(0,1)\n ax1.set_ylim(-0.5, n_cycles+0.5)\n ax1.legend()\n\n # Phase histogram\n ax2 =fig.add_subplot(gs[0, 0])\n yhist,xhist,_ = ax2.hist(phase_lst, bins=nbins, range=(0,1), stacked=True,\n density=False, label=burst_dict.keys(),\n edgecolor='black', linewidth=0.5, color=burst_hist_colors)\n\n ax2.set_xlim(0,1)\n ax2.set_xticklabels([])\n ax2.set_title(frb)\n\n if save:\n plt_out = '/home/ines/Documents/projects/repeaters/figures/{}_cycle_p{:.2f}.png'.format(frb, period)\n print(\"Figure saved\", plt_out)\n plt.savefig(plt_out, pad_inches=0, bbox_inches='tight', dpi=200)\n plt.show()", "def plot_traces(self, cellname, targettime, historytime, srctype, syntype):\n self.tstart = targettime - historytime\n self.istart = int(self.tstart / self.plotdt + 0.5)\n self.tend = targettime + historytime\n self.iend = int(self.tend / self.plotdt + 0.5)\n self.tseries = np.linspace(self.tstart, self.tend, \n self.iend - self.istart)\n if cellname not in self.datafile['/Vm']:\n return []\n vm = self.datafile['/Vm/' + 
cellname] \n plt.plot(self.tseries, \n normalize(vm[self.istart:self.iend]),\n label=cellname)\n stimdata = np.asarray(self.datafile['/stimulus/stim_bg'])\n stim_start = int(self.tstart/self.simdt+0.5)\n stim_end = int(self.tend/self.simdt+0.5)\n stimdata = stimdata[stim_start: stim_end]\n plt.plot(np.linspace(self.tstart, self.tend, len(stimdata)),\n normalize(stimdata),\n 'r--', \n label='STIMULUS')\n precells = self.plot_presynaptic(cellname, srctype, syntype)\n return precells", "def ets(self):\r\n\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n if self._is_ts:\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n data = self.data[i]\r\n u = np.unique(self.events[i])\r\n event_types = u[np.unique(self.events[i]) != 0]\r\n h[i] = np.empty((event_types.shape[0], self.len_et),\r\n dtype=complex)\r\n\r\n # This offset is used to pull the event indices below, but we\r\n # have to broadcast it so the shape of the resulting idx+offset\r\n # operation below gives us the (nevents, len_et) array we want,\r\n # per channel.\r\n offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n # Loop over event types\r\n for e_idx in range(event_types.shape[0]):\r\n idx = np.where(self.events[i] == event_types[e_idx])[0]\r\n event_trig = data[idx + offset]\r\n #Correct baseline by removing the first point in the series\r\n #for each channel:\r\n if self._correct_baseline:\r\n event_trig -= event_trig[0]\r\n\r\n h[i][e_idx] = stats.sem(event_trig, -1)\r\n\r\n #In case the input events are an Events:\r\n else:\r\n #Get the indices necessary for extraction of the eta:\r\n add_offset = np.arange(self.offset,\r\n self.offset + self.len_et)[:, np.newaxis]\r\n\r\n idx = (self.events.time / self.sampling_interval).astype(int)\r\n\r\n #Make a list for the output\r\n h = [0] * self._len_h\r\n\r\n # Loop over channels\r\n for i in range(self._len_h):\r\n #If this is a list with one element:\r\n if self._len_h == 1:\r\n event_trig = self.data[0][idx + add_offset]\r\n #Otherwise, you need to index straight into the underlying data\r\n #array:\r\n else:\r\n event_trig = self.data.data[i][idx + add_offset]\r\n\r\n h[i] = stats.sem(event_trig, -1)\r\n\r\n h = np.array(h).squeeze()\r\n return ts.TimeSeries(data=h,\r\n sampling_interval=self.sampling_interval,\r\n t0=self.offset * self.sampling_interval,\r\n time_unit=self.time_unit)", "def get_initial_params(self, x, y, yerr):\n spec = fft(y)\n freq_spec = fftfreq(len(y),d=x[1]-x[0])\n estimated_freq = max(freq_spec[find(max(abs(spec[:,nonzero(freq_spec>0)[0]]))==abs(spec))])\n estimated_time = 1./estimated_freq\n p0 = array([estimated_time, 0, .5, 1000.])\n return p0", "def generate_config(filename: str,\n mw_pulse_duration: int = 20,\n flux_pulse_duration: int=40,\n ro_duration: int = 800,\n mw_mw_buffer=0,\n mw_flux_buffer=0,\n flux_mw_buffer=0,\n ro_latency: int = 0,\n mw_latency: int = 0,\n fl_latency: int = 0,\n init_duration: int = 200000):\n\n qubits = ['q0', 'q1', 'q2', 'q3', 'q4', 'q5', 'q6']\n lut_map = ['i {}', 'rx180 {}', 'ry180 {}', 'rx90 {}', 'ry90 {}',\n 'rxm90 {}', 'rym90 {}', 'rphi90 {}', 'spec {}', 'rx12 {}',\n 'square {}']\n flux_tuples = [(\"q2\", \"q0\"), (\"q0\", \"q2\"),\n (\"q0\", \"q3\"), (\"q3\", \"q0\"),\n (\"q3\", \"q1\"), (\"q1\", \"q3\"),\n (\"q1\", \"q4\"), (\"q4\", \"q1\"),\n (\"q2\", \"q5\"), (\"q5\", \"q2\"),\n (\"q5\", \"q3\"), (\"q3\", \"q5\"),\n (\"q3\", \"q6\"), (\"q6\", \"q3\"),\n (\"q6\", \"q4\"), (\"q4\", \"q6\")]\n\n cfg = {\n \"eqasm_compiler\": \"cc_light_compiler\",\n 
\"hardware_settings\": {\n \"qubit_number\": 7,\n \"cycle_time\": 20,\n \"mw_mw_buffer\": mw_mw_buffer,\n \"mw_flux_buffer\": mw_flux_buffer,\n \"mw_readout_buffer\": 0,\n \"flux_mw_buffer\": flux_mw_buffer,\n \"flux_flux_buffer\": 0,\n \"flux_readout_buffer\": 0,\n \"readout_mw_buffer\": 0,\n \"readout_flux_buffer\": 0,\n \"readout_readout_buffer\": 0},\n \"instructions\": {},\n \"resources\":\n {\"qubits\": {\"count\": 7},\n \"qwgs\": {\"count\": 3,\n \"connection_map\":\n {\n \"0\": [0, 1],\n \"1\": [2, 3, 4],\n \"2\": [5, 6]\n }\n },\n \"meas_units\": {\"count\": 7,\n \"connection_map\": {\"0\": [0],\n \"1\": [1],\n \"2\": [2],\n \"3\": [3],\n \"4\": [4],\n \"5\": [5],\n \"6\": [6]\n }\n },\n # FIXME OpenQL #103\n # \"meas_units\": {\"count\": 2,\n # \"connection_map\": {\"0\": [0, 2, 3, 5, 6],\n # \"1\": [1, 4]}\n # },\n \"edges\": {\"count\": 8,\n \"connection_map\": {\n \"0\": [2],\n \"1\": [3],\n \"2\": [0],\n \"3\": [1],\n \"4\": [6],\n \"5\": [7],\n \"6\": [4],\n \"7\": [5]\n }\n }\n },\n \"topology\":\n {\n \"x_size\": 5,\n \"y_size\": 3,\n \"qubits\":\n [\n {\"id\": 0, \"x\": 1, \"y\": 2},\n {\"id\": 1, \"x\": 3, \"y\": 2},\n {\"id\": 2, \"x\": 0, \"y\": 1},\n {\"id\": 3, \"x\": 2, \"y\": 1},\n {\"id\": 4, \"x\": 4, \"y\": 1},\n {\"id\": 5, \"x\": 1, \"y\": 0},\n {\"id\": 6, \"x\": 3, \"y\": 0}\n ],\n \"edges\":\n [\n {\"id\": 0, \"src\": 2, \"dst\": 0},\n {\"id\": 1, \"src\": 0, \"dst\": 3},\n {\"id\": 2, \"src\": 3, \"dst\": 1},\n {\"id\": 3, \"src\": 1, \"dst\": 4},\n {\"id\": 4, \"src\": 2, \"dst\": 5},\n {\"id\": 5, \"src\": 5, \"dst\": 3},\n {\"id\": 6, \"src\": 3, \"dst\": 6},\n {\"id\": 7, \"src\": 6, \"dst\": 4}\n ]\n },\n\n \"gate_decomposition\": {\n \"measz %0\": [\"measure %0\"],\n \"x %0\": [\"rx180 %0\"],\n \"y %0\": [\"ry180 %0\"],\n \"roty90 %0\": [\"ry90 %0\"],\n \"cnot %0,%1\": [\"ry90 %1\", \"cz %0,%1\", \"ry90 %1\"],\n\n # To support other forms of writing the same gates\n \"x180 %0\": [\"rx180 %0\"],\n \"y180 %0\": [\"ry180 %0\"],\n \"y90 %0\": [\"ry90 %0\"],\n \"x90 %0\": [\"rx90 %0\"],\n \"my90 %0\": [\"rym90 %0\"],\n \"mx90 %0\": [\"rxm90 %0\"],\n\n # Clifford decomposition per Epstein et al. Phys. Rev. 
A 89, 062321\n # (2014)\n \"cl_0 %0\": ['i %0'],\n \"cl_1 %0\": ['ry90 %0', 'rx90 %0'],\n \"cl_2 %0\": ['rxm90 %0', 'rym90 %0'],\n \"cl_3 %0\": ['rx180 %0'],\n \"cl_4 %0\": ['rym90 %0', 'rxm90 %0'],\n \"cl_5 %0\": ['rx90 %0', 'rym90 %0'],\n \"cl_6 %0\": ['ry180 %0'],\n \"cl_7 %0\": ['rym90 %0', 'rx90 %0'],\n \"cl_8 %0\": ['rx90 %0', 'ry90 %0'],\n \"cl_9 %0\": ['rx180 %0', 'ry180 %0'],\n \"cl_10 %0\": ['ry90 %0', 'rxm90 %0'],\n \"cl_11 %0\": ['rxm90 %0', 'ry90 %0'],\n\n \"cl_12 %0\": ['ry90 %0', 'rx180 %0'],\n \"cl_13 %0\": ['rxm90 %0'],\n \"cl_14 %0\": ['rx90 %0', 'rym90 %0', 'rxm90 %0'],\n \"cl_15 %0\": ['rym90 %0'],\n \"cl_16 %0\": ['rx90 %0'],\n \"cl_17 %0\": ['rx90 %0', 'ry90 %0', 'rx90 %0'],\n \"cl_18 %0\": ['rym90 %0', 'rx180 %0'],\n \"cl_19 %0\": ['rx90 %0', 'ry180 %0'],\n \"cl_20 %0\": ['rx90 %0', 'rym90 %0', 'rx90 %0'],\n \"cl_21 %0\": ['ry90 %0'],\n \"cl_22 %0\": ['rxm90 %0', 'ry180 %0'],\n \"cl_23 %0\": ['rx90 %0', 'ry90 %0', 'rxm90 %0']\n },\n }\n\n for q in qubits:\n cfg[\"instructions\"][\"prepz {}\".format(q)] = {\n \"duration\": init_duration,\n \"latency\": 0,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": True,\n \"type\": \"none\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"prepz\",\n \"cc_light_codeword\": 0,\n \"cc_light_opcode\": 2\n }\n\n for q in qubits:\n cfg[\"instructions\"][\"measure {}\".format(q)] = {\n \"duration\": ro_duration,\n \"latency\": ro_latency,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": False,\n \"type\": \"readout\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"measz\",\n \"cc_light_codeword\": 0,\n \"cc_light_opcode\": 4\n }\n\n for CW in range(len(lut_map)):\n for q in qubits:\n cfg[\"instructions\"][lut_map[CW].format(q)] = {\n \"duration\": mw_pulse_duration,\n \"latency\": mw_latency,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": False,\n \"type\": \"mw\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"cw_{:02}\".format(CW),\n \"cc_light_codeword\": CW,\n \"cc_light_opcode\": 8+CW}\n\n cfg[\"instructions\"]['c1'+lut_map[CW].format(q)] = {\n \"duration\": mw_pulse_duration,\n \"latency\": mw_latency,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": False,\n \"type\": \"mw\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"C1_cw_{:02}\".format(CW),\n \"cc_light_codeword\": CW,\n \"cc_light_opcode\": 32+8+CW,\n \"cc_light_cond\": 1} # 1 means : do if last meas. == 1\n\n\n cfg[\"instructions\"]['c0'+lut_map[CW].format(q)] = {\n \"duration\": mw_pulse_duration,\n \"latency\": mw_latency,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": False,\n \"type\": \"mw\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"C0_cw_{:02}\".format(CW),\n \"cc_light_codeword\": CW,\n \"cc_light_opcode\": 32+16+CW,\n \"cc_light_cond\": 2} # 2 means : do if last meas. 
== 0\n\n\n for CW in range(32):\n for q in qubits:\n cfg[\"instructions\"][\"cw_{:02} {}\".format(CW, q)] = {\n \"duration\": mw_pulse_duration,\n \"latency\": mw_latency,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": False,\n \"type\": \"mw\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"cw_{:02}\".format(CW),\n \"cc_light_codeword\": CW,\n \"cc_light_opcode\": 8+CW}\n\n for q in qubits:\n cfg[\"instructions\"][\"compensate {}\".format(q)] = {\n \"duration\": mw_pulse_duration,\n \"latency\": mw_latency,\n \"qubits\": [q],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": False,\n \"type\": \"mw\",\n \"cc_light_instr_type\": \"single_qubit_gate\",\n \"cc_light_instr\": \"cw_00\",\n \"cc_light_codeword\": 0,\n \"cc_light_opcode\": 8+0}\n\n # N.B. The codewords for CZ pulses need to be further specified.\n # I do not expect this to be correct for now.\n for ft in flux_tuples:\n # FIXME add space back in\n cfg[\"instructions\"][\"cz {},{}\".format(ft[0], ft[1])] = {\n \"duration\": flux_pulse_duration,\n \"latency\": fl_latency,\n \"qubits\": [ft[0], ft[1]],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": True,\n \"type\": \"flux\",\n \"cc_light_instr_type\": \"two_qubit_gate\",\n \"cc_light_instr\": \"fl_cw_{:02}\".format(1),\n \"cc_light_right_codeword\": 1,\n \"cc_light_left_codeword\": 1,\n \"cc_light_opcode\": 128+1\n }\n\n for cw_flux in range(8):\n for ft in flux_tuples:\n cfg[\"instructions\"][\"fl_cw_{:02} {},{}\".format(cw_flux,\n ft[0], ft[1])] = {\n \"duration\": flux_pulse_duration,\n \"latency\": fl_latency,\n \"qubits\": [ft[0], ft[1]],\n \"matrix\": [[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 0.0]],\n \"disable_optimization\": True,\n \"type\": \"flux\",\n \"cc_light_instr_type\": \"two_qubit_gate\",\n \"cc_light_instr\": \"fl_cw_{:02}\".format(cw_flux),\n \"cc_light_right_codeword\": cw_flux,\n \"cc_light_left_codeword\": cw_flux,\n \"cc_light_opcode\": 128+cw_flux\n }\n\n with open(filename, 'w') as f:\n json.dump(cfg, f, indent=4)", "def _convert_time_to_frames(pars, framerate):\n pars['stimulus-frames'] = \\\n int(round(pars['stimulus-training-ms']/1000.0*framerate))\n pars['stimulus-offset-frames'] = \\\n int(round(pars['stimulus-training-offset-ms']/1000.0*framerate))\n pars['classification-frames'] = \\\n int(round(pars['classification-ms']/1000.0*framerate))\n\n # Make the exclusion time a tuple rather than an int\n if isinstance(pars['excluded-time-around-onsets-ms'], int):\n pars['excluded-time-around-onsets-ms'] = (\n pars['excluded-time-around-onsets-ms'],\n pars['excluded-time-around-onsets-ms'])\n\n # Then convert to frames\n pars['excluded-time-around-onsets-frames'] = (\n int(round(pars['excluded-time-around-onsets-ms'][0]/1000.0*framerate)),\n int(round(pars['excluded-time-around-onsets-ms'][1]/1000.0*framerate)))\n\n pars['temporal-prior-fwhm-frames'] = \\\n int(round(pars['temporal-prior-fwhm-ms']/1000.0*framerate))\n\n return pars", "def make_test_data(self):\r\n\r\n \r\n\r\n print (\"Creating Test Sample:\")\r\n\r\n print (' Period, rate, reps, phases: ', self.period, self.framerate, self.nrepetitions, self.nPhases)\r\n\r\n nframes = int(self.period * self.framerate * self.nrepetitions)\r\n\r\n print (' nframes: ', nframes)\r\n\r\n if self.bkgdNoise > 0.:\r\n\r\n d = np.random.normal(size=(nframes,self.imageSize[0],self.imageSize[1]),\r\n\r\n loc=self.bkgdIntensity, 
scale=self.bkgdNoise).astype('float32')\r\n\r\n else:\r\n\r\n d = self.bkgdIntensity*np.ones((nframes,self.imageSize[0],self.imageSize[1])).astype('float32')\r\n\r\n \r\n\r\n ds = d.shape\r\n\r\n print (' data shape: ', ds)\r\n\r\n dx = int(ds[2]/4)\r\n\r\n xc = int(ds[2]/2)\r\n\r\n xo = [xc-dx, xc+dx]\r\n\r\n ywidth = int(ds[2]/(self.nPhases+2))\r\n\r\n framedelay = 4\r\n\r\n\r\n\r\n if not self.mode:\r\n\r\n self.phasex = []\r\n\r\n self.phasey = []\r\n\r\n for i in range(0,self.nPhases):\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # each phase is assigned to a region\r\n\r\n self.resp = np.zeros((nframes,))\r\n\r\n self.resp = np.cos(\r\n\r\n np.linspace(0, 2.0*np.pi*nframes/(self.period*self.framerate), nframes-framedelay)+i*np.pi/8 - np.pi/2.0)\r\n\r\n self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n d[:, xo[0]:xo[1], dy:dy+ywidth ] += self.resp[:, np.newaxis, np.newaxis]\r\n\r\n self.phasey.append( (2+(dy+int(ds[2]/self.nPhases))/2))\r\n\r\n self.phasex.append((6+int(ds[1]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)\r\n\r\n else:\r\n\r\n self.nPhases = 4\r\n\r\n self.spotsize = 16\r\n\r\n nrpts = 20\r\n\r\n nsites = 4\r\n\r\n one_rep = int(self.period*self.framerate)\r\n\r\n isi = int(self.period*self.framerate/self.nPhases)\r\n\r\n print('period, isi: ', self.period, isi)\r\n\r\n r = np.arange(0, nrpts, 1.)\r\n\r\n alpha = 4.\r\n\r\n A = r/alpha *np.exp(-(r-alpha)/alpha) # scaled alpha function\r\n\r\n self.spot= self.gauss_spot(self.spotsize, 3.) # the 2d spot\r\n\r\n sigsize = np.random.normal(size=self.nPhases, loc=self.signal_size, scale=self.signal_size*2)\r\n\r\n sigsize = [np.abs(s) for s in sigsize] # restrict to positive amplitudes\r\n\r\n print ('sigsize: ', sigsize)\r\n\r\n for j in range(self.nrepetitions):\r\n\r\n for i in range(self.nPhases):\r\n\r\n self.resp = np.zeros((nrpts, self.spot.shape[0], self.spot.shape[1]))\r\n\r\n for k in range(nrpts):\r\n\r\n self.resp[k,:,:] += sigsize[i]*A[k] * self.spot # make response an alpha time course of gaussian spot\r\n\r\n start = j*one_rep + i*isi + framedelay\r\n\r\n stop = start + nrpts\r\n\r\n dy = int((i+1)*ds[2]/(self.nPhases+2)) # location for phase\r\n\r\n #dy = dy + 2*z\r\n\r\n# print ('start, stop: ', start, stop)\r\n\r\n for z in range(nsites):\r\n\r\n #self.resp = np.concatenate((np.zeros(framedelay), self.resp))\r\n\r\n xp = xo[0] + i*10 - 10*z\r\n\r\n yp = dy - i*10 + 10*z\r\n\r\n d[start:stop, xp:xp+self.spotsize, yp:yp+self.spotsize ] += self.resp\r\n\r\n self.imageData = d # reduce to a 16-bit map to match camera data type\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.times = np.arange(0, nframes/self.framerate, 1.0/self.framerate)\r\n\r\n print( \" Test Image Created\")\r\n\r\n # imv = pg.ImageView()\r\n\r\n # imv.show()\r\n\r\n # imv.setImage(self.imageData)\r\n\r\n\r\n\r\n if self.layout is not None:\r\n\r\n self.layout.addWidget(imv, 0, 0)\r\n\r\n\r\n\r\n avgImage = np.mean(self.imageData, axis=0)\r\n\r\n ima = pg.ImageView()\r\n\r\n ima.setImage(avgImage)\r\n\r\n self.layout.addWidget(ima, 0, 1)\r\n\r\n self.adjust_image_data()\r\n\r\n self.avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n print (' Test file, original Image Info: ')\r\n\r\n self.print_image_info()\r\n\r\n self.rebin_image()\r\n\r\n #self.clean_windowerrors()\r\n\r\n # pg.image(self.imageData)\r\n\r\n # pg.show()\r\n\r\n # mpl.figure(1)\r\n\r\n # mpl.show()\r\n\r\n if not self.mode: # FFT 
analysis\r\n\r\n self.analysis_fourier_map(target=1, mode=0)\r\n\r\n self.plot_maps(mode=2, gfilter=self.gfilter)\r\n\r\n else:\r\n\r\n self.analysis_dFF_map()\r\n\r\n mpl.show()", "def embed_seq(self,X_seq,Y_seq):\n X_embed = -tr.ones(len(X_seq),self.og_signal_dim+self.og_noise_dim)\n # find trials of corresponding types\n pm_trials_bool = X_seq >= self.ntokens_og\n pm_trials = np.where(pm_trials_bool)\n og_trials = np.where(np.logical_not(pm_trials_bool))\n # take signal_dim (time,edim_signal_dim)\n pm_embeds = self.emat_pm[X_seq[pm_trials] - self.ntokens_og] \n og_embeds = self.emat_og[X_seq[og_trials]] \n # make noise (time,edim_noise)\n pm_noise = tr_noise_pm([len(pm_embeds),self.pm_noise_dim])\n og_noise = tr_noise_og([len(og_embeds),self.og_noise_dim])\n # cat signal_dim and noise (time,edim)\n pm_embeds = tr.cat([pm_embeds,pm_noise],-1)\n og_embeds = tr.cat([og_noise,og_embeds],-1)\n # put into respective positions\n X_embed[pm_trials] = pm_embeds\n X_embed[og_trials] = og_embeds \n # include batch dim \n X_embed = tr.unsqueeze(X_embed,1)\n Y_embed = tr.unsqueeze(tr.LongTensor(Y_seq),1)\n return X_embed,Y_embed", "def water_delay(block_size):\n\n\tdirectory = \"/local/scratch/sam5g13/Sam_5th-yr_Project/test_data\"\n\tfile_name = \"{}/tip4p2005_50_TOTEST.npy\".format(directory)\n\tgnuplot = r'/usr/bin/gnuplot'\n\n\n\tfile_data = np.load(file_name, mmap_mode='r')\n\n\t_, _, _, gamma, _ = file_data \n\n\tgamma_sample = blocksav(gamma, block_size)\n\n\tgamma_file = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\twith open(gamma_file, 'w') as outfile:\n\t\tnp.savetxt(outfile, gamma_sample)\n\n\tgamma_file_name = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\n\tcorrelations = subprocess.check_output([\"corr\", gamma_file_name])\n\t\n\tmutual_information = subprocess.check_output([\"mutual\", gamma_file_name])\n\n\tcorrelation_array = np.array(correlations.split()[5:], dtype=float)\n\tmutual_information_array = np.array(mutual_information.split()[2:], dtype=float)\n\n\tidx_odd = range(1,199,2)\n\tidx_even = range(0,200,2)\n\n\tidx_odd1 = range(1,43,2)\n\tidx_even1 = range(0,44,2)\n\n\t#correlation_values = correlation_array[idx_odd]\n\tmutual_information_values = mutual_information_array[idx_odd1]\n\tprint 'LOOK HERE...........................................', mutual_information_array[idx_odd1], len(mutual_information_array[idx_odd1])\n\n\t\"\"\"\n\tdelay_length = 0\n\n\tfor o in range(len(correlation_values) - 1):\n\t\tprint o, correlation_values[o], correlation_values[o+1]\n\t\tif correlation_values[o] > correlation_values[o+1]:\n\t\t\tdelay_length = o \n\t\telse: break\n\t\n\tdelay_length = delay_length + 1\n\n\tprint \"The delay length is\", delay_length\n\t\"\"\"\n\n\tmutual_info_length = 0\n\n\tfor o in range(len(mutual_information_values) - 1):\n\t\t#print o, correlation_values[o], correlation_values[o+1]\n\t\tif mutual_information_values[o] > mutual_information_values[o+1]:\n\t\t\tmutual_info_length = o \n\t\telse: break\n\t\n\tmutual_info_length = mutual_info_length + 1\n\t\n\tprint \"The mutual info length is\", mutual_info_length\n\n\t#assert \tdelay_length == mutual_info_length, \"The minimums of the mutual information and the correlations are not equal! 
%d %d\" % (delay_length, mutual_info_length)\n\t\n\tproduce_delays = subprocess.check_output([\"delay\", gamma_file_name, \"-d\" + str(mutual_info_length)])\n\n\t\n\tdelay_file = \"{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt\".format(directory, block_size, mutual_info_length)\n\tf = open(delay_file, 'w')\n\tf.write(produce_delays)\n\tf.close()\n\n\t\"\"\"\n\n\tprint produce_delays\n\tprint len(produce_delays), len(mutual_information_values)\n\tplt.figure(\"produce_delays vs mutual information\")\n\tplt.xlabel(\"produce_delays\")\n\tplt.ylabel(\"Mutual information\")\n\tplt.plot(produce_delays, mutual_information_values)\n\tplt.show()\n\t\n\t\"\"\"\n\t\n\tembedding = subprocess.check_output([\"false_nearest\", gamma_file_name])\n\n\tembedding_dimension = int(raw_input(\"What embedding dimension would you like to use? \"))\n\t\n\trun_calc = subprocess.check_output(['gnuplot', '-e', \"filename='{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt';ofilename='tip4p2005_50_blocksize_{}_gamma_delay_{}_graph.png'\".format(directory, block_size, mutual_info_length, block_size, mutual_info_length ),\"plot.gnu\"])\n\n\n\t\"\"\"Imports the time series and specifies each aspect used in building the recurrence matrix\"\"\"\n\n\tsettings = Settings(time_series = gamma_sample, embedding_dimension = embedding_dimension, time_delay = mutual_info_length, similarity_measure = EuclideanMetric, neighbourhood = FixedRadius(radius = 13), min_diagonal_line_length = 2, min_vertical_line_length = 2)\n\n\t\"\"\"Performs the computation and prints out all the results\"\"\"\n\n\trqacomputation = RQAComputation.create(settings, verbose = True)\n\n\trqaresult = rqacomputation.run()\n\n\tprint rqaresult\n\n\t\"\"\"Creates the Recurrence matrix for viewing\"\"\"\n\n\trpcomputation = RecurrencePlotComputation.create(settings)\n\n\trpresult = rpcomputation.run()\n\n\tImageGenerator.save_recurrence_plot(rpresult.recurrence_matrix, 'recurrence_plot.png')", "def get_time_values_from_setup(setup):\n setup_t = setup.get('time')\n t_max = setup_t.get('t_max')\n t = setup_t.get('t')\n delta_t = setup_t.get('delta_t')\n n_steps = int((t_max - t) / delta_t + 1)\n return t, delta_t, n_steps, t_max", "def source_adj_gsdf(gmdata_sim,gmdata_obs,IsolationFilter,num_pts,dt): \n t = np.arange(num_pts)*dt\n ts=np.flip(-t[1:], axis=0)\n lTime = np.concatenate((ts,t), axis=0)#Lag time \n \n #convolve the waveforms for the cross- and auto-correlagrams \n cross = np.correlate(IsolationFilter,gmdata_obs,'full')\n auto = np.correlate(IsolationFilter,gmdata_sim,'full') \n \n #GSDF Parameters \n w0=2*np.pi/(lTime[-1]) \n# wN=2*np.pi/(2*dt)\n# w(:,1)=-wN:w0:wN\n wf=w0*np.linspace(-int(num_pts/2),int(num_pts/2),num_pts) \n fi = [0.05, 0.075, 0.1]\n# fi = [0.02, 0.03, 0.04, 0.05]\n# fi = [0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2]\n Tw = 2/np.mean(fi) # Effective window\n# sw = 2*np.pi*0.72/Tw; # Sigma w ~ 0.2827433388230814\n sw=0.1 \n \n# #% A local maximum will be selected closest to 0-lag\n# I_O=np.argmax(cross)\n# I_S=np.argmax(auto) \n\n I_O, peaks_O = find_peaks(np.abs(hilbert(cross))/np.max(np.abs(hilbert(cross))), height=0.25)\n I_S, peaks_S = find_peaks(np.abs(hilbert(auto))/np.max(np.abs(hilbert(auto))), height=0.25)\n\n PkO = peaks_O.get(\"peak_heights\", \"\")\n PkS = peaks_S.get(\"peak_heights\", \"\")\n\n if (I_O==[] or I_S==[]):\n I_O=np.argmax(cross)\n I_S=np.argmax(auto)\n else:\n I_O_min = np.argmin(np.multiply((1+np.abs(lTime[I_O]))**2,np.abs(1-PkO)))\n I_O = I_O[I_O_min]\n\n I_S_min = 
np.argmin(np.multiply((1+np.abs(lTime[I_S]))**2,np.abs(1-PkS)))\n I_S = I_S[I_S_min]\n \n ##Windowing\n win1=np.exp(-(0.5*sw**2)*(lTime-lTime[I_O])**2)\n win2=np.exp(-(0.5*sw**2)*(lTime-lTime[I_S])**2) \n \n #\n WO = np.multiply(win1,cross)\n WS = np.multiply(win2,auto)\n WS = WS*np.max(WO)/np.max(WS) #Normalized window by amplitude\n #% Parameters for \"bootstraping\"\n InOR=np.argmax(WO)\n InSR=np.argmax(WS) \n \n #% Isolation filter FFT for perturbation kernel\n tff=np.conj(fftshift(fft(IsolationFilter)))*1/num_pts \n \n adj_sim_decompose = np.zeros((len(fi),num_pts))\n adj_sim_sum = np.zeros(num_pts)\n TauP_arr = np.zeros(len(fi)) \n \n ne = int(np.min([2/np.min(fi)/dt,num_pts/2])) #% Effective bandwidth for inversion\n \n for i in range(0,len(fi)): \n si = 0.1*fi[i]\n #Crosscorrelagram and Autocorrelagram filtering\n dO=computebandfftfilter_gauss(WO,dt,fi[i],si,lTime);\n dS=computebandfftfilter_gauss(WS,dt,fi[i],si,lTime); \n \n # % Check bootstraping\n InO=np.argmax(np.real(dO))\n InS=np.argmax(np.real(dS)) \n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InO=int(InO)\n if (lTime[InO] < lTime[InOR]+0.51/fi[i]) and (lTime[InO] >= lTime[InOR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InO] >= (lTime[InOR]+0.45/fi[i])):\n InO=InO-np.round(1/fi[i]/dt)\n elif (lTime[InO] < lTime[InOR]-0.45/fi[i]):\n InO=InO+np.round(1/fi[i]/dt)\n Cn = Cn+1\n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InS=int(InS) \n if (lTime[InS] < lTime[InSR]+0.51/fi[i]) and (lTime[InS] >= lTime[InSR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InS] >= (lTime[InSR]+0.45/fi[i])):\n InS=InS-np.round(1/fi[i]/dt)\n elif (lTime[InS] < lTime[InSR]-0.45/fi[i]):\n InS=InS+np.round(1/fi[i]/dt)\n Cn = Cn+1 \n\n # Five parameter Gaussian wavelet fitting \n Ao = np.max(envelope(np.real(dO))); Io = np.argmax(envelope(np.real(dO)));\n As = np.max(envelope(np.real(dS))); Is = np.argmax(envelope(np.real(dS))); \n ##Constrain the initial values \n # Parameters for curve_fit\n wi=2*np.pi*fi[i] \n \n try:\n GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]))\n GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne])) \n except:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n\n# GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]),bounds=(0,[Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]))\n# GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne]),bounds=(0,[As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]])) \n \n# % Check fitting\n if ((GaO[0]/GaS[0]) > 10**5) or np.abs(GaO[4]-GaS[4]) > lTime[-1]/2:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n \n wP=((si**2)*wf+(sw**2)*wi)/(sw**2+si**2)\n wPP=((si**2)*wf-(sw**2)*wi)/(sw**2+si**2)\n siP=((si**2)*(sw**2)/(sw**2+si**2))**0.5 \n #Estimate waveform perturbation kernel (WPK)\n IW=(siP/(sw*GaS[0]))*np.multiply(np.exp(-0.5*(wf-2*np.pi*fi[i])**2/(sw**2+si**2)),np.divide(tff,wP))+\\\n (siP/(sw*GaS[0]))*np.exp(-0.5*(wf+2*np.pi*fi[i])**2/(sw**2+si**2))*tff/wPP\n \n IW[0:int(len(IW)/2)]=0*IW[0:int(len(IW)/2)]\n \n itff = ifft(fftshift(num_pts*IW)) \n \n #Save the GSDF measurements\n TauP_arr[i] = GaO[4]-GaS[4]; #% delta_P\n \n# Jp = np.real(itff)\n# Jp = np.imag(itff)\n Jp = -np.imag(itff) \n adj_sim_decompose[i,:] = np.flip(Jp,axis=0)*TauP_arr[i] \n \n #if i>0:\n adj_sim_sum = adj_sim_sum + 
adj_sim_decompose[i,:] \n \n return adj_sim_sum, TauP_arr", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p", "def on_episode_end(self, episode, logs):\n duration = timeit.default_timer() - self.episode_start[episode]\n episode_steps = len(self.observations[episode])\n\n # Format all metrics.\n metrics = np.array(self.metrics[episode])\n metrics_template = ''\n metrics_variables = []\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n for idx, name in enumerate(self.metrics_names):\n if idx > 0:\n metrics_template += ', '\n try:\n value = np.nanmean(metrics[:, idx])\n metrics_template += '{}: {:f}'\n except Warning:\n value = '--'\n metrics_template += '{}: {}'\n metrics_variables += [name, value]\n metrics_text = metrics_template.format(*metrics_variables)\n\n nb_step_digits = str(int(np.ceil(np.log10(self.params['nb_steps']))) + 1)\n # template = '{step: nb_step_digits}/{nb_steps}: episode: {episode}, duration: {duration:.3f}s, episode steps: {episode_steps}, steps per second: {sps:.0f}, episode reward: {episode_reward:.3f}, mean reward: {reward_mean:.3f} [{reward_min:.3f}, {reward_max:.3f}], mean action: {action_mean:.3f} [{action_min:.3f}, {action_max:.3f}], mean observation: {obs_mean:.3f} [{obs_min:.3f}, {obs_max:.3f}], {metrics}'\n template = '(( episode: {episode}, step: {steps}/{nb_steps}, episode_steps: {episode_steps} --- target_reached: {target_reached}, target_reached_in_steps: {target_reached_in_steps} --- episode_reward: {episode_reward:.3f}, mean_reward: {reward_mean:.3f} --- metrics: {metrics} ))'\n variables = {\n # 'step': self.step,\n 'nb_steps': self.params['nb_steps'],\n 'episode': episode + 1,\n # 'duration': duration,\n 'episode_steps': episode_steps,\n # 'sps': float(episode_steps) / duration,\n # 'episode_reward_sum': np.sum(self.rewards[episode]),\n 'reward_mean': np.mean(self.rewards[episode]),\n # 'reward_min': np.min(self.rewards[episode]),\n # 'reward_max': np.max(self.rewards[episode]),\n # 'action_mean': np.mean(self.actions[episode]),\n # 'action_min': np.min(self.actions[episode]),\n # 'action_max': np.max(self.actions[episode]),\n # 'obs_mean': np.mean(self.observations[episode]),\n # 'obs_min': np.min(self.observations[episode]),\n # 'obs_max': np.max(self.observations[episode]),\n 'steps': logs['steps'],\n 'target_reached': logs['target_reached'],\n 'target_reached_in_steps': logs['target_reached_in_steps'],\n 'episode_reward': logs['episode_reward'],\n 'metrics': metrics_text,\n }\n print(template.format(**variables))\n\n # Free up resources.\n del self.episode_start[episode]\n del self.observations[episode]\n del self.rewards[episode]\n del self.actions[episode]\n del self.metrics[episode]", "def preview_trajectory(self, state, remain_timestep, vis=False):\n print('in preview trajectory')\n state_origin = copy.deepcopy(state)\n sim_state = [state[0][0].copy(), state[0][1]] \n\n joints = get_joints(self.joint_listener)\n ef_pose = get_ef_pose(self.pose_listener)\n ef_pose_origin = ef_pose.copy()\n joint_plan = [joints]\n ef_pose_plan = [ef_pose]\n\n for episode_steps in range(remain_timestep):\n state[0] = sim_state\n 
gaddpg_input_state = select_target_point(state)\n step = min(max(remain_timestep - episode_steps, 1), 25)\n action, _, _, aux_pred = agent.select_action(gaddpg_input_state, remain_timestep=step)\n action_pose = unpack_action(action)\n ef_pose = ef_pose.dot(action_pose)\n joints = solve_ik(joints, pack_pose(ef_pose))\n joint_plan.append(joints)\n ef_pose_plan.append(ef_pose)\n sim_next_point_state = se3_transform_pc(se3_inverse(action_pose), sim_state[0]) \n sim_state[0] = sim_next_point_state\n\n if vis:\n # vis entire traj. Might be useful\n poses_ = robot.forward_kinematics_parallel(\n wrap_value(joint_plan[0])[None], offset=True)[0]\n poses = [pack_pose(pose) for pose in poses_]\n line_starts, line_ends = grasp_gripper_lines(np.array(ef_pose_plan))\n points = state_origin[0][0]\n points = se3_transform_pc(ef_pose_origin, points)\n point_color = get_point_color(points)\n rgb = self.planner.planner_scene.renderer.vis(poses, list(range(10)), \n shifted_pose=np.eye(4),\n interact=2,\n V=np.array(V),\n visualize_context={\n \"white_bg\": True,\n \"project_point\": [points],\n \"project_color\": [point_color],\n \"static_buffer\": True,\n \"reset_line_point\": True,\n \"thickness\": [2],\n \"line\": [(line_starts[0], line_ends[0])],\n \"line_color\": [[255, 0, 0]], \n }\n )\n\n num = len(joint_plan)\n traj = np.zeros((num, 9), dtype=np.float32)\n for i in range(num):\n traj[i, :] = joint_plan[i]\n return traj", "def init_timeparams(options):\n params = OrderedDict()\n # for time prediction\n '''\n W_t = np.zeros(options['dim_proj'])\n params['W_t'] = W_t.astype(config.floatX)\n b_t = np.zeros(1)\n params['b_t'] = b_t.astype(config.floatX)\n '''\n W_t = init_params_weight(options['dim_proj'], 1)\n params['W_t'] = W_t.astype(config.floatX)\n b_t = init_params_weight(1, 1)\n params['b_t'] = b_t.astype(config.floatX)\n # w_g = np.zeros(1)\n # params['w_g'] = w_g.astype(config.floatX)\n\n return params", "def motion_model(particle_poses, speed_command, odom_pose, odom_pose_prev, dt):\n \n M = particle_poses.shape[0]\n \n # TODO. For each particle calculate its predicted pose plus some\n # additive error to represent the process noise. With this demo\n # code, the particles move in the -y direction with some Gaussian\n # additive noise in the x direction. 
Hint, to start with do not\n # add much noise.\n\n #time is in ns 1e-9\n dt = dt * 1e-9\n \n if dt ==0:\n return particle_poses\n\n for m in range(M):\n\n theta = particle_poses[m, 2]\n\n v = speed_command[0]\n omega = speed_command[1]\n \n if motion_model_velocity: #Velocity\n\n if omega == 0: #straight\n vel_dx = v * cos(theta) * dt\n vel_dy = v * sin(theta) * dt\n vel_dtheta = 0\n\n else:\n vel_dx = -v / omega * sin(theta) + v / omega * sin(theta + omega * dt)\n vel_dy = v / omega * cos(theta) - v / omega * cos(theta + omega * dt)\n vel_dtheta = omega * dt\n \n\n\n if motion_model_odom:\n odom_mov = rev_odm(odom_pose, odom_pose_prev)\n\n #particle_poses[m] = fwd_odm(particle_poses[m], odom_mov)\n\n #odom_dpose = fwd_odm2(particle_poses[m], odom_mov)\n (odom_dx, odom_dy, odom_dtheta) = fwd_odm2(particle_poses[m], odom_mov)\n\n\n\n\n #fusion\n w = motion_weighting\n dx = w * odom_dx + (1-w) * vel_dx\n dy = w * odom_dy + (1-w) * vel_dy\n dtheta = w * odom_dtheta + (1-w) * vel_dtheta\n \n \n\n \n \n #process noise\n if motion_model_noise:\n noise_x= np.random.normal(0, motion_sigma_x)\n noise_y= np.random.normal(0, motion_sigma_y)\n noise_theta= np.random.normal(0, motion_sigma_theta)\n \n #local noise\n if motion_model_noise_alt:\n localnoise_x = np.random.normal(0, motion_sigma_x)\n localnoise_y = np.random.normal(0, motion_sigma_y)\n\n noise_x = localnoise_x * cos(theta) - localnoise_y * sin(theta)\n noise_y = localnoise_y * sin(theta) + localnoise_y * cos(theta)\n noise_theta = np.random.normal(0, motion_sigma_theta)\n\n\n\n particle_poses[m, 0] += dx + noise_x\n particle_poses[m, 1] += dy + noise_y\n particle_poses[m, 2] = wraptopi(theta + dtheta + noise_theta)\n\n \n return particle_poses", "def test_single_ended_matching_sections_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n 
sections = {\n \"cold\": [slice(0.13 * cable_len, 0.24 * cable_len)],\n \"warm\": [slice(0.26 * cable_len, 0.365 * cable_len)],\n }\n\n matching_sections = [\n (\n slice(0.01 * cable_len, 0.09 * cable_len),\n slice(0.51 * cable_len, 0.59 * cable_len),\n True,\n ),\n (\n slice(0.01 * cable_len, 0.09 * cable_len),\n slice(0.91 * cable_len, 0.99 * cable_len),\n True,\n ),\n ]\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing dalpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma & dalpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n fix_dalpha=(6.46e-05, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # Test conf. ints. for the combination of everything\n ds_test.conf_int_single_ended(\n p_val=\"p_val\",\n p_cov=\"p_cov\",\n st_var=1.0,\n ast_var=1.0,\n conf_ints=[2.5, 50.0, 97.5],\n mc_sample_size=50,\n )\n\n ds_test_1 = ds_test.isel(time=-1)\n # ds_test_1.tmpf\n # ds_test_1.tmpf_mc.isel(CI=0).values\n # ds_test_1.tmpf_mc.isel(CI=2).values\n\n assert np.all(\n np.less(ds_test_1.tmpf_mc.isel(CI=0).values, ds_test_1.tmpf)\n ), \"Single-ended, trans. 
att.; 2.5% confidence interval is incorrect\"\n\n assert np.all(\n np.greater(ds_test_1.tmpf_mc.isel(CI=2).values, ds_test_1.tmpf)\n ), \"Single-ended, trans. att.; 97.5% confidence interval is incorrect\"", "def dY_dt(self, y, t=0):\n\t\t \n\t\t#variables\n\t\tpSgg = y[0] / float(sum(y))\n\t\tpSgh = y[3] / float(sum(y))\n\t\tpSh = y[3] / float(y[3] + y[4] + y[5])\n\t\t\n\t\t#exit flows\n\t\texit_Sg = y[0] * (1 / time_active) * t \n\t\texit_Pg = y[1] * (1 / time_active) * t\n\t\texit_PPg = y[2] * (1 / time_active) * t\n\t\texit_Sh = y[3] * (1 / time_active) * t\n\t\texit_Ph = y[4] * (1 / time_active) * t\n\t\texit_PPh = y[5] * (1 / time_active) * t\n\t\t#episodic flows\n\t\tSg_to_h = y[0] * (1 / tin_g) * t\n\t\tPg_to_h = y[1] * (1 / tin_g) * t\n\t\tPPg_to_h = y[2] * (1 / tin_g) * t\n\t\tSh_to_g = y[3] * (1 / tin_h) * t\n\t\tPh_to_g = y[4] * (1 / tin_h) * t\n\t\tPPh_to_g = y[5] * (1 / tin_h) * t\n\t\t#entry flows\n\t\tinto_g = new_g * t\n\t\tinto_h = new_h * t\n\t\t#infection flows\n\t\tnewinf_gg = ((y[1] + y[4]) * B1 + (y[2] + y[5]) * B2) * Cg * pSgg * t\n\t\tnewinf_gh = ((y[1] + y[4]) * B1 + (y[2] + y[5]) * B2) * Cg * pSgh * t\n\t\tnewinf_h = (y[4] * B1 + y[5] * B2) * Ch * pSh * t\n\t\t#stage progression flows\n\t\tPg_to_PPg = y[1] * D1 * t\n\t\tPPg_to_d = y[2] * D2 * t\n\t\tPh_to_PPh = y[4] * D1 * t\n\t\tPPh_to_d = y[5] * D2 * t\n\t\t\t\n\t\tstate = [- exit_Sg - newinf_gg - Sg_to_h + into_g + Sh_to_g,\n\t\t\t\t - exit_Pg - Pg_to_PPg - Pg_to_h + newinf_gg + Ph_to_g,\n\t\t\t\t - exit_PPg - PPg_to_d - PPg_to_h + Pg_to_PPg + PPh_to_g,\n\t\t\t\t - exit_Sh - newinf_gh - newinf_h - Sh_to_g + into_h + Sg_to_h,\n\t\t\t\t - exit_Ph - Ph_to_PPh - Ph_to_g + newinf_gh + newinf_h + Pg_to_h,\n\t\t\t\t - exit_PPh - PPh_to_d - PPh_to_g + Ph_to_PPh + PPg_to_h]\n\t\n\t\treturn state", "def time_to_energy(time_record_s, delay_us=np.NaN, source_to_detector_cm=np.NaN):\n time_tot_us = 1e6 * time_record_s + delay_us\n energy_mev = 81.787 / (0.3956 * time_tot_us / source_to_detector_cm) ** 2\n energy_ev = energy_mev / 1000\n return energy_ev", "def get_initial_params(self, x, y, yerr):\n spec = fft(y)\n freq_spec = fftfreq(len(y),d=x[1]-x[0])\n estimated_freq = max(freq_spec[find(max(abs(spec[:,nonzero(freq_spec>0)[0]]))==abs(spec))])\n estimated_time = 1./estimated_freq\n p0 = array([estimated_time, 0, .5])\n return p0", "def __init__(self, frame):\n super().__init__(frame)\n self.frames = None\n self.delay = None", "def _update_motion_data(self, msg):\n if self._auv_motion != msg.motion:\n self._target_euler[\"alpha\"] = self._actual_euler[\"alpha\"]\n self._target_euler[\"beta\"] = self._actual_euler[\"beta\"]\n self._target_euler[\"gamma\"] = self._actual_euler[\"gamma\"]\n self._auv_motion = msg.motion\n self._thrusters_actual_speed[\"1\"] = msg.thrusters_speed.thruster_id1_speed\n self._thrusters_actual_speed[\"2\"] = msg.thrusters_speed.thruster_id2_speed\n self._thrusters_actual_speed[\"3\"] = msg.thrusters_speed.thruster_id3_speed\n self._thrusters_actual_speed[\"4\"] = msg.thrusters_speed.thruster_id4_speed\n self._thrusters_actual_speed[\"5\"] = msg.thrusters_speed.thruster_id5_speed\n self._thrusters_actual_speed[\"6\"] = msg.thrusters_speed.thruster_id6_speed\n self._thrusters_actual_speed[\"7\"] = msg.thrusters_speed.thruster_id7_speed\n self._thrusters_actual_speed[\"8\"] = msg.thrusters_speed.thruster_id8_speed", "def test_gen_delays(self):\n min_freq = 1e6\n max_freq = 50e6\n exp = StarkRamseyXY(\n physical_qubits=[0],\n stark_amp=0.1,\n min_freq=min_freq,\n max_freq=max_freq,\n )\n test_delays 
= exp.delays()\n ref_delays = np.arange(0, 1 / min_freq, 1 / max_freq / 2)\n np.testing.assert_array_equal(test_delays, ref_delays)", "def generate_motion_patters(self):\n\n\t\t# Motion primimtives for the forward direction.....................\n\t\td_del = 0.08\t\n\t\tdt = self.dt\n\t\tv = 2\t# Assuming a constant longitudinal velocity\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns in forward directon: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\t\t\t\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp = np.vstack((p,np.array([x0, y0, theta0])))\n\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)\n\n\t\t\n\t\t# Motion primitives for the backward direction ...................\n\t\td_del = 0.1\n\t\tv = -1.2\n\t\tdelta = np.arange(-np.pi*self.max_steer/180, d_del + np.pi*self.max_steer/180, d_del)\n\t\tprint(\"Number of motion patterns for the backward direction: {}\".format(len(delta)))\n\t\tfor d in delta:\n\t\t\tx0 = self.x0\n\t\t\ty0 = self.y0\n\t\t\ttheta0 = self.theta0\n\t\t\tp = np.array([x0, y0, theta0])\n\n\t\t\tfor i in range(self.num_steps):\n\t\t\t\tx0 += v*cos(theta0)*dt\n\t\t\t\ty0 += v*sin(theta0)*dt\n\t\t\t\ttheta0 += v*tan(d)*dt/self.L\n\t\t\t\tp=np.vstack((p, np.array([x0, y0, theta0])))\n\t\t\t# Adding the motion primitive array to the list\n\t\t\tself.motion_primitives.append(p)", "def set_td(self):\n td_max = np.log2(self.td_frame_top)\n td_min = td_max - self.td_octaves\n max_extent = td_max - td_min\n extent = lin_interp(self.td_width, 0.125, max_extent)\n min_center = td_min + (extent/2)\n max_center = td_max - (extent/2)\n center = lin_interp(self.td_center, min_center, max_center)\n log_td_bounds = [center - extent/2, center + extent/2]\n # make this log? 
currently, won't this trend up?\n log_td = np.random.uniform(log_td_bounds[0], log_td_bounds[1])\n self.td_bounds = 2 ** (np.array(log_td_bounds))\n self.td = 2 ** (log_td)", "def make_obs_phase_plot(data_file, period, ref_mjd=58369.30, nbins=40, save=False,\n show=False, log=False, min_freq=200, max_freq=2500):\n\n burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)\n\n bursts = []\n for k in burst_dict.keys():\n bursts = bursts + burst_dict[k]\n\n obs_duration = []\n for k in obs_duration_dict.keys():\n obs_duration = obs_duration + obs_duration_dict[k]\n\n obs_startmjds = []\n for k in obs_startmjds_dict.keys():\n obs_startmjds = obs_startmjds + obs_startmjds_dict[k]\n\n assert len(obs_startmjds) == len(obs_duration)\n\n bursts = np.array(bursts)\n obs_duration = np.array(obs_duration)\n obs_startmjds = np.array(obs_startmjds)\n\n obs_start_phases = get_phase(obs_startmjds, period, ref_mjd=ref_mjd)\n hist, bin_edges_obs = np.histogram(obs_start_phases, bins=nbins)\n\n obs_start_phases_dict = {}\n duration_per_phase_dict = {}\n burst_per_phase_dict = {}\n duration_per_phase_tot = np.empty(nbins)\n for k in obs_startmjds_dict.keys():\n obs_start_phases_dict[k] = get_phase(np.array(obs_startmjds_dict[k]),\n period, ref_mjd=ref_mjd)\n durations = np.array(obs_duration_dict[k])\n start_phases = obs_start_phases_dict[k]\n\n d_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n dur = durations[(start_phases < bin_edges_obs[i]) &\n (start_phases > bin_edges_obs[i-1])].sum()\n d_hist.append(dur)\n duration_per_phase_tot[i-1] += dur\n duration_per_phase_dict[k] = np.array(d_hist)\n\n obs_duration = np.array(obs_duration)\n duration_hist = []\n for i in range(len(bin_edges_obs)):\n if i>0:\n duration_hist.append(\n obs_duration[(obs_start_phases < bin_edges_obs[i]) &\n (obs_start_phases > bin_edges_obs[i-1])].sum())\n\n duration_hist = np.array(duration_hist)\n bin_mids = (bin_edges_obs[:-1] + bin_edges_obs[1:])/2\n phase_lst = []\n for i,k in enumerate(burst_dict.keys()):\n print(\"phase list\", k, len(burst_dict[k]))\n phase_lst.append(list(get_phase(np.array(burst_dict[k]), period,\n ref_mjd=ref_mjd)))\n burst_per_phase_dict[k], _ = np.histogram(phase_lst[-1],\n bins=nbins, range=(0,1))\n\n phase_tot = [p for l in phase_lst for p in l]\n phase_tot.sort()\n burst_tot, _ = np.histogram(phase_tot, bins=nbins, range=(0,1))\n\n # PRINTING AVERAGE RATE PER INSTRUMENT\n for i,k in enumerate(burst_dict.keys()):\n tobs = np.sum(obs_duration_dict[k])\n nbursts = len(burst_dict[k])\n rate = nbursts / tobs\n print(\"Average rate {}: {:.3f} / h\".format(k, rate))\n\n # off = np.where(burst_per_phase_dict[k] == 0)[0]\n # on = np.where(burst_per_phase_dict[k] > 0)[0]\n # print(\"Hours Apertif observed TOTAL: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k])))\n # print(\"Hours Apertif observed during on phase: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k][on])))\n # print(\"Hours Apertif observed during off phase: {:.2f}\".format(\n # np.sum(duration_per_phase_dict[k][off])))\n\n # DEFINING COLORS\n cm = plt.cm.get_cmap('Spectral_r')\n\n burst_hist_colors = []\n obs_hist_colors = {}\n if 'uGMRT650' in obs_duration_dict.keys():\n fcen_dict['uGMRT650'] = 1000\n for i,k in enumerate(obs_duration_dict.keys()):\n freq = np.log10(fcen_dict[k])\n col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))\n color = cm(col)\n print(k, mpl.colors.to_hex(color))\n if k in burst_dict.keys():\n 
burst_hist_colors.append(color)\n obs_hist_colors[k] = color\n rate_colors = {\n 'high': cm((np.log10(max_freq)-np.log10(1800))/(np.log10(max_freq)-np.log10(min_freq))),\n 'middle': cm((np.log10(max_freq)-np.log10(500))/(np.log10(max_freq)-np.log10(min_freq))),\n 'low': cm((np.log10(max_freq)-np.log10(300))/(np.log10(max_freq)-np.log10(min_freq)))\n }\n if 'uGMRT650' in obs_duration_dict.keys():\n fcen_dict['uGMRT650'] = 650\n\n # PLOTTING\n fig, ax = plt.subplots(2, 1, sharex=True, figsize=(9,7),\n gridspec_kw={'height_ratios': [1,1]})\n ax1 = ax[0]\n yhist,xhist,_ = ax1.hist(phase_lst, bins=bin_edges_obs, stacked=True,\n density=False, label=burst_dict.keys(),\n edgecolor='black', linewidth=0.5, color=burst_hist_colors)\n\n ax1.set_ylabel('N. Bursts')\n ax1.set_xlim(0,1)\n print(\"YLIM\", 0, int(yhist[-1].max()*1.1))\n ax1.set_ylim(0, max(int(yhist[-1].max()*1.1), 4))\n ax1.legend(loc=2)\n ax1.text(-0.07, 0.95, \"a\", transform=ax1.transAxes, weight='bold')\n\n ax2 = ax[1]\n cum_ds = np.zeros(nbins)\n for i, k in enumerate(duration_per_phase_dict):\n d = duration_per_phase_dict[k]\n ax2.bar(bin_edges_obs[:-1], d, width=bin_edges_obs[1]-bin_edges_obs[0],\n align='edge', bottom=cum_ds, alpha=1,\n label=\"{} {:d} MHz\".format(k, int(fcen_dict[k])),\n edgecolor='black', linewidth=0.2, color=obs_hist_colors[k])\n cum_ds += d\n ax2.set_xlabel('Phase')\n ax2.set_ylabel('Obs. Duration (h)')\n ax2.legend(loc=2)\n ax2.text(-0.07, 0.95, \"b\", transform=ax2.transAxes, weight='bold')\n plt.tight_layout()\n\n if save:\n print('Plot saved: ./burst_obs_phase_hist.png')\n plt.savefig('./burst_obs_phase_hist.png', pad_inches=0,\n bbox_inches='tight', dpi=200)\n plt.savefig('./burst_obs_phase_hist.pdf', pad_inches=0,\n bbox_inches='tight', dpi=200)\n if show:\n plt.show()\n\n # SAVING COUNTS, OBS_DURATION AND PHASE BIN\n if log:\n print(\"Writing log\")\n dir_out = '/home/ines/Documents/projects/R3/periodicity/burst_phases/'\n with open(dir_out+'counts_per_phase_p{:.2f}.txt'.format(period), 'w') as f:\n f.write(\"# phase_bin counts chime_counts arts_counts lofar_counts obs_duration chime_duration arts_duration lofar_duration\\n\")\n for i in range(nbins):\n f.write(\"{:.3f} {} {} {} {} {:.3f} {:.3f} {:.3f} {:.3f}\\n\".format(\n bin_mids[i], burst_tot[i],\n burst_per_phase_dict[\"CHIME/FRB\"][i],\n burst_per_phase_dict[\"Apertif\"][i],\n burst_per_phase_dict[\"LOFAR\"][i],\n duration_per_phase_tot[i],\n duration_per_phase_dict[\"CHIME/FRB\"][i],\n duration_per_phase_dict[\"Apertif\"][i],\n duration_per_phase_dict[\"LOFAR\"][i]))\n for i,k in enumerate(burst_dict.keys()):\n if k == \"CHIME/FRB\":\n inst = k.replace(\"/FRB\", \"\")\n else:\n inst = k\n np.save(dir_out + 'phase_{}_p{:.2f}_f{:.1f}'.format(inst, period,\n fcen_dict[k]), [burst_dict[k], phase_lst[i]])", "def pre_process(self):\n t1_start = perf_counter()\n wav_arr_raw = np.array(self.raw_data['spectrum_0'].attrs['wavelengths'])\n self.wavelengths = wav_arr_raw\n self.back_spectra_arr = np.array(self.raw_data['spectrum_0'].attrs['background'])\n\n corr_data = []\n times_proc = []\n\n # extract reference point for 0 seconds\n time_ref = str(self.raw_data['spectrum_0'].attrs['creation_timestamp'])\n\n # spectrometer adds 'b' and quotation marks to timestamps that must be removed\n # some spectra are taken on X.000000s which does not have a .%f component - use try and except\n try:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time_ref = 
datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n\n print('Measurement was started at {}, \\n normalising times and applying a background correction \\n'.format(time_ref))\n\n # applies background correction\n for counter, spectra in enumerate(self.raw_data.keys()):\n corr_data.append(self.raw_data[spectra]-self.back_spectra_arr)\n time = str(self.raw_data[spectra].attrs['creation_timestamp'])\n try:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n deltatime = time - time_ref\n times_proc.append(deltatime.total_seconds())\n\n self.times = np.array(times_proc)\n print('Measurement contains {} spectra with {} wavelengths \\n'.format(len(self.times),len(self.wavelengths)))\n\n # data is stored as a pd Dataframe with elapsed times as indices and wavelengths as columns\n pre_proc_data = pd.DataFrame(corr_data, index = self.times, columns = self.wavelengths)\n\n # data may be disordered in time when iterated through\n # sort the data by elapsed time\n self.pre_proc_data = pre_proc_data.sort_index(axis=0)\n self.times = np.sort(self.times)\n\n t1_stop = perf_counter()\n print(\"Elapsed time for pre-processing:\", t1_stop-t1_start)\n\n return self.pre_proc_data", "def event(self,evt,evn):\n #import pdb; pdb.set_trace()\n if (evt.get(\"skip_event\")):\n return\n # check if FEE data is one or two dimensional\n data = evt.get(Camera.FrameV1, self.src)\n if data is None:\n one_D = True\n data = evt.get(Bld.BldDataSpectrometerV1, self.src)\n else:\n one_D = False\n # get event timestamp\n timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format\n\n if data is None:\n self.nnodata +=1\n #self.logger.warning(\"event(): No spectrum data\")\n evt.put(skip_event_flag(),\"skip_event\")\n\n if timestamp is None:\n evt.put(skip_event_flag(),\"skip_event\")\n #self.logger.warning(\"event(): No TIMESTAMP, skipping shot\")\n\n elif data is not None:\n self.nshots +=1\n # get data as array and split into two half to find each peak\n if one_D:\n # filtering out outlier spikes in FEE data\n data = np.array(data.hproj().astype(np.float64))\n for i in range(len(data)):\n if data[i]>1000000000:\n data[i]=data[i]-(2**32)\n if self.dark is not None:\n data = data - self.dark\n spectrum = data\n spectrum1 = data[:data.shape[0]//2]\n spectrum2 = data[data.shape[0]//2:]\n else:\n data = np.array(data.data16().astype(np.int32))\n if self.dark is not None:\n data = data - self.dark\n data = np.double(data)\n data_split1 = data[:,:data.shape[1]//2]\n data_split2 = data[:,data.shape[1]//2:]\n # make a 1D trace of entire spectrum and each half to find peaks\n spectrum = np.sum(data,0)/data.shape[0]\n spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]\n spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]\n if not one_D:\n # the x-coordinate of the weighted center of peak region\n weighted_peak_one_positions = []\n for i in range(self.peak_one_range_min,self.peak_one_range_max):\n weighted_peak_one_positions.append(spectrum[i]*i)\n weighted_sum_peak_one = np.sum(weighted_peak_one_positions)\n weighted_peak_one_center_position = weighted_sum_peak_one/np.sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])\n\n weighted_peak_two_positions = []\n for i in range(self.peak_two_range_min,self.peak_two_range_max):\n weighted_peak_two_positions.append(spectrum[i]*i)\n weighted_sum_peak_two = 
np.sum(weighted_peak_two_positions)\n weighted_peak_two_center_position = weighted_sum_peak_two/np.sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])\n\n # normalized integrated regions between the peaks\n #int_left_region = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region = np.sum(spectrum[:weighted_peak_two_center_position/2])\n\n #int_left_region_norm = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region_norm = np.sum(spectrum[:weighted_peak_two_center_position/2])/len(spectrum[:weighted_peak_two_center_position/2])\n\n int_right_region = np.sum(spectrum[self.peak_two_range_max:])\n\n int_right_region_norm = np.sum(spectrum[self.peak_two_range_max:])/len(spectrum[self.peak_two_range_max:])\n\n # normalized integrated peaks\n int_peak_one = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_two = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n int_peak_two_norm = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n if not one_D:\n if int_peak_one_norm/int_peak_two_norm > self.peak_ratio:\n print(\"event(): inflection peak too high\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_left_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy left of low energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_right_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy right of high energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n #self.logger.info(\"TIMESTAMP %s accepted\" %timestamp)\n self.naccepted += 1\n self.ntwo_color += 1\n print(\"%d Remote shot\" %self.ntwo_color)\n print(\"%s Remote timestamp\" %timestamp)", "def _get_anim_seq(self, keyframes, fps=30, 
interpolation='linear'):\n # Misc. setup\n fr = 0\n a = np.array\n func = mixes[interpolation]\n #skip_props = ['surface.{subject}.right', 'surface.{subject}.left', ] #'projection',\n # Get keyframes\n keyframes = sorted(keyframes, key=lambda x:x['time'])\n # Normalize all time to frame rate\n fs = 1./fps\n for k in range(len(keyframes)):\n t = keyframes[k]['time']\n t = np.round(t/fs)*fs\n keyframes[k]['time'] = t\n allframes = []\n for start, end in zip(keyframes[:-1], keyframes[1:]):\n t0 = start['time']\n t1 = end['time']\n tdif = float(t1-t0)\n # Check whether to continue frame sequence to endpoint\n use_endpoint = keyframes[-1]==end\n nvalues = np.round(tdif/fs).astype(int)\n if use_endpoint:\n nvalues += 1\n fr_time = np.linspace(0, 1, nvalues, endpoint=use_endpoint)\n # Interpolate between values\n for t in fr_time:\n frame = {}\n for prop in start.keys():\n if prop=='time':\n continue\n if (start[prop] is None) or (start[prop] == end[prop]) or isinstance(start[prop], (bool, str)):\n frame[prop] = start[prop]\n continue\n val = func(a(start[prop]), a(end[prop]), t)\n if isinstance(val, np.ndarray):\n frame[prop] = val.tolist()\n else:\n frame[prop] = val\n allframes.append(frame)\n return allframes", "def set_times(fits, options, arg):\n if not options.adjust_time:\n return\n\n full_dateobs = extract_full_dateobs(fits, arg)\n\n if full_dateobs is not None:\n initial_t = Time(full_dateobs, scale='utc', format='isot')\n if options.verbose:\n print(\"{0}: {1} [start]\".format(arg, initial_t.isot))\n \n tz_offset = float(options.tz_offset)\n tz_delta_t = TimeDelta(tz_offset*60*60, format='sec')\n adjusted_t = initial_t + tz_delta_t\n\n exp_time = float(options.exp_time)\n if exp_time == 0 and 'EXPTIME' in fits[0].header:\n exp_time = float(fits[0].header['EXPTIME'])\n\n midpoint_delta_t = TimeDelta(exp_time/2, format='sec')\n midpoint_t = adjusted_t + midpoint_delta_t\n\n if options.use_midpoint_for_dateobs:\n print(\"{0}: using midpoint for DATE-OBS\".format(arg))\n new_date_obs = midpoint_t.isot\n else:\n new_date_obs = adjusted_t.isot\n\n fits[0].header['DATE-OBS'] = new_date_obs\n if options.verbose:\n print(\"{0}: {1} [adjusted]\".format(arg, new_date_obs))\n\n fits[0].header['MIDPOINT'] = midpoint_t.isot\n if options.verbose:\n print(\"{0}: {1} [midpoint]\".format(arg, midpoint_t.isot))\n\n fits[0].header['JD'] = midpoint_t.jd\n if options.verbose:\n print(\"{0}: {1} [JD]\".format(arg, midpoint_t.jd))", "def __init__(self, time, rate, type='sine'):\n self.t_v = np.zeros(time*rate)\n self.heading = np.zeros(self.t_v.shape)\n self.commands = np.zeros(self.t_v.shape)\n self.derivative = np.zeros(self.t_v.shape)", "def post_process(y, number_of_mel_samples):\n # align input in a fixed (n_samples, n_prediction) shape, filling with NaNs if neccesary.\n time, aligned_y = np.asarray(VoiceActivationFeatureExtractor.frame_level_predict(y, number_of_mel_samples))\n # reduce n_samples, n_prediction to n_samples by mean\n reduced_y = FeatureExtractor.get_mean_voice_activation(aligned_y)\n y = reduced_y\n return time, y", "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. 
Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)", "def parsesetting(conf, rate, loopnum):\n global numpy, math, funcchoose\n cp = numpy.array([float(val)/1000 for val in conf[0] if val != ''])\n ncp = len(cp)\n ct = numpy.array([float(val)/1000 for val in conf[1][:ncp]])\n cv = numpy.array([float(val) for val in conf[2][:ncp]])\n \n dcp = numpy.array([float(val)/1000 for val in conf[3][:ncp]]) \n dct = numpy.array([float(val)/1000 for val in conf[4][:ncp]]) \n dcv = numpy.array([float(val)/1000 for val in conf[5][:ncp]]) \n\n special = numpy.array([int(val) for val in conf[6][:ncp]])\n reserve = [val for val in conf[7][:ncp]]\n for i in range(len(reserve)):\n reserve[i] = [float(part) for part in reserve[i].split(';')]\n\n cp += loopnum * dcp\n totalt = cp[-1] + ct[-1] # the last interval plus change\n\n changes = []\n for i in range(ncp):\n vprev = cv[i-1] + loopnum * dcv[i-1]\n vthis = cv[i] + loopnum * dcv[i]\n timescale = ct[i] + loopnum * dct[i]\n if timescale == 0:\n changes += [[vthis]]\n else:\n intervals = int(timescale * rate) # implicit rounding down\n tsteps = numpy.linspace(0, intervals/rate, intervals + 1)\n\n try:\n funcshape = funcchoose[special[i]]\n except KeyError:\n raise NotImplementedError(\"Time dependence: %d\" %special[i])\n\n if funcshape == 'adiabatic':\n A, B = numpy.power([vprev, vthis], -0.5)\n a = (A - B) / timescale\n vals = 1 / (A - a * tsteps)**2\n elif funcshape == 'exponential':\n timeconstant = reserve[i][0] / 1000 # it is in ms\n if vthis < vprev:\n vals = numpy.max([vprev * numpy.exp(-tsteps/timeconstant), [vthis] * (intervals+1)], axis=0)\n else:\n vals = numpy.min([vprev * numpy.exp(tsteps/timeconstant), [vthis] * len(tsteps)], axis=0)\n elif funcshape == 'sine':\n params = reserve[i]\n\n deltaamp = params[2]\n deltafreq = params[3]\n amplitude = params[0] + loopnum * deltaamp\n freq = params[1] + loopnum * deltafreq\n\n vals = 0.5 * amplitude * numpy.sin(2 * numpy.pi * tsteps * freq) + vthis\n elif funcshape == 'linear':\n vals = (vthis - vprev) * tsteps / timescale + vprev\n else:\n raise ValueError\n\n if tsteps[-1] < timescale:\n vals = numpy.append(vals, vthis)\n vals = numpy.append(vals, vthis)\n changes += [list(vals)]\n\n intervals = int(math.ceil(totalt * rate))\n tlist = numpy.linspace(0, intervals/rate, intervals+1)\n\n icp = 0\n counter = 0\n values = []\n for t in 
tlist:\n if icp < (ncp-1) and t >= cp[icp + 1]:\n icp += 1\n counter = 0\n\n if counter == 0:\n nvals = len(changes[icp])\n\n if counter < nvals:\n newval = changes[icp][counter]\n counter += 1\n else:\n newval = changes[icp][-1]\n values += [newval]\n return numpy.array(values)", "def EconomizedCycle(Ref,Qin,Te,Tc,DTsh,DTsc,eta_oi,f_p,Ti,Ts_Ph='Ts',skipPlot=False,axis=None,**kwargs):\n\n warnings.warn(\"This function has been deprecated. Please consider converting it to an object inheriting from \\\"BaseCycle\\\".\",DeprecationWarning)\n from scipy.optimize import newton\n\n m=1\n\n T=np.zeros((11))\n h=np.zeros_like(T)\n p=np.zeros_like(T)\n s=np.zeros_like(T)\n rho=np.zeros_like(T)\n\n T[0]=np.NAN\n s[0]=np.NAN\n T[1]=Te+DTsh\n pe=PropsSI('P','T',Te,'Q',1.0,Ref)\n pc=PropsSI('P','T',Tc,'Q',1.0,Ref)\n pi=PropsSI('P','T',Ti,'Q',1.0,Ref)\n p[1]=pe\n h[1]=PropsSI('H','T',T[1],'P',pe,Ref)\n s[1]=PropsSI('S','T',T[1],'P',pe,Ref)\n rho[1]=PropsSI('D','T',T[1],'P',pe,Ref)\n h2s=PropsSI('H','S',s[1],'P',pi,Ref)\n wdot1=(h2s-h[1])/eta_oi\n h[2]=h[1]+(1-f_p[0])*wdot1\n p[2]=pi\n #T[2]=T_hp(Ref,h[2],pi,T2s)\n T[2]=PropsSI('T','H',h[2],'P',pi,Ref)\n\n s[2]=PropsSI('S','T',T[2],'P',pi,Ref)\n rho[2]=PropsSI('D','T',T[2],'P',pi,Ref)\n\n T[5]=Tc-DTsc\n h[5]=PropsSI('H','T',T[5],'P',pc,Ref)\n s[5]=PropsSI('S','T',T[5],'P',pc,Ref)\n rho[5]=PropsSI('D','T',T[5],'P',pc,Ref)\n\n p[5]=pc\n p[6]=pi\n h[6]=h[5]\n\n p[7]=pi\n p[8]=pi\n p[6]=pi\n T[7]=Ti\n h[7]=PropsSI('H','T',Ti,'Q',1,Ref)\n s[7]=PropsSI('S','T',Ti,'Q',1,Ref)\n rho[7]=PropsSI('D','T',Ti,'Q',1,Ref)\n T[8]=Ti\n h[8]=PropsSI('H','T',Ti,'Q',0,Ref)\n s[8]=PropsSI('S','T',Ti,'Q',0,Ref)\n rho[8]=PropsSI('D','T',Ti,'Q',0,Ref)\n x6=(h[6]-h[8])/(h[7]-h[8]) #Vapor Quality\n s[6]=s[7]*x6+s[8]*(1-x6)\n rho[6]=1.0/(x6/rho[7]+(1-x6)/rho[8])\n T[6]=Ti\n\n #Injection mass flow rate\n x=m*(h[6]-h[8])/(h[7]-h[6])\n\n p[3]=pi\n h[3]=(m*h[2]+x*h[7])/(m+x)\n #T[3]=T_hp(Ref,h[3],pi,T[2])\n T[3]=PropsSI('T','H',h[3],'P',pi,Ref)\n s[3]=PropsSI('S','T',T[3],'P',pi,Ref)\n rho[3]=PropsSI('D','T',T[3],'P',pi,Ref)\n T4s=newton(lambda T: PropsSI('S','T',T,'P',pc,Ref)-s[3],T[2]+30)\n h4s=PropsSI('H','T',T4s,'P',pc,Ref)\n p[4]=pc\n wdot2=(h4s-h[3])/eta_oi\n h[4]=h[3]+(1-f_p[1])*wdot2\n #T[4]=T_hp(Ref,h[4],pc,T4s)\n T[4]=PropsSI('T','H',h[4],'P',pc,Ref)\n s[4]=PropsSI('S','T',T[4],'P',pc,Ref)\n rho[4]=PropsSI('D','T',T[4],'P',pc,Ref)\n\n p[9]=pe\n h[9]=h[8]\n T[9]=Te\n hsatL_e=PropsSI('H','T',Te,'Q',0,Ref)\n hsatV_e=PropsSI('H','T',Te,'Q',1,Ref)\n ssatL_e=PropsSI('S','T',Te,'Q',0,Ref)\n ssatV_e=PropsSI('S','T',Te,'Q',1,Ref)\n vsatL_e=1/PropsSI('D','T',Te,'Q',0,Ref)\n vsatV_e=1/PropsSI('D','T',Te,'Q',1,Ref)\n x9=(h[9]-hsatL_e)/(hsatV_e-hsatL_e) #Vapor Quality\n s[9]=ssatV_e*x9+ssatL_e*(1-x9)\n rho[9]=1.0/(x9*vsatV_e+(1-x9)*vsatL_e)\n\n s[10]=s[1]\n T[10]=T[1]\n h[10]=h[1]\n p[10]=p[1]\n\n Tbubble_e=Te\n Tbubble_c=Tc\n sbubble_e=PropsSI('S','T',Tbubble_e,'Q',0,Ref)\n sbubble_c=PropsSI('S','T',Tbubble_c,'Q',0,Ref)\n sdew_e=PropsSI('S','T',Te,'Q',1,Ref)\n sdew_c=PropsSI('S','T',Tc,'Q',1,Ref)\n\n Wdot1=m*wdot1\n Wdot2=(m+x)*wdot2\n if skipPlot==False:\n if axis==None:\n ax=matplotlib.pyplot.gca()\n else:\n ax=axis\n if Ts_Ph in ['ph','Ph']:\n ax.plot(h,p)\n ax.set_yscale('log')\n elif Ts_Ph in ['Ts','ts']:\n ax.plot(np.r_[s[7],s[3]],np.r_[T[7],T[3]],'b')\n s_copy=s.copy()\n T_copy=T.copy()\n dT=[0,-5,5,-12,5,12,-12,0,0,0]\n ds=[0,0.05,0.05,0,0.05,0,0.0,0.05,-0.05,-0.05]\n for i in range(1,len(s)-1):\n ax.plot(s[i],T[i],'bo',mfc='b',mec='b')\n 
ax.text(s[i]+ds[i],T[i]+dT[i],str(i),ha='center',va='center')\n\n s=list(s)\n T=list(T)\n s.insert(10,sdew_e)\n T.insert(10,Te)\n s.insert(5,sbubble_c)\n T.insert(5,Tbubble_c)\n s.insert(5,sdew_c)\n T.insert(5,Tc)\n ax.plot(s,T,'b')\n\n s=s_copy\n T=T_copy\n else:\n raise TypeError('Type of Ts_Ph invalid')\n\n COP=m*(h[1]-h[9])/(m*(h[2]-h[1])+(m+x)*(h[4]-h[3]))\n for i in range(1,len(T)-1):\n print('%d & %g & %g & %g & %g & %g \\\\\\\\' %(i,T[i]-273.15,p[i],h[i],s[i],rho[i]))\n print(x,m*(h[1]-h[9]),(m*(h[2]-h[1])+(m+x)*(h[4]-h[3])),COP)\n mdot=Qin/(h[1]-h[9])\n mdot_inj=x*mdot\n print('x9',x9,)\n print('Qcond',(mdot+mdot_inj)*(h[4]-h[5]),'T4',T[4]-273.15)\n print(mdot,mdot+mdot_inj)\n f=3500/60.\n eta_v=0.7\n print('Vdisp1: ',mdot/(rho[1]*f*eta_v)*1e6,'cm^3')\n print('Vdisp2: ',(mdot+mdot_inj)/(rho[1]*f*eta_v)*1e6,'cm^3')\n return COP\n\n #class SimpleCycle(object):\n # \"\"\"A class that calculates a simple thermodynamic cycle\"\"\"\n # def __init__(self, *args, **kwargs):\n # object.__init__(self, *args, **kwargs)\n # (states, steps, fluid):" ]
[ "0.55629534", "0.52799535", "0.5275234", "0.52682", "0.5261582", "0.52376616", "0.5227446", "0.52257586", "0.5189562", "0.51574963", "0.5152989", "0.5131383", "0.51249796", "0.5111892", "0.5099293", "0.5094038", "0.50885504", "0.5081776", "0.5076241", "0.50693774", "0.5060204", "0.50515777", "0.50510776", "0.50426346", "0.5041191", "0.50408787", "0.5038678", "0.50324047", "0.5021295", "0.5006632", "0.50015146", "0.49876755", "0.498138", "0.49811375", "0.49587724", "0.4952261", "0.4951998", "0.49440756", "0.49428776", "0.49413687", "0.49296194", "0.49288532", "0.4921991", "0.49176994", "0.49160933", "0.49160933", "0.49077827", "0.49053895", "0.49015066", "0.48996416", "0.48993948", "0.48797485", "0.48788932", "0.4875738", "0.48751736", "0.48716736", "0.48661917", "0.48633876", "0.48603183", "0.4857156", "0.48556128", "0.4848808", "0.48439038", "0.48354593", "0.48235932", "0.48144948", "0.48127723", "0.48006853", "0.47998405", "0.47979927", "0.47908643", "0.4789284", "0.47883606", "0.47871628", "0.4783947", "0.47732964", "0.4770036", "0.47673333", "0.47604448", "0.4756613", "0.47501066", "0.47496644", "0.47478303", "0.47433877", "0.47405246", "0.47353172", "0.47340032", "0.4731918", "0.47311118", "0.47303128", "0.47252533", "0.4724573", "0.4706198", "0.47057122", "0.47036877", "0.4703559", "0.47019306", "0.47011456", "0.47009352", "0.4700782", "0.47004297" ]
0.0
-1
Strip of leading directory names to make a pretty path for display.
def GetBase(self, fname, suffix): wds = fname.split('/') suff = suffix.replace('.BRIK','') suff = suff.replace('.HEAD','') if len(wds) > 1: return '.../%s' % '/'.join(wds[-2:]) + suff else: return fname + suff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prettify_path(path, leading=None):\r\n leading = (leading or os.getcwd()).replace(os.altsep, os.sep)\r\n s = os.path.splitext(path.replace(os.altsep, os.sep))[0]\r\n if s.startswith(leading):\r\n s = s.replace(leading, '')\r\n return s.strip(os.sep)", "def clean_directory_path(path):\n allowed = string.digits + string.ascii_letters + string.whitespace\n stripped = \"\".join(c for c in path if c in allowed)\n return stripped.replace(\" \", \"_\")", "def strip_path(self):\n return self.path.replace('/', '')", "def remove_upper_level_references(path):\n return os.path.normpath(\"/\" + path).lstrip(\"/\")", "def pretty_path(path):\n return path.replace(REPO_DIR + '/', '')", "def stripDirs(path, count):\n # TODO: This is a hack and not robust.\n parts = path.split(os.sep)\n return os.sep.join(parts[count:])", "def normdirpath(path):\n if not path.endswith('/') and path != '':\n path += '/'\n return path", "def format_name(name_dir):\n if(name_dir.endswith('/')):\n name_dir = name_dir.rstrip('/')\n return(name_dir)", "def sanatize_path(self, path):\n # Remove extra whitespace\n path = path.strip()\n\n # Remove slash from end of path\n path = path.rstrip(os.sep)\n\n return path", "def strip_path(path):\n name_re = re.compile(\"[^/]*\\.([a-z]+)$\")\n return name_re.search(path).group(0)", "def simplifyPath(self, path):\n pwd = [] # stack, present working directory\n path = path.split(\"/\")\n for curr in path:\n if not curr or curr == \".\": # skip current dir\n continue\n elif curr == \"..\":\n if pwd: # if we're not in the root directory, go back\n pwd.pop()\n else:\n pwd.append(curr)\n return \"/\" + \"/\".join(pwd)", "def _trim_path(path):\n if path.endswith(\"/\"):\n path = path[:-1] # remove / at the end\n \n return path", "def stripCheckDirectory(self, dirName):\n if not dirName:\n return \"\"\n n = dirName.strip()\n while n and n.startswith('\"'):\n n = n.strip('\"')\n while n and n.startswith(\"'\"):\n n = n.strip(\"'\")\n if n:\n n.strip()\n\n if os.path.isdir(n):\n return n\n else:\n print(('not a valid directory: %s (%s)'% (n, dirName)))\n return ''", "def prettyfypath(path):\n home = os.path.expanduser(\"~/\")\n home_n = pathnormalize(home)\n path_n = pathnormalize(path)\n if path_n.startswith(home_n):\n path = os.path.join(\"~\", os.path.relpath(path, home))\n return path", "def simplifyPath(self, path):\n assert isinstance(path, str)\n stack = ['/']\n newDir = ''\n for char in path:\n if char in ['.', '/']:\n instructionFlag = True\n else:\n\n newDir += stack.pop()", "def dir(self) -> str:\n return f'{os.path.dirname(self.path)}/'.lstrip('/')", "def standardize_path(path):\n path.rstrip('/')\n if not path.startswith('.*'):\n path = '/' + path\n path = re.compile('/+').sub('/', path)\n return path", "def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path", "def _cleanup_path(path):\n return string.join(filter(None, string.split(path, '/')), '/')", "def normalize_directory_name(directory_name: str) -> str:\n return directory_name.lower()", "def shortpath(path):\r\n import os\r\n if path.startswith(base_dir):\r\n return path[len(base_dir) + len(os.path.sep) : ]\r\n return path", "def getFormattedDirectory(directory):\n outdir = directory\n if not(outdir.endswith(\"/\")):\n outdir = outdir+\"/\"\n return 
outdir", "def _normalize_path(path):\n\n i = 0\n for c in path:\n if c != \"/\":\n break\n i = i + 1\n\n if i:\n return path[(i - 1) :]\n\n return path", "def clean_dirname(self, dirname):\n return remove(dirname,self.unwanted_chars_in_directories)", "def split_leading_directory(file_path):\n\tdelim = '/'\n\tpath_split = file_path.split(delim)\n\tif len(path_split) > 0:\n\t\tlead_dir = path_split[0]\n\telse:\n\t\tlead_dir = ''\n\tif len(path_split) > 1:\n\t\trest = delim.join(path_split[1:])\n\telse:\n\t\trest = ''\n\treturn lead_dir, rest", "def filter_pathdir(val: Optional[str]) -> str:\n return os.path.dirname(val or '')", "def extract_dir_name(input_file):\r\n fname = PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name", "def clean_folder_name(folder_name):\n folder_name = folder_name.strip('/')\n if folder_name != '':\n folder_name = os.path.normpath(folder_name)\n return folder_name", "def _isolated_path_format(self, path):\n if self._root_dir.is_parent_of(path):\n return '%s:%s' % (\n self._root_dir,\n self._api.path.join(*path.pieces[len(self._root_dir.pieces):])\n )\n else:\n assert path == self._root_dir, \\\n \"isolated path must be equal to or within %s\" % self._root_dir\n return '%s:.' % self._root_dir", "def pretty_path(input_path):\n home_path = os.path.expanduser('~')\n cwd_path = os.getcwd()\n output_path = input_path.replace(home_path, '~').replace(cwd_path, './')\n return output_path", "def dir_path(path):\n pattern='^(.*)[/]$'\n matchobj=re.match(pattern,path)\n if matchobj:\n return path\n else:\n return path+'/'", "def sanitize_fname(directory, fname):\n return opath.join(\n bytes(directory, encoding='ascii'),\n opath.normpath(\n b'/' + fname).lstrip(b'/'))", "def _strip_prefix(repository, commit, extra_strip_prefix):\n repository_split = repository.split(\"/\")\n\n if len(repository_split) != 2:\n fail(\"repository must be formatted as organization/project\")\n\n _, project = repository_split\n\n # GitHub archives omit the \"v\" in version tags, for some reason.\n if commit[0] == \"v\":\n strip_commit = commit[1:]\n else:\n strip_commit = commit\n\n result = project + \"-\" + strip_commit.replace(\"/\", \"-\")\n if extra_strip_prefix:\n result += \"/\" + extra_strip_prefix\n return result", "def cleanUpPath(path):\n # Remove extra quotes and spaces\n cleanPath = path.strip()\n cleanPath = cleanPath.strip(\"\\\"\")\n # The normalize operation needs to happen before prepend project directory \n # variable operation. After appending the placeholder variable to the output \n # is not a standard path so the normalize operation does not work correctly.\n cleanPath = ntpath.normpath(cleanPath)\n # Append project dir\n cleanPath = ntpath.join(PROJ_DIR_STR, cleanPath)\n return cleanPath", "def remove_disk_directory(filepath):\n if filepath.startswith(DISK_DIR):\n return filepath.replace(DISK_DIR, '')\n return filepath", "def _remove_path_head(path, head):\n # Bugfix 13 Oct 2017: path.replace(head,'') will remove head from everywhere in the path. This\n # is especially problematic if the user gives the local dir as \".\" (i.e. the current directory)\n # because it will remove periods from filenames\n\n # Find the head at the beginning of the path only. Escape any characters in head that have special\n # meaning in a regular expression (e.g. 
\".\" means \"any character\")\n head_regex = '^{}'.format(re.escape(head))\n path = re.sub(head_regex, '', path)\n if path.startswith('/'):\n path = path[1:]\n\n return path", "def fix_dir_separator(slash_delim_path):\n return slash_delim_path.replace('/', os.path.sep)", "def clean_slashes(path):\n return path.strip(\"/\")", "def remove_trailing_slash(path):\n if len(path) > 0:\n if path[len(path) - 1] == \"/\":\n return path[0:-1]\n else:\n return path\n else:\n return path", "def clean_path(path: str) -> str:\n previous_path = \"\"\n next_path = path\n while next_path != previous_path:\n previous_path = next_path\n next_path = copy_annotations(path, next_path.replace(\"//\", \"/\"))\n while next_path.endswith(\"/\"):\n next_path = next_path[:-1]\n return next_path", "def noTrailingSlash(path):\n return path.split('/')[0]", "def dirname(path: str) -> str:\n pass", "def format_folder_path(folder_path):\n if folder_path[-1] != '/':\n folder_path += '/'\n\n return folder_path", "def clean_path(path):\n path = path.replace(\"~\", str(Path.home()))\n if path[-1] != \"/\":\n path += \"/\"\n return path", "def leadingSlash(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n if path[0] == '/':\n return path\n else:\n return '/' + path", "def normalized_path(pathstring: str) -> str:\n pathstring = os.path.abspath(pathstring)\n return pathstring.replace('\\\\', '/').rstrip()", "def normalizePath(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n buff = '/' + path if path[0] != '/' else path\n return buff.replace('//', '/')", "def fix_path(name):\n saveslash = \"/\" if (name[0] == \"/\") else \"\"\n name = re.split(\"\\\\\\|/\", name)\n new = name[0]\n for i in range(1,len(name)):\n new = os.path.join(new, name[i])\n new = \"%s%s\" % (saveslash, new)\n return new", "def _removeRootPath(self, fileBlock):\n if(fileBlock.startswith(self.root+'/')):\n result = fileBlock.replace(self.root+'/', \"\", 1)\n result = result.strip('/')\n if(not result): result = '/'\n return result \n else:\n if(self.verb >= DLS_VERB_WARN):\n msg = \"Warning: Error when adapting name. FileBlock %s \" % (fileBlock)\n msg += \"does not start with root path (%s).\" % (self.root+'/')\n print msg\n return fileBlock", "def normpath(cwd):\n this_level = level(cwd)\n this_idx = levels.index(this_level)\n parts = cwd.split(os.path.sep)\n return os.path.join(*parts[-this_idx:])", "def _path_parts(path):\n # clean it up. 
this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def strip_path(fpath):\n if not fpath:\n return fpath\n try:\n file_path, file_name = os.path.split(fpath)\n except Exception:\n file_name = fpath\n return file_name", "def mangle_path(path):\n # Remove assigns\n path = servers.get_file_server().manglepath( str(path) )\n # Remove parent special directories\n path = os.path.abspath( path )\n # Convert path to Nebula format (slashes instead of backslashes)\n path = servers.get_file_server().manglepath( str(path) )\n # Convert drive letter to lowercase\n if len(path) > 1:\n if path[1] == ':':\n path = path[:1].lower() + path[1:]\n return path", "def strip_wpt_path(self, wpt_path):\n if self.is_wpt_path(wpt_path):\n return wpt_path[len(self.wpt_prefix()):]\n # Path is absolute or does not start with the prefix.\n # Assume the path already points to a valid WPT and pass through.\n return wpt_path", "def chop(self, pathname):\n assert pathname.startswith(self.dist_dir)\n return pathname[len(self.dist_dir):]", "def _normalize_path(path):\n if path is None:\n directory = BASE_PATH\n path = ''\n else:\n path = op.normpath(path)\n directory = op.normpath(op.join(BASE_PATH, path))\n\n if not is_in_folder(BASE_PATH, directory):\n abort(404)\n\n if not op.exists(directory):\n abort(404)\n\n return BASE_PATH, directory, path", "def directory_name(initial_name):\n import string\n allowed_chars = string.digits + string.ascii_letters + \" _.\"\n result_name = \"\"\n for ch in initial_name:\n if allowed_chars.find(ch) != -1:\n result_name += ch\n return result_name if result_name != \"\" else \"course_folder\"", "def format_path(path):\n return path if path.endswith('/') else path + '/'", "def safepath(p):\n return p.replace('/', os.sep)", "def _relativize(base: str, current: str) -> str:\n if current.startswith(base):\n return current.replace(base, \"\", 1)\n return current", "def dirname(path):\r\n return split(path)[0]", "def StripFolder(path):\n\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n folders = [path]\n allf = []\n while folders:\n folder = folders.pop(0)\n allf.append(folder)\n for lister in os.listdir(folder):\n if os.path.isdir(folder + lister):\n folders.append(folder + lister + \"\\\\\")\n elif not path == folder:\n CopyFolder(folder, path)\n shutil.rmtree(folder)\n\n return tuple(allf)", "def clean_path(self, path):\n if('.flaccuesplit.' 
in path):\n path, flaccue_details = path.split('.flaccuesplit.')\n if(path.startswith(self.mount)):\n # Strip off the mount point.\n path = path[len(self.mount):]\n return path", "def normalizePath(path):\n\tfrom os.path import normpath, sep\n\tresult = normpath(path)\n\tresult = result.replace(\"/\",sep)\n\tresult = result.replace(\"\\\\\",sep)\n\treturn adaptPath(result)", "def get_directory(path):\n return mangle_path(path).rsplit('/',1)[0]", "def clean_path(source):\n source.file_name = source.file_name.replace('\\\\','/')\n return source", "def _clean_name(self, name):\n # Useful for windows' paths\n return os.path.normpath(name).replace(\"\\\\\", \"/\")", "def __clean_path(self, path):\n matches = re.finditer(r'\\%\\(.*?\\)[diouxXeEfFgGcrsa]', path)\n for _, match in enumerate(matches):\n pattern = match.group()\n path = path.replace(pattern, u'')\n pos = path.find(os.path.sep*2)\n if pos>=0:\n path = path[:pos+1]\n return path", "def __parse_full_path(path):\n dir = path[:path.rfind('/') + 1]\n name = path[path.rfind('/') + 1:]\n return dir, name", "def spath_stripoptions(spath):\n l = [comp.split(\"?\", 1)[0] for comp in spath.split(\"/\")]\n return \"/\".join(l)", "def format_path(s,\n path=None,\n replace_long_filename=False):\n # TODO: could possibly simplify by moving representation logic to FileNode\n replaced_path_name = False\n if path is not None:\n if s.startswith(path):\n replaced_path_name = True\n s = s[len(path)+1:]\n if replace_long_filename:\n head, tail = os.path.split(s)\n name_prefix = head.replace('/','_')\n if '/' in head and len(name_prefix) > 0:\n s = s.replace(name_prefix, '. . . ')\n if replaced_path_name:\n s = \"[DIR]/\" + s\n return \"/\\\\n\".join(s.split('/'))", "def format_path(file: str) -> str:\n return os.path.abspath([file.replace('/', os.path.sep)][0])", "def flatten_path(path):\n return path.split(\"/\")[-1]", "def format_dir_list(curdir, search=\"\"):\n dir_list = format_dir_list_recurse(curdir, search=search)\n return dir_list[::-1]", "def fix_path(self):\n paths = self.data['path'].tolist()\n prefixes = [re.findall(r'[A-Z\\-0-9]+', path) for path in paths]\n prefix_good = [str(prefix[0]) + \".json\" for prefix in prefixes]\n self.data['path'] = prefix_good", "def normalized_file_path(path: str) -> str:\n # Convert Unix path to Windows path for WSL\n if PLATFORM == \"WSL\":\n return path.replace(\"/\", \"\\\\\")\n\n return path", "def __sanitize(name):\n if name[-1] == \"/\":\n return name[:-1]\n return name", "def normpath (path):\n return os.path.normpath(path)", "def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]", "def directory_slash(destination):\n\n if destination[-1] != '/':\n return destination + '/'\n\n return destination", "def url_clean(path):\n return path[path.find('/'+settings.URL_ADMIN_SEP):]", "def _strip_package_name(name):\n name = _strip(name)\n if name.find('.') != -1:\n name = name.split('.')[0]\n return name", "def remove_root(root, paths):\r\n return [pth.replace(root + '/', '') for pth in paths]", "def _normpath(self, path):\n return os.path.normpath(os.path.normcase(path))", "def cleanup(name):\n cleaned_name = name.rstrip(\".\")\n return cleaned_name", "def collapse_segments(path):\n # replace backslashes\n # note: this is _against_ the specification (which would require\n # backslashes to be left alone, and finally quoted with '%5C')\n # But replacing has several positive effects:\n # - Prevents path attacks on Windows systems (using \\.. 
parent refs)\n # - Fixes bad URLs where users used backslashes instead of slashes.\n # This is a far more probable case than users having an intentional\n # backslash in the path name.\n if path.startswith('\\\\'):\n path = path.replace('\\\\', '/')\n # shrink multiple slashes to one slash\n path = _slashes_ro.sub(\"/\", path)\n # collapse redundant path segments\n path = _thisdir_ro.sub(\"\", path)\n path = _samedir_ro.sub(\"/\", path)\n # collapse parent path segments\n # note: here we exploit the fact that the replacements happen\n # to be from left to right (see also _parentdir_ro above)\n newpath = _parentdir_ro.sub(\"/\", path)\n while newpath != path:\n path = newpath\n newpath = _parentdir_ro.sub(\"/\", path)\n # collapse parent path segments of relative paths\n # (ie. without leading slash)\n newpath = _relparentdir_ro.sub(\"\", path)\n while newpath != path:\n path = newpath\n newpath = _relparentdir_ro.sub(\"\", path)\n\n path = path.rstrip('.')\n return path", "def clean_paths(row):\n return os.path.basename(row['oldfractal'])", "def Directory(self) -> str:", "def name_check(dirname):\r\n\tif dirname[-1] == \" \":\r\n\t\tdirname = dirname[:-1]\r\n\tif dirname[-1] != \"/\":\r\n\t\tdirname += \"/\"\r\n\treturn dirname", "def dir2ns(fname,checkfs=True):\n\tif checkfs: assert os.path.isdir(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tfname = fname.rstrip(\"/\")\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\tfname = fname[fname.find(mark)+len(mark)+1:]\n\treturn fname.replace(\"/\",\"::\")", "def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))", "def escapeForPath(s):\n return s.replace(os.sep, \"\")", "def clean_file_path(path):\r\n\r\n return path.split(\"/\")[-1]", "def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")", "def _possible_dir_name(contents):\n top_level_dirs = _find_top_level_directories(contents, sep='/')\n if len(top_level_dirs) == 0:\n raise InvalidFile, 'has no contents'\n elif len(top_level_dirs) > 1:\n raise MultipleTopLevels, 'more than one top levels: %s' % top_level_dirs\n d = abspath(top_level_dirs[0])\n assert exists(d), 'missing dir: %s' % d\n if not isdir(d):\n # eg: http://pypi.python.org/pypi/DeferArgs/0.4\n raise SingleFile, 'contains a single file: %s' % d\n return d", "def osnorm(self):\n import os\n if os.sep=='/' and \"\\\\\" in str(self):\n return Path(os.path.normpath(str(self).replace('\\\\','/' )))\n elif os.sep=='\\\\' and \"/\" in str(self):\n return Path(os.path.normpath(str(self).replace('/','\\\\' )))\n else:\n return self.norm()", "def strip_beginning_slashes(url):\n find = re.search(r\"^/+\", url)\n if find:\n url = re.sub(find.group(0), \"\", url)\n return url", "def clean_song_name(self, songname):\n # Reverse-sort the music_dirs list by string length, as if one \n # music_dir is a subset of the other (e.g. 
\"/music\" and \"/music/jazz\"),\n # we could end up cutting off too little\n for md in sorted(self.music_dirs, key=len, reverse=True):\n if songname.find(md) == 0:\n songname = songname.replace(md, \"\")\n break # shouldn't need to do any more replacements\n return songname", "def normalize_base_dir(base_dir: Optional[str]) -> str:\n if base_dir is None:\n base_dir = os.path.abspath(\"\")\n elif not is_absolute_path(base_dir):\n base_dir = os.path.abspath(base_dir)\n while base_dir != '/' and base_dir.endswith('/'):\n base_dir = base_dir[:-1]\n return base_dir", "def clean_path(path):\n return resolved_path(path)", "def pathnormalize(p):\n return os.path.normcase(os.path.normpath(p))" ]
[ "0.7440191", "0.67219174", "0.67075866", "0.6677584", "0.6655997", "0.6626382", "0.65079343", "0.6468608", "0.6372779", "0.6348614", "0.63264537", "0.632086", "0.6295324", "0.62762046", "0.6208288", "0.6200279", "0.619991", "0.61735916", "0.61682737", "0.6166878", "0.61517286", "0.6142667", "0.61116725", "0.6085429", "0.60334426", "0.6025311", "0.6003026", "0.59721357", "0.59555566", "0.58910525", "0.58588725", "0.58506024", "0.5847451", "0.5831965", "0.58305365", "0.58220476", "0.58156455", "0.58133155", "0.58027416", "0.58024204", "0.5786784", "0.5774427", "0.5756169", "0.5750962", "0.57429004", "0.5701968", "0.56931734", "0.56875503", "0.5681426", "0.56794435", "0.56789917", "0.56658053", "0.5646202", "0.56289923", "0.5621241", "0.56188416", "0.5616474", "0.5609818", "0.560842", "0.5602451", "0.55996925", "0.559708", "0.55908525", "0.55867916", "0.5584848", "0.5575623", "0.55744374", "0.5565832", "0.5564407", "0.55632764", "0.55541223", "0.55319065", "0.5504", "0.5501783", "0.54974586", "0.5492336", "0.5490624", "0.54903895", "0.54778844", "0.54660124", "0.5464151", "0.545771", "0.5457156", "0.54523784", "0.5433324", "0.5427432", "0.54253775", "0.5423912", "0.54229337", "0.54195464", "0.54152614", "0.5402163", "0.5402059", "0.5390971", "0.5386744", "0.53677964", "0.5361514", "0.53561175", "0.5349396", "0.5343144", "0.53413486" ]
0.0
-1
Create a text string summarizing how the motion correction was done.
def SummarizeMotionTargets(self): text = '\nSummary of motion-correction: \n' for epi in self.entry_map['epi']: info = self.info[epi] text += self.GetBase(epi, '') base = self.GetBase(info['base_entry'], '') text += ' ->3dvolreg-> %s[%s]' % (base, info['base']) if info['fmap_entry'] is not None: fmap = info['fmap_entry'] text += ' ->assume-registered-> %s' % self.GetBase(fmap, '') anat = self.info[fmap]['anat_ref'] if info['catmats']: text += ' ->3dAllineate-> %s' % \ self.GetBase(anat, '') else: text += ' ->assume-registered-> %s' % self.GetBase(anat, '') else: anat = info['anat_tgt'] text += ' ->assume-registered-> %s' % self.GetBase(anat, '') text += '\nEPIs should be in register with %s\n' % \ self.GetBase(self.anatomical, '') return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def getDebugText(self):\n timeDifference = time.time() - self.time_created\n hours = math.floor(timeDifference / 3600)\n minutes = math.floor((timeDifference % 3600) / 60)\n seconds = math.floor(timeDifference % 3600 % 60)\n\n output = \"\\n\" * 50\n output += \"Time started: %s\\n\" % time.ctime(self.time_created)\n output += \"Time now: %s\\n\" % time.ctime()\n output += \"Time elapsed: %02d:%02d:%02d\\n\" % (hours, minutes, seconds)\n output += (\"=\" * 80) + \"\\n\"\n output += \"Health potions used: %d\\n\" % self.hp_pots_used\n output += \"Health potions per hour: %d\\n\" % (self.hp_pots_used / (\n timeDifference / 3600))\n output += \"Mana potions used: %d\\n\" % self.mana_pots_used\n output += \"Mana potions per hour: %d\\n\" % (self.mana_pots_used / (\n timeDifference / 3600))\n return output", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text", "def summarize(self):\n txtSumm = ''\n\n if self.legtype == 'Takeoff':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Landing':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Other':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Observing':\n txtSumm = \"%02d -- %s, RA: %s, Dec: %s, LegDur: %s, ObsDur: %s\" %\\\n (self.legno, self.target, self.ra, self.dec,\n str(self.duration),\n str(self.obsdur))\n txtSumm += \"\\n\"\n if self.nonsid is True:\n txtSumm += \"NONSIDERIAL TARGET -- NAIFID: %d\" % (self.naifid)\n txtSumm += \"\\n\"\n txtSumm += \"(The SOFIA project sincerely hopes you enjoy \"\n txtSumm += \"your observing breaks due to XFORMS crashes)\"\n txtSumm += \"\\n\"\n txtSumm += \"ObsPlan: %s, ObsBlk: %s\" % (self.obsplan, self.obsblk)\n txtSumm += \"\\n\\n\"\n txtSumm += \"Elevation Range: %.1f, %.1f\" % (self.range_elev[0],\n self.range_elev[1])\n txtSumm += \"\\n\\n\"\n txtSumm += \"ROF Range: %.1f, %.1f\" % (self.range_rof[0],\n self.range_rof[1])\n txtSumm += \"\\n\"\n txtSumm += \"ROF Rate Range: %.1f, %.1f %s\" % (self.range_rofrt[0],\n self.range_rofrt[1],\n self.range_rofrtu)\n txtSumm += \"\\n\\n\"\n txtSumm += \"True Heading Range: %.1f, %.1f\" % (self.range_thdg[0],\n self.range_thdg[1])\n txtSumm += \"\\n\"\n txtSumm += \"True Heading Rate Range: %.1f, %.1f %s\" %\\\n (self.range_thdgrt[0],\n self.range_thdgrt[1],\n self.range_thdgrtu)\n txtSumm += \"\\n\"\n txtSumm += \"Moon Angle: %.1f, Moon Illumination: %s\" %\\\n (self.moonangle, self.moonillum)\n\n return txtSumm", "def __str__(self):\n return f'{self.text}: {self.chs}, correct answer: {self.solution}'", "def description_text(self, P=None):\n\n if not P:\n P = self.parameters.values_to_dict()\n\n text = \"Calculated with {hamiltonian}, converged to \"\n # Convergence\n if P[\"convergence\"] == \"normal\":\n text += \"the 'normal' level of 1.0e-04 kcal/mol.\"\n elif P[\"convergence\"] == \"precise\":\n text += \"the 'precise' level of 1.0e-06 kcal/mol.\"\n elif P[\"convergence\"] == 
\"relative\":\n text += \"a factor of {relative} times the normal criterion.\"\n elif P[\"convergence\"] == \"absolute\":\n text += \"converged to {absolute}.\"\n\n if self.parameters[\"uhf\"].is_expr:\n text += (\n \" Whether to use spin-unrestricted SCF (UHF) for closed-shell molecules\"\n \"will be determined by '{uhf}'.\"\n )\n elif self.parameters[\"uhf\"].get():\n text += \" The SCF will be spin-unrestricted (UHF) for all molecules.\"\n else:\n text += (\n \" The SCF will be restricted for closed-shell molecules (RHF) and \"\n \"spin-unrestricted (UHF) for all others.\"\n )\n\n # MOZYME localized molecular orbitals.\n if P[\"MOZYME\"] == \"always\":\n text += (\n \"\\n\\nThe SCF will be solved using localized molecular orbitals \"\n \"(MOZYME), which is faster than the traditional method for larger \"\n \"systems.\"\n )\n used_mozyme = True\n elif P[\"MOZYME\"] == \"for larger systems\":\n text += (\n \"\\n\\nThe SCF will be solved using localized molecular orbitals \"\n \"(MOZYME) for systems with {nMOZYME} atoms or more. This method is \"\n \"faster than the traditional method for larger systems.\"\n )\n used_mozyme = True\n else:\n used_mozyme = False\n\n if used_mozyme:\n follow_up = P[\"MOZYME follow-up\"]\n if \"exact\" in follow_up:\n text += (\n \" The energy given by MOZYME slowly accumulates error due to the \"\n \"increasing non-orthogonality of the localized orbitals after \"\n \"many iterations. A single point energy using the traditional \"\n \"method will be run to get the correct energy.\"\n )\n elif \"new\" in follow_up:\n text += (\n \" The energy given by MOZYME slowly accumulates error due to the \"\n \"increasing non-orthogonality of the localized orbitals after \"\n \"many iterations. A single point energy using fresh localized \"\n \"orbitals will be run to get the correct energy.\"\n )\n elif follow_up == \"none\":\n text += (\n \" The energy given by MOZYME slowly accumulates error due to the \"\n \"increasing non-orthogonality of the localized orbitals after \"\n \"many iterations. No follow-up calculation will be done, so be \"\n \"careful with the final energies produced.\"\n )\n used_mozyme = False\n else:\n logger.error(f\"Don't recognize the MOZYME follow-up: '{follow_up}'\")\n\n # Handle COSMO\n if self.parameters[\"COSMO\"].is_expr:\n text += (\n \"\\n\\n'{COSMO}' will determine whether to use the COSMO solvation \"\n \"model. 
If it is used the parameters will be \"\n )\n elif self.parameters[\"COSMO\"].get():\n text += \"\\n\\nThe COSMO solvation model will be used with \"\n\n if self.parameters[\"COSMO\"].is_expr or self.parameters[\"COSMO\"].get():\n text += (\n \"dielectric constant = {eps}, solvent radius = {rsolve}, \"\n \"{nspa} grid points per atom, and a cutoff of {disex}.\"\n )\n\n # And bond orders\n if P[\"bond orders\"] == \"yes\":\n text += \"\\n\\nThe bond orders will be calculated.\"\n elif P[\"bond orders\"] == \"yes, and apply to structure\":\n text += (\n \"\\n\\nThe bond orders will be calculated and used to set the bonding \"\n \"for the structure.\"\n )\n\n return self.header + \"\\n\" + __(text, **P, indent=4 * \" \").__str__()", "def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr", "def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)", "def create_analysis(self):\n text = self.input_main.get(\"1.0\", \"end-1c\")\n if not text:\n return \"\"\n if self.ignore_case_value.get():\n text = text.lower()\n\n char_map = calc.char_mapping(text)\n unique_chars = len(char_map)\n entropy = calc.entropy(text)\n metric_entropy = calc.metric_entropy(text)\n optimal = calc.optimal_bits(text)\n\n info = \"\"\"Length: {}\nUnique chars: {}\nEntropy: {}\nMetric entropy: {}\nOptimal bit usage: {}\"\"\".format(\n len(text),\n unique_chars,\n entropy,\n metric_entropy,\n optimal\n )\n\n table_head = \" Char | Probability | Bits | Occurrences \"\n table_body = \"\\n\".join(\n [\n \" {:<4} | {:>11.7f} | {:>11.7f} | {:>11}\".format(\n char,\n prob, calc.prob_to_info(prob),\n text.count(char)\n )\n for char, prob in char_map\n ]\n )\n table = \"\\n\".join([table_head, table_body])\n\n return \"\\n\\n\".join([info, table])", "def get_summary(self):\n \n text = \"word: {}, total_score: {} \\n\".format(self.clue, self.total_score)\n for card, score in self.sorted_card_score_pairs:\n card_text = \"\\t card.name:{} (team:{}), similarity: {} \\n\".format(card.name, card.color, score)\n text += card_text\n return text", "def getOutput(self):\n text = \"\"\n text += \"*\"*self.getLevel() + \" \"\n if self.isTODO():\n text += \"TODO \"\n if self.isDONE():\n text += \"DONE \"\n text += self.getTitle()\n return text", "def prepareExplainerText(amount, ranges):\n text = \"\\n\"\n for currKey in amount:\n text += f\"{currKey}: {ranges[currKey]} | {amount[currKey]}\\n\"\n text += \"\\n\\n\"\n return text", "def result(self):\n return (\"MRR@\" + str(self.length) + \": \"), (self.pos / self.test)", "def summary_string(self) -> str:", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def reconcile_output(self):\n final_string = \"\"\n final_string += \"TEXT: {0}\\n\".format(self._text)\n final_string += \"ID: {0}\\n\".format(self._index)\n final_string += \"Count: {0}\\n\".format(self._count)\n\n final_string += \"=Doc Count Begin=\\n\"\n for doc in 
list(self._doc_count.keys()):\n final_string += \"{0} $!$ {1}\\n\".format(doc, self._doc_count[doc])\n final_string += \"=Doc Count End=\\n\"\n\n final_string += \"=CEs Begin=\\n\"\n for doc in list(self.word_pairs.keys()):\n for ce in set(self.word_pairs[doc]):\n #format: doc $!$ ce \n final_string += \"{0} $!$ {1} $!$ {2}\\n\".format(doc, ce, self.word_pairs[doc].count(ce))\n final_string += \"=CEs End=\\n\"\n\n final_string += \"=Self Tags Begin=\\n\"\n for source in list(self.this_semantic_tags.keys()):\n for tag in set(self.this_semantic_tags[source]):\n final_string += \"{0} $!$ {1} $!$ {2}\\n\".format(source, tag, self.this_semantic_tags[source].count(tag))\n final_string += \"=Self Tags End=\\n\"\n\n final_string += \"=Semantic Begin=\\n\"\n for source in list(self.lexico_semantic.keys()):\n #for some reason, I had each semantic class getting assigned the\n #same overall count?\n #sem_counts = self.getLexicoSemanticCounts(source)\n for ce in self.lexico_semantic[source]:\n for sem_cls in set(self.lexico_semantic[source][ce]):\n c = self.lexico_semantic[source][ce].count(sem_cls)\n final_string += \"{0} $!$ {1} $!$ {2} $!$ {3}\\n\".format(source, ce, sem_cls, c)\n final_string += \"=Semantic End=\\n\"\n final_string += \"$!$\\n\"\n return final_string", "def _get_delta_text_string(self):\n textstring = \"\"\n if (\n self.is_commit_test is True\n ): # include commits if this is an analysis of commit history\n # Write SHA1 commits under examination\n if len(self.delta_fp_string_dict.delta_dict[\"commits\"]) > 0:\n textstring += (\n os.linesep + \"Commit history SHA1 for this analysis:\" + os.linesep\n )\n for sha1_commit in self.delta_fp_string_dict.delta_dict[\"commits\"]:\n textstring += \" \" + sha1_commit + os.linesep\n textstring += os.linesep\n elif (\n self.is_branch_test is True\n ): # include branches if this is a branch v branch analysis\n if len(self.delta_fp_string_dict.delta_dict[\"branches\"]) > 0:\n textstring += os.linesep + \"Branches under analysis:\" + os.linesep\n for branch in self.delta_fp_string_dict.delta_dict[\"branches\"]:\n textstring += \" \" + branch + os.linesep\n textstring += os.linesep\n\n # include added files\n if len(self.delta_fp_string_dict.delta_dict[\"added\"]) > 0:\n for added_file in self.delta_fp_string_dict.delta_dict[\"added\"]:\n add_append_string = \"[A]:\" + added_file + os.linesep\n textstring += add_append_string\n # include deleted files\n if len(self.delta_fp_string_dict.delta_dict[\"deleted\"]) > 0:\n for deleted_file in self.delta_fp_string_dict.delta_dict[\"deleted\"]:\n del_append_string = \"[D]:\" + deleted_file + os.linesep\n textstring += del_append_string\n # include modified files\n if len(self.delta_fp_string_dict.delta_dict[\"modified\"]) > 0:\n for modified_file in self.delta_fp_string_dict.delta_dict[\"modified\"]:\n mod_append_string = \"[M]:\" + modified_file + os.linesep\n textstring += mod_append_string\n\n return textstring", "def get_text(self):\n text_complet = \"\"\n rez_dict = self.__results\n for i in range(0, len(rez_dict[\"text\"])):\n text = rez_dict[\"text\"][i]\n conf = int(rez_dict[\"conf\"][i])\n if conf > self.__min_confidence:\n text_complet += text + \" \"\n return text_complet", "def text(cfg, phase, high=6):\n short = cfg[\"fake\"].sentence(\n nb_words=high, variable_nb_words=True, ext_word_list=None\n )\n return \"{} {}\\n\\n{}\".format(\" \".join(cfg[phase]), short, blurb(cfg))", "def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n 
self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def get_str_metadata(self):\n return \"\\n\".join([\"Guessed by {}\".format(self.guessed_by), \"{} metaphors used\".format(self.metaphors_used)])", "def create_report(self):\n text_string = \"\\n\"\n text_string += \"Donor Name | Total Given | Num Gifts |\\\n Average Gift\\n\\\n ------------------------------------------------------------------\\n\"\n for donor in sorted(self.donors, key=Donor.sort_key, reverse=True):\n text_string += f\"{donor.name:<26} $ {donor.total_donations:>11.2f}\\\n {donor.number_of_donations:>10} $ {donor.average_gift:>11.2f}\\n\"\n\n return text_string", "def gen_analysis_text(num_data, usage_flag, labelled_landmark, landmark_name, error_summary):\n analysis_text = r'<p style=\"color:red;\">Basic information:</p>'\n analysis_text += '<p style=\"color:black;\">Landmark name: {0}.</p>'.format(landmark_name)\n analysis_text += '<p style=\"color:black;\"># cases in total: {0}.</p>'.format(num_data)\n labelled_landmarks_stat = get_landmarks_stat(labelled_landmark)\n \n analysis_text += r'<p style=\"color:black;\"># cases having this landmark (Pos. cases): {0}.</p>'.format(\n len(labelled_landmarks_stat[landmark_name]['pos']))\n analysis_text += r'<p style=\"color:black;\"># cases missing this landmark (Neg. cases): {}.</p>'.format(\n len(labelled_landmarks_stat[landmark_name]['neg']))\n if len(labelled_landmarks_stat[landmark_name]['neg']) > 0:\n missing_cases = copy.deepcopy(labelled_landmarks_stat[landmark_name]['neg'])\n missing_cases.sort()\n analysis_text += r'{}'.format(missing_cases)\n\n if usage_flag == 2:\n tp_cases = error_summary.tp_cases[landmark_name]\n tn_cases = error_summary.tn_cases[landmark_name]\n fp_cases = error_summary.fp_cases[landmark_name]\n fn_cases = error_summary.fn_cases[landmark_name]\n num_pos_cases = len(tp_cases) + len(fn_cases)\n num_neg_cases = len(tn_cases) + len(fp_cases)\n # compute TPR, TNR, FPR, FNR\n TPR = len(tp_cases) / max(1, num_pos_cases) * 100 \\\n if len(tp_cases) != 0 or num_pos_cases != 0 else 100\n TNR = len(tn_cases) / max(1, num_neg_cases) * 100 \\\n if len(tn_cases) != 0 or num_neg_cases != 0 else 100\n FPR = 100 - TNR\n FNR = 100 - TPR\n mean_error = error_summary.mean_error_tp[landmark_name]\n std_error = error_summary.std_error_tp[landmark_name]\n median_error = error_summary.median_error_tp[landmark_name]\n max_error = error_summary.max_error_tp[landmark_name]\n analysis_text += r'<p style=\"color:red;\"> Landmark classification error: </p>'\n analysis_text += r'<p style=\"color:black;\">TP (TPR): {0} ({1:.2f}%)</p>'.format(\n len(tp_cases), TPR)\n analysis_text += r'<p style=\"color:black;\">TN (TNR): {0} ({1:.2f}%)</p>'.format(\n len(tn_cases), TNR)\n analysis_text += r'<p style=\"color:black;\">FP (FPR): {0} ({1:.2f}%)</p>'.format(\n len(fp_cases), FPR)\n analysis_text += r'<p style=\"color:black;\">FN (FNR): {0} ({1:.2f}%)</p>'.format(\n len(fn_cases), FNR)\n analysis_text += r'<p style=\"color:red;\"> Landmark distance error for the {} TP cases (unit: mm): </p>'.format(\n len(tp_cases))\n analysis_text += r'<p style=\"color:black;\">mean (std): {0:.2f} ({1:.2f})</p>'.format(\n mean_error, std_error)\n analysis_text += r'<p style=\"color:black;\">median: {0:.2f}</p>'.format(median_error)\n analysis_text += r'<p style=\"color:black;\">max: {0:.2f}</p>'.format(max_error)\n\n return 
analysis_text", "def get_intro_message() -> str:\n return \"\"\"You are about to begin a new record.\nType the text sample you want to record.\nThis first sample MUST be typed by the real user (no impostor data).\"\"\"", "def description(self) -> str:\r\n descrip = 'The player must aim to put the most possible units of a ' \\\r\n 'given colour c on the outer perimeter of ' \\\r\n 'the board. The ' \\\r\n 'player’s score is the total number of unit cells ' \\\r\n 'of colour ' \\\r\n 'c that are on the perimeter. There is a ' \\\r\n 'premium on corner ' \\\r\n 'cells: they count twice towards the score. '\r\n return descrip", "def add_details(self):\n\n if self.co.algorithm == \"vv\":\n algo = \"Verlocity Verlot\"\n if self.co.algorithm == \"rk4o\":\n algo = \"Runge Kutta Forth Order\"\n if self.co.algorithm == \"herm\":\n algo = \"Hermite Fourth Order\"\n\n self.algorithm_title = self.ax.text(\n 1.01, 0.65, \"Algorithm:\", transform=self.ax.transAxes\n )\n self.algorithm_text = self.ax.text(\n 1.01, 0.58, algo, transform=self.ax.transAxes\n )\n self.timestep_text = self.ax.text(\n 1.01, 0.51, \"dt =\" + str(self.co.tstep), transform=self.ax.transAxes\n )\n self.length_softening_distance = self.ax.text(\n 1.01,\n 0.44,\n r\"$\\epsilon$ = \" + str(self.co.epsilon),\n transform=self.ax.transAxes,\n )", "def text_report(self):\n\n word_count = self.word_count()\n\n print(\"\\nThere are {} words in the text.\".format(word_count))\n mean, median, mode = self.average_word_length()\n\n print(\"\\nMean, median and mode word length is {}, {}, {}.\".format(mean, median, mode))\n\n if word_count < 10:\n print(\"\\nLongest words:\")\n else:\n print(\"\\n10 longest words:\")\n for s in self.longest_words():\n print(s)\n\n print(\"\\nMost common words:\")\n for s in self.common_words():\n print(\"{} x {}\".format(s[1], s[0]))\n\n longest_grams = []\n\n # find n_longest n-grams\n n_longest = 10\n # strongly doubt that there will be n-grams longer than 50\n for i in range(min(50, word_count), 1, -1):\n if len(longest_grams) >= n_longest:\n break\n grams = self.find_ngrams(i)\n grams_list = sorted(grams, key=grams.get, reverse=True)\n\n for g in grams_list:\n if grams[g] > 4:\n # do not want to include n-grams which are substrings of longer n-grams\n substring = False\n for s in longest_grams:\n if g in s[1]:\n substring = True\n break\n if not substring:\n longest_grams.append([grams[g], g])\n\n print(\"\\nLongest n-grams:\")\n for g in longest_grams:\n print(\"{} x {}\".format(g[0], g[1]))\n print('\\n')", "def GetDescription(cls):\n return textwrap.dedent('''\n This trace step includes a diagram of the Ego long. 
acceleration in the report.\n ''').strip()", "def __str__(self):\n return \"{}\\n{}\\n{}\\n{}\".format(self.header,self.sequence,self.line3,self.quality)", "def _write_overlay_info(self):\n cv2.putText(\n img=self.output,\n text=f'X: {float(self.estimated_distance[0]):6.2f} m',\n org=(25, 25),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )\n cv2.putText(\n img=self.output,\n text=f'Y: {float(self.estimated_distance[1]):6.2f} m',\n org=(25, 50),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )\n cv2.putText(\n img=self.output,\n text=f'Z: {float(self.estimated_distance[2]):6.2f} m',\n org=(25, 75),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )\n cv2.putText(\n img=self.output,\n text=f'Rotation: {float(self.estimated_rotation):6.2f} rad',\n org=(25, 100),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n color=(0, 0, 255),\n fontScale=0.5\n )", "def __str__(self):\n return str(self.t1)+\"<-->t1, \\t\"+str(self.t2)+\"<-->t2, \\t\"+str(self.phi)+\"<-->phi, \\t\"+str(self.m)+\"<-->m, \\t\"+str(self.t31)+\"<-->t31, \\t\"+str(self.t32)+\"<-->t32, \\n\"", "def __str__(self):\n origin = [len(u) for u in self.sequences]\n currentorigin = origin\n returnstring = \"The score for this alignment is: \" + str(self.retrievematrixelement(origin).score) + \"\\n\\n\"\n\n if self.retrievematrixelement(origin) == None:\n return \"The matrix has not been solved yet. Call the solve() method to solve the matrix.\"\n\n else:\n result = []\n resultstrings = [\"\"] * len(self.sequences)\n while currentorigin != [0] * len(self.sequences):\n result.insert(0, self.retrievematrixelement(currentorigin).coordinate)\n currentorigin = result[0]\n\n result.append(origin)\n\n for u in range(1, len(result)):\n origin = result[u - 1]\n destination = result[u]\n for v in range(len(resultstrings)):\n if origin[v] == destination[v]:\n resultstrings[v] += \".\"\n else:\n resultstrings[v] += self.sequences[v][destination[v] - 1]\n\n for value in resultstrings:\n returnstring += value + '\\n'\n\n return returnstring", "def full_string(self):\n return \"{}: {}\".format(str(self.word), \" \".join([str(adj) for adj in self.adjectives]))", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def summary(self):\n if not self: return u''\n s = u'Character\\n=====\\nName: %s\\n' % \\\n self.get('name', u'')\n bio = self.get('biography')\n if bio:\n s += u'Biography: %s\\n' % bio[0]\n filmo = self.get('filmography')\n if filmo:\n a_list = [x.get('long imdb canonical title', u'')\n for x in filmo[:5]]\n s += u'Last movies with this character: %s.\\n' % u'; '.join(a_list)\n return s", "def description(self) -> str:\r\n description = \"The player must aim to put the most possible units \" + \\\r\n \"of \" + colour_name(self.colour) + \" on the outer\" +\\\r\n \" perimeter.\"\r\n return description", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: 
[{self.monsters}]\\n\"", "def result(self):\n return (\"Recall@\" + str(self.length) + \": \"), (self.hit / self.test)", "def calc_conservation_string(aln):\n\n percids = calc_conservation(aln)\n\n # find identity positions\n identity = \"\"\n for pid in percids:\n if pid == 1:\n identity += \"*\"\n elif pid > .5:\n identity += \".\"\n else:\n identity += \" \"\n\n return identity", "def make_ts_report(self):\n self.ts_report = ''\n if self.chosen_ts_method is not None:\n self.ts_report += 'TS method summary for {0} in {1}\\n'.format(self.label, self.rxn_label)\n self.ts_report += 'Methods that successfully generated a TS guess:\\n'\n if self.successful_methods:\n for successful_method in self.successful_methods:\n self.ts_report += successful_method + ','\n if self.unsuccessful_methods:\n self.ts_report += '\\nMethods that were unsuccessfully in generating a TS guess:\\n'\n for unsuccessful_method in self.unsuccessful_methods:\n self.ts_report += unsuccessful_method + ','\n self.ts_report += '\\nThe method that generated the best TS guess and its output used for the' \\\n ' optimization: {0}'.format(self.chosen_ts_method)", "def helptext(self):\n return \"\"\"\n <b>A</b> to start the aperture or set the value<br/>\n <b>S</b> to select an existing aperture<br/>\n <b>C</b> to clear the selection<br/>\n <b>F</b> to find a peak close to the cursor<br/>\n <b>[</b> to edit the left edge of selected or closest<br/>\n <b>]</b> to edit the right edge of selected or closest<br/>\n <b>L</b> to edit the location of selected or closest<br/>\n <b>D</b> to delete the selected or closest aperture\n \"\"\"", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def __repr__(self):\n s= 'text model name: ' + self.name + '\\n'\n s+= 'number of words: ' + str(len(self.words)) + '\\n'\n s+='number of word lengths: ' + str(len(self.word_lengths))+'\\n'\n s+='number of word stems: ' + str(len(self.stems)) + '\\n'\n s+='number of sentence lengths: ' + str(len(self.sentence_lengths)) +'\\n'\n s+='number of word suffixes: '+ str(len(self.endings))\n \n return s", "def __str__(self):\n analysis = []\n for analyze in self.analysis:\n if self.analysis[analyze] is not None:\n analysis.append(self.analysis[analyze])\n return \"Analises: {} \\n\".format(analysis)", "def create_help_message():\r\n help_message = \"Improve your vocabulary using *VocabBot*! 
\\n\\n\" \\\r\n \"*Created By* - _Vishesh Vishwakarma_ \\n\\n\"\\\r\n \"You can ask the bot the below listed things: \\n\"\\\r\n \"*meaning* - type the word \\n\"\\\r\n \"*example* - type the word \\n\"\\\r\n \"*synonyms* - type the word \\n\"\\\r\n \"*antonyms* - type the word \\n\"\r\n return help_message", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def get_warning_text(self):\n \n to_print = []\n if self['skipped_subchannel'] > 0:\n to_print.append(\"Some event with large weight have been discarded.\"+\\\n \" This happens %s times.\" % self['skipped_subchannel'])\n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n if fraction > 1.0e-4:\n to_print.append(\"Some PS with numerical instability have been set \"+\\\n \"to a zero matrix-element (%.3g%%)\" % (100.0*fraction))\n \n return ('\\n'.join(to_print)).replace(\"'\",\" \")", "def _printable(self):\n toPrint = \"Measurement Outcome header. \"\n toPrint += \"measurement outcome: \" + str(self.outcome) + \" \"\n\n return toPrint", "def __str__(self):\n status = (\"\\na: %.2f \\n\" % self.a +\n \"e: %.2f \\n\" % self.e +\n \"inc: %.2f deg \\n\" % (self.inc * 180/math.pi) +\n \"om: %.2f deg \\n\" % (self.om * 180/math.pi) +\n \"Om: %.2f deg \\n\" % (self.Om * 180/math.pi) +\n \"H: %.2f \\n\" % self.H\n )\n return status", "def __str__(self):\n table_data = [\n ['', 'C', 'G', 'A', 'T'],\n ['total', str(self.total['C']), str(self.total['G']), str(self.total['A']), str(self.total['T'])],\n ['reverse half strand', str(self.reverse['C']), str(self.reverse['G']), str(self.reverse['A']),\n str(self.reverse['T'])],\n ['forward half strand', str(self.forward['C']), str(self.forward['G']), str(self.forward['A']),\n str(self.forward['T'])]\n ]\n table = AsciiTable(table_data)\n return \"Generation #{}\\n\".format(self.epoch) + table.table", "def make_final_text(text: str) -> str:\n baseline_text_template = \"\"\"Use this thread to discuss anything (within the rules of the subreddit):\n\n* What you didn't think was worthy of its own post\n* What club game you're most excited for\n* Where you're staying to watch a friendly\n* Which players should be called in\n{}\n* What the mods told you to re-post here\n* Etc\n\n### Schedules\n{}\n\"\"\"\n\n with open('random_dumb_questions.txt', 'r') as files:\n list_of_questions = files.readlines()\n\n question = list_of_questions[randint(0, len(list_of_questions))].replace('\\n', '')\n\n return baseline_text_template.format(question, text)", "def nice_output(self):\n return 'Pitch: {0} at {1}: {2}'.format(\n self.pitch_type, self.start_speed, self.des)", "def __str__(self):\n\n result = \"n: \" + str(self.n) + \"\\n\"\n result += \"m: \" + str(self.m) + \"\\n\"\n result += \"ns: \" + str(self.ns) + \"\\n\"\n result += \"s0: \" + str(self.s0) + \"\\n\"\n result += \"goals: \" + str([self.goals[i] for i in range(self.ng)]) + \"\\n\"\n result += \"horizon: \" + str(self.horizon) + \"\\n\"\n result += \"gamma: \" + str(self.gamma) + \"\\n\\n\"\n\n result += \"S(s, a, s'):\\n%s\" % (str(np.array([self.S[i] \\\n for i in range(self.n * self.m * self.ns)]).reshape((self.n, 
self.m, self.ns)))) + \"\\n\\n\"\n\n result += \"T(s, a, s'):\\n%s\" % (str(np.array([self.T[i] \\\n for i in range(self.n * self.m * self.ns)]).reshape((self.n, self.m, self.ns)))) + \"\\n\\n\"\n\n result += \"R(s, a):\\n%s\" % (str(np.array([self.R[i] \\\n for i in range(self.n * self.m)]).reshape((self.n, self.m)))) + \"\\n\\n\"\n\n return result", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __str__(self):\n # Power/toughness, seen only if it's a creature\n pt = \"\"\n if \"power\" in self:\n pt = \"{0}/{1}\".format(self.power,\n self.toughness).replace(\"*\", \"\\*\")\n # Append loyalty to the end of oracle text if the creature is a\n # planeswalker\n if \"loyalty\" in self:\n self.oracle_text = \"{0}\\nStarting Loyalty: {1}\".format(\n self.oracle_text, self.loyalty)\n\n flavor = \"*{0}*\".format(\n self.flavor_text) if \"flavor_text\" in self else \"\"\n\n return \"**{0}** {1}\\n{2} {3}\\n{4}\\n{5}\\n\\n\".format(self.name,\n self.mana_cost,\n self.type_line,\n pt,\n self.oracle_text,\n flavor)", "def as_text(self) -> str:\n txt = ''\n with self._th_lock:\n # purge expired value (reach ttl_s) from values dict\n purge_l = []\n for key, (_value, _timestamp_ms, expire_at) in self._values_d.items():\n if expire_at and time.monotonic() > expire_at:\n purge_l.append(key)\n for rm_key in purge_l:\n self._values_d.pop(rm_key)\n # if any value exists, format an exposition message\n if self._values_d:\n # add a comment line if defined\n if self.comment:\n # apply escapes to comment\n esc_comment = str(self.comment)\n for rep_args in [('\\\\', '\\\\\\\\'), ('\\n', '\\\\n')]:\n esc_comment = esc_comment.replace(*rep_args)\n txt += f'# HELP {self.name} {esc_comment}\\n'\n # add a type line if defined\n if self.type is not MetricType.UNTYPED:\n txt += f'# TYPE {self.name} {self.type.value}\\n'\n # add every \"name{labels} value [timestamp]\" for the metric\n for lbl_id_str, (value, ts, _expire_at) in self._values_d.items():\n if self._type is MetricType.HISTOGRAM:\n txt += self._data2txt_histogram(lbl_id_str, value)\n elif self._type is MetricType.SUMMARY:\n txt += self._data2txt_summary(lbl_id_str, value)\n else:\n txt += self._data2txt_default(lbl_id_str, value, ts)\n return txt", "def format(self):\r\n\r\n earth = \"???\" if self.maskearth else self.earth\r\n air = \"???\" if self.maskair else self.air\r\n fire = \"???\" if self.maskfire else self.fire\r\n water = \"???\" if self.maskwater else self.water\r\n\r\n if any((self.earth, self.fire, self.water)):\r\n statsline = f'Stats: {earth}/{air}/{fire}/{water}'\r\n elif self.air:\r\n statsline = f'Air: {air}'\r\n else:\r\n statsline = ''\r\n\r\n return (\r\n f'Character {self.name}, [{self.token}]. 
'\r\n f'Init: {self.init} {statsline} Owner: {self.user.name}'\r\n )", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def text(self) -> str:", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_meme(self):\n motif_id = self.id.replace(\" \", \"_\")\n m = \"MOTIF %s\\n\" % motif_id\n m += \"BL MOTIF %s width=0 seqs=0\\n\"% motif_id\n m += \"letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\\n\" % (len(self), np.sum(self.pfm[0]))\n m +=\"\\n\".join([\"\\t\".join([\"%s\" % x for x in row]) for row in self.pwm])\n return m", "def _masses_string(self):\n return_str = 'Masses\\n\\n'\n for at in self.atom_types:\n return_str += '{} {:9.5f} # {}\\n'.format( at.atom_type_index, float(at.mass), at.label)\n return_str += '\\n'\n return return_str", "def _get_text_rendering(self, table):\n text_table = ''\n base_variable = table.get_variable().replace(\"'\", '')\n\n if base_variable == self._system.get_settings().user_input:\n text_table += '\\n[user]\\t'\n elif base_variable == self._system.get_settings().system_output:\n text_table += '[system]\\t'\n else:\n text_table += '[' + base_variable + ']\\t'\n\n for value in table.get_values():\n if not isinstance(value, NoneVal):\n content = str(value)\n if table.get_prob(value) < 0.98:\n content += ' (' + StringUtils.get_short_form(table.get_prob(value)) + ')'\n\n text_table += content + '\\n\\t\\t'\n\n if base_variable == self._system.get_settings().user_input:\n text_table += '\\n'\n\n text_table = text_table[0:-3]\n return text_table", "def to_transfac(self):\n m = \"%s\\t%s\\t%s\\n\" % (\"DE\", self.id, \"unknown\")\n for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):\n m += \"%i\\t%s\\t%s\\n\" % (i, \"\\t\".join([str(int(x)) for x in row]), cons)\n m += \"XX\"\n return m", "def describe(self):\n composition = str()\n for n, comp in enumerate(self.components):\n if self.molefractions[n] > 0.0:\n composition += comp.name\n composition += \" %.2f\" % self.molefractions[n]\n composition += \"; \"\n return composition", "def _render_analysis_help_text() -> str:\n return \"\\n\".join(f'- \"{name}\": {analysis.description}'\n for name, analysis in analyses.items())", "def Malignancy(self):\n s = self.malignancy\n assert s in range(1,6), \"Malignancy score out of bounds.\"\n return _char_to_word_[ s-1 ] + ' Malignancy'", "def _repr_(self):\n s = \"Space of Vector-Valued harmonic weak Maass forms\"\n s += \" on \" + str(self.multiplier().group()) + \" of weight \" + str(self._weight_rat) + \" \"\n s += \" and values in CC[ZZ/\" + str(len(self.multiplier().D())) + \"ZZ].\"\n s += \"\\nRepresentation is \" + 
str(self.multiplier())\n return s", "def output_text(self, part: PDFTextLinePart, text, factor: Number=1) -> str:\n stream = part.state.compare(self.last_state)\n self.last_state = part.state\n\n tw = round(part.space_width * (factor - 1), 3)\n if self.last_factor != tw:\n if tw == 0:\n tw = 0\n stream += ' {} Tw'.format(tw)\n self.last_factor = tw\n\n if text != '':\n # TODO: How can we add unicode to PDF string\n stream += ' ({})Tj'.format(text)\n return stream", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def str_targetting(self):\n if not self.targetting: # set to none etc.\n return \"doing nothing\"\n if self.targetting[0] == \"marching\":\n return \"marching -> \" + self.targetting[1].color_name()\n if self.targetting[0] == \"defending\":\n return \"staying put\"\n assert self.targetting[0] == \"sneaking\"\n return \"sneaking -> \" + self.targetting[1].color_name()", "def __str__(self):\n\n strme = []\n move = \"move {} {} {}\".format(self.key, len(self.movers), self.pfreq)\n strme.append(move)\n\n for mover in self.movers:\n strme.append(self.print_mover(mover))\n\n return \"\\n\".join(strme)", "def add_information_to_text(answer):\n text = answer[\"text\"]\n\n if answer[\"intent\"] == \"sorteio\":\n months_names = {1: \"Janeiro\", 2: \"Fevereiro\",\n 3: \"Março\", 4: \"Abril\", 5: \"Maio\", 6: \"Junho\", 7: \"Julho\", 8: \"Agosto\", 9: \"Setembro\", 10: \"Outubro\", 11: \"Novembro\", 12: \"Dezembro\"}\n month_number = random.randrange(1, 12)\n text = text.replace(\"monthName\", months_names[month_number])\n\n elif answer[\"intent\"] == \"empate\":\n day_number = random.randrange(1, 31)\n text = text.replace(\"dayNumber\", str(day_number))\n\n return text", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def __str__(self):\n return self._str_hsp_header() + \"\\n\" + self._str_aln()", "def __str__(self):\n # TODO also show relative abundance\n s = \"{} ion species\\n\".format(len(self.ions))\n for ion in self.ions:\n s += \" {:2s} (Z = {:3d}) {:.3e} particles\\n\".format(ion.getName(), ion.getCharge(), ion.getParticleNumber())\n \n return s", "def test_explained_text(self):\n result = self._do_output(o.ExplainedTextOutput(o.Color.Never), self._demo_msgs)\n self.assertEqual(result,\n \"mock: mock.cmake(1): error: short text\\n\"\n \" * long text\\n\"\n \" * You can ignore this problem with --ignore mock_msg\\n\"\n \"mock: mock.cmake(2): warning: short text\\n\"\n \"mock: mock.cmake(3): notice: short text\\n\"\n \"mock: error: short text\\n\"\n \"mock: mock.cmake: error: short text\\n\"\n )", "def __str__(self):\n astr = ' variables:\\t[ '\n for var in self.variables:\n astr += str(var) + ', '\n astr = astr[:-2] + ' ]\\n assumptions :\\t[ '\n for assumption in self.assumptions.cnf:\n astr += assumption.formula + ', '\n astr = astr[:-2] + ' ]\\n guarantees :\\t[ '\n for guarantee in self.guarantees.cnf:\n astr += guarantee.formula + ', '\n # astr = astr[:-2] + ' ]\\n guarantees_unsat :\\t[ '\n # for guarantee in self.guarantees.cnf:\n # astr += guarantee.unsaturated + ', '\n return astr[:-2] + ' ]\\n'", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(WangLandau.key, self.delta0, self.c_upd, self.n_upd)\n if self.smooth:\n strme = \"{} 
{}\".format(strme, self.smooth)\n\n return strme", "def get_formatted_text(self, n_cols):", "def __str__(self):\n cols = {'theta': self.theta, 'std': self.stde, 'tstat': self.tstat}\n res = pd.DataFrame(cols, index=self.names)[['theta', 'std', 'tstat']]\n res_str = res.to_string(float_format=lambda x: '%.4f' % x)\n width = len(res_str) // (res.shape[0] + 1)\n show = '-' * 60\n show += '\\nGMM final results:\\n'\n show += width * '-' + '\\n'\n show += res_str\n show += '\\n' + width * '-'\n show += '\\nJ-stat = %0.2f' % self.jstat\n show += '\\ndf = ' + str(self.degf)\n show += '\\np-value = %0.2f' % self.jpval\n show += '\\n' + '-' * 60\n return show", "def __str__(self):\n return 'elasped: %s (%s)' % (str(self.diff()), self.times())", "def regimes(self):\n coupling = self.coupling()\n quantum_theta = self.quantum_theta()\n\n if coupling <= 0.01:\n coupling_str = f\"Weakly coupled regime: Gamma = {coupling}.\"\n elif coupling >= 100:\n coupling_str = f\"Strongly coupled regime: Gamma = {coupling}.\"\n else:\n coupling_str = f\"Intermediate coupling regime: Gamma = {coupling}.\"\n\n if quantum_theta <= 0.01:\n quantum_theta_str = (\n f\"Fermi quantum energy dominant: Theta = {quantum_theta}\"\n )\n elif quantum_theta >= 100:\n quantum_theta_str = (\n f\"Thermal kinetic energy dominant: Theta = {quantum_theta}\"\n )\n else:\n quantum_theta_str = (\n f\"Both Fermi and thermal energy important: Theta = {quantum_theta}\"\n )\n\n return [coupling_str, quantum_theta_str]", "def avl_mass_string(self):\n x,y,z = self.center_of_gravity_global.as_tuple()\n ixx, iyy, izz = self.inertia_xx, self.inertia_yy, self.inertia_zz\n template = \"{0} {1} {2} {3} {4} {5} {6}\".format(self.mass,\n x,\n y,\n z,\n ixx,\n iyy,\n izz)\n return template", "def _concatenate_instance(\n self,\n emotion: str,\n target_utterance: str,\n evidence_utterance: str,\n conversation_history: str,\n ) -> str:\n concatenated_text = (\n \" \"\n + emotion\n + \" <SEP> \"\n + target_utterance\n + \" <SEP> \"\n + evidence_utterance\n + \" <SEP> \"\n + conversation_history\n )\n\n return concatenated_text", "def display_string(text_area_no: int) -> str:\n if text_area_no == 1:\n text = ''\n for v in utterances['P1']:\n text += v + '\\n'\n return text\n elif text_area_no == 2:\n text = ''\n for v in utterances['S1']:\n text += v + '\\n'\n return text\n elif text_area_no == 3:\n text = ''\n for v in utterances['S2']:\n text += v + '\\n'\n return text\n elif text_area_no == 4:\n text = ''\n for v in utterances['S3']:\n text += v + '\\n'\n return text\n elif text_area_no == 5:\n text = ''\n for v in utterances['S4']:\n text += v + '\\n'\n return text\n elif text_area_no == 6:\n text = ''\n for v in utterances['S4']:\n text += v + '\\n'\n return text\n elif text_area_no == 7:\n text = ''\n for v in utterances['C1']:\n text += v + '\\n'\n return text\n elif text_area_no == 8:\n text = ''\n for v in utterances['C2']:\n text += v + '\\n'\n return text", "def logic_program_form(self):\r\n return '% -------------------------------------\\n' +\\\r\n '% Theory ' + self.name + '\\n' +\\\r\n '% -------------------------------------\\n\\n' +\\\r\n GENERAL_AXIOMS", "def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% on line \" + self.line\n return text", "def generatePoem(self, seed, length):\n full_text = seed\n for _ in range(length):\n token_list = self.tokenizer.texts_to_sequences([full_text])[0]\n token_list = pad_sequences([token_list], 
maxlen=self.max_sequence_len - 1, padding='pre')\n predicted = np.argmax(self.model.predict(token_list), axis=-1)\n output_word = \"\"\n for word, index in self.tokenizer.word_index.items():\n if index == predicted:\n output_word = word\n break\n full_text += \" \" + output_word\n return full_text", "def result(target_text):\n\n display_text(target_text)\n readability(target_text)", "def print_info(self):\n outstr = '================================================= Ambient Noise Cross-correlation Database =================================================\\n'\n outstr += self.__str__()+'\\n'\n outstr += '--------------------------------------------------------------------------------------------------------------------------------------------\\n'\n if 'NoiseXcorr' in self.auxiliary_data.list():\n outstr += 'NoiseXcorr - Cross-correlation seismogram\\n'\n if 'StaInfo' in self.auxiliary_data.list():\n outstr += 'StaInfo - Auxiliary station information\\n'\n if 'DISPbasic1' in self.auxiliary_data.list():\n outstr += 'DISPbasic1 - Basic dispersion curve, no jump correction\\n'\n if 'DISPbasic2' in self.auxiliary_data.list():\n outstr += 'DISPbasic2 - Basic dispersion curve, with jump correction\\n'\n if 'DISPpmf1' in self.auxiliary_data.list():\n outstr += 'DISPpmf1 - PMF dispersion curve, no jump correction\\n'\n if 'DISPpmf2' in self.auxiliary_data.list():\n outstr += 'DISPpmf2 - PMF dispersion curve, with jump correction\\n'\n if 'DISPbasic1interp' in self.auxiliary_data.list():\n outstr += 'DISPbasic1interp - Interpolated DISPbasic1\\n'\n if 'DISPbasic2interp' in self.auxiliary_data.list():\n outstr += 'DISPbasic2interp - Interpolated DISPbasic2\\n'\n if 'DISPpmf1interp' in self.auxiliary_data.list():\n outstr += 'DISPpmf1interp - Interpolated DISPpmf1\\n'\n if 'DISPpmf2interp' in self.auxiliary_data.list():\n outstr += 'DISPpmf2interp - Interpolated DISPpmf2\\n'\n if 'FieldDISPbasic1interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPbasic1interp - Field data of DISPbasic1\\n'\n if 'FieldDISPbasic2interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPbasic2interp - Field data of DISPbasic2\\n'\n if 'FieldDISPpmf1interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPpmf1interp - Field data of DISPpmf1\\n'\n if 'FieldDISPpmf2interp' in self.auxiliary_data.list():\n outstr += 'FieldDISPpmf2interp - Field data of DISPpmf2\\n'\n outstr += '============================================================================================================================================\\n'\n print outstr\n return", "def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"", "def message(self):\n if self.display_time:\n return \"Time: {}\".format(int(self.physics_engine.time_since_start()%self.time_cycle_secs))", "def VMD_string(self, time):\n # First add preamble:\n # N_data\n # Point = time\n # ...\n VMD_output_string = '%i\\nPoint = %i\\n' % (len(self.particles), time)\n # Concatenate labels and positions for each particle\n for p in self.particles:\n VMD_output_string += \"s\"+str(p) + '\\n'\n\n return VMD_output_string", "def __str__(self):\n return 'GradientAnisotropicDiffusion:\\n' \\\n ' time_step: {self.time_step}\\n' \\\n ' conductance: {self.conductance}\\n' \\\n ' conductance_scaling_update_interval: {self.conductance_scaling_update_interval}\\n' \\\n ' no_iterations: {self.no_iterations}\\n' \\\n .format(self=self)", "def format_step(self):\n if 
self.terminal:\n totrwdstr = \" %6.3f\" % self.total_reward\n else:\n totrwdstr = \"\"\n \n logging.info(\" %3d %1.0f => %7.1f %4.1f %7.1f %7.1f %4.1f %4.1f = %i %6.3f%s\" % (\n self.nsteps,\n self.action['heater_on'],\n self.state['heat_cost'],\n self.state['set_temp'],\n self.state['room_temp'],\n self.state['room_temp_change'],\n self.state['outside_temp'],\n self.state['outside_temp_change'],\n self.terminal,\n self.reward,\n totrwdstr,\n ))", "def diff_to_text(_missing_left, _missing_right, _different):\n _diff_text = \"\"\n\n raise Exception(\"diff_to_text is not implemented\")", "def display(self):\n statement = f\"\"\"\n ------\n By {self.prescribed_by.name.upper()}\n ------\n Patient Detail!\n Name: {self.prescribed_to.name.capitalize()}\n Age: {self.prescribed_to.age}\n Gender: {self.prescribed_to.gender}\n Prescribed Medicines!\"\"\"\n print(statement)\n self.display_cure()" ]
[ "0.6384936", "0.634679", "0.63305056", "0.63101876", "0.62891483", "0.62351215", "0.62246543", "0.6216952", "0.61375284", "0.6118015", "0.6031679", "0.597089", "0.59577096", "0.59560966", "0.5955112", "0.59385556", "0.5877444", "0.58648694", "0.58440596", "0.58404136", "0.58381337", "0.5812276", "0.58087564", "0.5801023", "0.57973653", "0.57815874", "0.5734633", "0.57278526", "0.57041997", "0.56971264", "0.56864107", "0.5681703", "0.56728506", "0.5672847", "0.5669525", "0.5662268", "0.56585777", "0.5644435", "0.56424576", "0.5638137", "0.56376284", "0.5635938", "0.5634997", "0.5630233", "0.5629715", "0.56253767", "0.5625258", "0.5624886", "0.5619459", "0.56184006", "0.56052786", "0.55996686", "0.55969757", "0.55925184", "0.55858874", "0.5579843", "0.55786246", "0.55703235", "0.55702156", "0.55600405", "0.55354166", "0.5525351", "0.55098295", "0.5507426", "0.5504892", "0.550269", "0.5502317", "0.5498617", "0.5494484", "0.5491278", "0.54732215", "0.54727876", "0.5468213", "0.54599106", "0.5457956", "0.54546595", "0.5448395", "0.5447098", "0.54443306", "0.54434556", "0.5439236", "0.5436355", "0.5430392", "0.54282916", "0.5424036", "0.54158425", "0.54120386", "0.54065865", "0.54050905", "0.53982896", "0.5386651", "0.5380446", "0.5379399", "0.53779936", "0.5376076", "0.5368329", "0.5367383", "0.5367018", "0.536165", "0.53608716" ]
0.69408065
0
Find the correct ref.dat file for each pfile.
def _GetRefdat(self): for rfile in self.refdats.keys(): # Get times for ref.dat files with a time-stamp. words = rfile.replace('.','_').split('_') if len(words) == 6 and words[-2].count(':') == 20: # This file was time-stamped by the sequence. Get the # date and time. file name format: # ref_Sep_9_2007_11:28:32.dat rtime[rfile] = hms_to_secs(words[-2]) for pfile in self.pfiles: min_difftime = 1.e20 self.info[pfile]['refdat'] = None for rfile in self.refdats.keys(): if rfile[:3] == 'ref' and 'dat' in rfile: # This is a reference data file. First see if the orientation is # appended. If the file has neither a time-stamp nor a plane and # there is more than one ref.dat, the epi reconstruction will # be aborted. rinfo = {} ref_file = None if 'sag' in rfile and self.info[pfile]['plane'] == 'sagittal': # self.info[pfile]['refdat'] = rfile ref_file = rfile break elif 'cor' in rfile and self.info[pfile]['plane'] == 'coronal': # self.info[pfile]['refdat'] = rfile ref_file = rfile break elif 'axial' in rfile and self.info[pfile]['plane'] == 'axial': # self.info[pfile]['refdat'] = rfile ref_file = rfile break elif len(self.refdats.keys()) == 1: # Use the only one if that is all there is. ref_file = rfile epi_time = hms_to_secs(self.info[pfile]['acqtime'].split()[-2]) if epi_time - rtime[rfile] < min_difftime and \ rftime[rfile] > epi_time: # Use the reference file that acquired nearest to the EPI # but before it. min_difftime = epi_time - rtime[rfile] # self.info[pfile]['refdat'] = rfile ref_file = rfile if ref_file: # Found a candidate. if not self.info[pfile]['refdat']: # Haven't found one yet, use it. self.info[pfile]['refdat'] = ref_file else: # Found two. Choose one in the same directory. oldpath = os.path.dirname(self.info[pfile]['refdat']) newpath = os.path.dirname(ref_file) pfile_path = os.path.dirname(pfile) if oldpath == newpath: # Same path, use the old one. self.info[pfile]['refdat'] = ref_file elif newpath == pfile_path: self.info[pfile]['refdat'] = ref_file # else Do nothing, use existing choice. elif not os.path.exists(rfile): self.info[pfile]['refdat'] = None elif os.stat(rfile).st_size > 0: # This path is taken if no info is encoded in the file name. # Don't use empty ref.dat files. self.info[pfile]['refdat'] = rfile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def get_input_file(self, *args, refsep='$', docopy=True):\n # filename = self.get_data(*args, docopy=docopy)\n filename = args[1]\n ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE',\n 'DGEOFILE', 'MDRIZTAB'])\n print(\"Looking for REF_FILES: {}\".format(ref_files))\n\n for ref_file in ref_files:\n if ref_file.strip() == '':\n continue\n if refsep not in ref_file: # Local file\n refname = self.get_data('customRef', ref_file)\n else: # Download from FTP, if applicable\n refname = os.path.join(ref_file)\n if self.use_ftp_crds:\n download_crds(refname, self.timeout)\n return filename", "def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs", "def _find_ref_fname(fname, ref_fname):\n curr_dir = \"\"\n next_dir = os.path.dirname(os.path.abspath(fname))\n while next_dir != curr_dir:\n curr_dir = next_dir\n rcfile = os.path.join(curr_dir, ref_fname)\n if os.path.exists(rcfile):\n return rcfile\n next_dir = os.path.dirname(curr_dir)\n return \"\"", "def step040():\n logger.logMessage('Begin: matching work files')\n sKey = ''\n mKey = ''\n def readFile(f):\n line = f.readline().rstrip()\n if line == '':\n key = 'ZZZZZZZZZZZZZZZZZZZZZZZZZ'\n return None,key\n else:\n sp = line.split(';')\n key = '{0:25s}'.format(sp[1])[0:19]\n return sp,key\n\n m = open(dbDumpFile,'r')\n s = open(sortedCandidatesFile,'r')\n numrecs = 0\n with open(matchFile,'w') as match:\n mFields,mKey = readFile(m)\n sFields,sKey = readFile(s)\n while mFields != None or sFields != None:\n if sKey == mKey:\n match.write('{0:014d};{1:25s};{2:32s};{3:31s}\\n'.format(int(mFields[0]),mKey,sFields[2],sFields[3]))\n numrecs += 1\n if numrecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records matched\".format(numrecs))\n sFields,sKey = readFile(s)\n mFields,mKey = readFile(m)\n elif sKey < mKey:\n sFields,sKey = readFile(s)\n else:\n logger.logMessage(level='WARNING',message='Record not matched: {0}'.format(mFields))\n mFields,mKey = readFile(m)\n logger.logMessage(\"Total matched: {0:d}\".format(numrecs))\n\n m.close()\n s.close()\n logger.logMessage('End : matching work files')", "def find_FEfiles(self):\n \n\t\t# take each item in the list of files to eve.\n for i in self.files_to_find:\n filename1 = i[0] \n file1_dye = i[1]\n filename2 = i[2]\n file2_dye = i[3]\n out_file_prefix=i[4]\n\n # set filename as none to help identify when no match has been found below\n file1_filename = None\n file2_filename = None\n\n # search for a FE file which matches the filename pattern\n for afile in os.listdir(self.chosenfolder):\n # file 1\n if fnmatch.fnmatch(afile, filename1):\n file1_filename = afile\n \n # file 2\n if fnmatch.fnmatch(afile, filename2):\n file2_filename = afile\n \n # if both files have been identified add this to a new list\n if file1_filename and file2_filename:\n self.list_of_files.append((file1_filename, file1_dye, file2_filename, file2_dye,out_file_prefix))\n\t\t\t# if either file could not be identified report this.\n else:\n raise ValueError(\"no match for \" + filename1 + \" and \" + filename2)", "def main():\n \n lookupslocation = 'C:\\\\Users\\\\gwilliams\\\\Desktop\\\\Python Experiments\\\\work projects\\\\FaresIndexSourceData\\\\regulated_fares_data\\\\'\n destination = 'C:\\\\Users\\\\gwilliams\\\\Desktop\\\\Python Experiments\\\\work projects\\\\FaresIndexSourceData\\\\regulated_fares_data\\\\comparison 
output\\\\'\n lookupfileslist, count = getdata(lookupslocation)\n\n print(f\"there are {count} files found.\")\n\n newlookup = lookupfileslist[0]\n oldlookup = lookupfileslist[1]\n\n #join new to old // old to new\n new_uniquevalues = pd.merge(left=newlookup,right=oldlookup,how='left',\n left_on=['orig','dest','route','ticket'],right_on=['orig','dest','route','ticket'])\n\n old_uniquevalues = pd.merge(left=newlookup,right=oldlookup,how='right',\n left_on=['orig','dest','route','ticket'],right_on=['orig','dest','route','ticket'])\n\n print(\"These are values unique to new lookup\") \n new_uniquevalues = new_uniquevalues[new_uniquevalues.ticketa.isnull()==True]\n exportfile(new_uniquevalues,destination,'unique_new_values',1)\n\n print(\"These are values unique to old lookup\")\n old_uniquevalues = old_uniquevalues[old_uniquevalues.new_flag.isnull()==True]\n exportfile(old_uniquevalues,destination,'unique_old_values',1)", "def readdata(self, reflist , comment = '#' , regexp = None , substr = None, filename = True):\n self.kpunten = []\n datalist = []\n prefixlist = []\n if os.path.isfile(str(reflist)):\n reflist = [reflist] #if we work with only one file this wraps it automatically in right format\n for ref in reflist:\n print('start with the collection of data from file %s' %ref)\n plotf = open(ref, 'r')\n if not filename:\n prefixlist.append( os.path.dirname(ref) + '/')\n else:\n prefixlist.append(re.sub('\\.dat$' , '' , ref))\n try:\n if regexp != None:\n raise ValueError\n dataf = np.loadtxt(plotf,comments = comment)\n print 'we readed data in with np.loadtxt'\n except:\n print('reading in data with numpy loadtxt failed or use reg exp to extract information')\n dataf = np.array([])\n kpuntenf = []\n plotf.seek(0) #go back to beginning of file\n for line in plotf:\n if regexp is not None:\n analyse = re.search(regexp,line)\n if analyse:\n kpuntenf.append((analyse.group(1), len(dataf)-1 ))\n print 'we found the following matches: %s' % analyse.group(0)\n if substr != None: \n line = re.sub(substr, '' , line)\n if line[0] != comment:\n #print line\n pline = np.array(map(float,line.split()))\n if len(dataf) <= 1:\n dataf = pline\n else:\n try:\n dataf = np.vstack((dataf,pline))\n except:\n continue\n self.kpunten.append(kpuntenf)\n datalist.append(dataf)\n\n plotf.close()\n self.datarg = datalist\n self.prefix = prefixlist\n self.reader = dr.ReaderOutput(reflist[0]) #Some plotting functions need a bit more information this info is extracted from the header of the files\n self.reader.depvar['depvar'] += ' (a.u.)'", "def merge_tables():\r\n filename = \"ppxf_results_best.dat\"\r\n s1 = np.genfromtxt(filename, usecols=(0,), dtype=None).tolist()\r\n sref = s1[:]\r\n sref.sort()\r\n x, y = get_positions(sref).T\r\n r = np.sqrt(x * x + y * y)\r\n pa = np.rad2deg(np.arctan2(x, y))\r\n pa[pa < 0.] += 360.\r\n data1 = np.loadtxt(filename, usecols=np.arange(1, 11))\r\n ##########################################################################\r\n # Account for difference in resolution\r\n # Not used anymore because the resolution is now matched in pPXF\r\n # fwhm_dif = (2.5 - 2.1) * c / 5500. 
/ 2.3548\r\n # data1[:,2] = np.sqrt(data1[:,2]**2 - fwhm_dif**2)\r\n ##########################################################################\r\n data1 = match_data(s1, sref, data1)\r\n results = np.column_stack((sref, x, y, r, pa, data1))\r\n header = ['FILE', \"X[kpc]\", \"Y[kpc]\",\r\n \"R[kpc]\", \"PA\",\r\n 'V', 'dV', 'S', 'dS', 'h3', 'dh3',\r\n 'h4', 'dh4', 'chi/DOF', 'S/N']\r\n with open(outtable, \"w\") as f:\r\n for i, field in enumerate(header):\r\n print \"# {0} : {1}\\n\".format(i, field)\r\n f.write(\"# {0} : {1}\\n\".format(i, field))\r\n np.savetxt(f, results, fmt=\"%s\")\r\n return", "def FindFile(self, fd):\n hashes = self._HashFile(fd)\n\n urns_to_check = []\n\n for hash_type, hash_digest in hashes.ListFields():\n hash_digest = str(hash_digest)\n hash_type = hash_type.name\n\n fingerprint_type = \"generic\"\n if hash_type.startswith(\"pecoff_\"):\n fingerprint_type = \"pecoff\"\n hash_type = hash_type[len(\"pecoff_\"):]\n if hash_type not in self.HASH_TYPES[fingerprint_type]:\n continue\n\n file_store_urn = self.PATH.Add(fingerprint_type).Add(\n hash_type).Add(hash_digest)\n\n urns_to_check.append(file_store_urn)\n\n return [data[\"urn\"] for data in aff4.FACTORY.Stat(urns_to_check,\n token=self.token)]", "def phys_match():\n # Get list of physiological files\n ppg_files = glob(phys_dir+'PPGData*')\n resp_files = glob(phys_dir+'RESPData*')\n ecg_files = glob(phys_dir+'ECG2Data*')\n # Match to runs\n for rn in dcm_dict.keys():\n # Initiate dictionary entries\n dcm_dict[rn]['ppg_file'] = 'File missing'\n dcm_dict[rn]['resp_file'] = 'File missing'\n dcm_dict[rn]['ecg_file'] = 'File missing'\n # Match time stamp\n # Using only hour and minute due to second mismatch\n # Need to fix\n time_stamp = dcm_dict[rn]['end_time'].strftime('%m%d%Y%H_%M')\n for ppg in ppg_files:\n if time_stamp in ppg:\n dcm_dict[rn]['ppg_file'] = ppg\n for resp in resp_files:\n if time_stamp in resp:\n dcm_dict[rn]['resp_file'] = resp\n for ecg in ecg_files:\n if time_stamp in resp:\n dcm_dict[rn]['ecg_file'] = ecg", "def _get_refpaths(data_dict, reference_file_types, observatory):\n if not reference_file_types: # [] interpreted as *all types*.\n return {}\n with crds_cache_locking.get_cache_lock():\n bestrefs = crds.getreferences(\n data_dict, reftypes=reference_file_types, observatory=observatory)\n refpaths = {filetype: filepath if \"N/A\" not in filepath.upper() else \"N/A\"\n for (filetype, filepath) in bestrefs.items()}\n return refpaths", "def match(p_file, s_file, matched_p_file, matched_s_file):\n\n log.info('Matching p and s arrivals')\n\n p_arr = pd.read_csv(p_file, header=None, names=column_names, sep=' ')\n s_arr = pd.read_csv(s_file, header=None, names=column_names, sep=' ')\n\n blocks = pd.merge(p_arr[['source_block', 'station_block']],\n s_arr[['source_block', 'station_block']],\n how='inner',\n on=['source_block', 'station_block'])\n matched_P = pd.merge(p_arr, blocks, how='inner',\n on=['source_block', 'station_block'])[column_names]\n matched_S = pd.merge(s_arr, blocks, how='inner',\n on=['source_block', 'station_block'])[column_names]\n matched_P.to_csv(matched_p_file, index=False, header=False, sep=' ')\n matched_S.to_csv(matched_s_file, index=False, header=False, sep=' ')", "def find_reference_files():\n for root, _, files in os.walk(\"./tests/references/\"):\n for basename in fnmatch.filter(files, \"*.tex\"):\n yield os.path.join(root, basename)", "def _parse_reference_fofn( reference_fofn, target_locus ):\n log.info(\"Parsing reference sequence FOFN\")\n with open( 
reference_fofn, 'r' ) as handle:\n for line in handle:\n filename, locus = line.strip().split()\n print locus, target_locus\n if locus == target_locus:\n return filename\n msg = 'No fasta file for target locus found!'\n log.error( msg )\n raise ValueError( msg )", "def get_tool_version_files():\n similar_files = defaultdict(list)\n for path in Runtime_Datasets.RAW_FILE_PATHS:\n filename = get_file_name(path)\n filename = filename.rsplit('_', 1)[0]\n similar_files[filename].append(path)\n\n Runtime_Datasets.RAW_FILE_PATHS = similar_files", "def get_nsite_DMRfind(inputf,output,samples,path_to_allc=\"\",mc_type=[\"C\"],num_procs=1,use_mc_status=True,min_cov=0):\n #dictionary of sample_name -> file handle\n allc_files = {}\n allc_lines = {}\n allc_fields = {}\n allc_prevbyte = {} #sample_name -> prevbyte (started from) in the file\n with open(inputf,'r') as f, open(output,'w') as g:\n line = f.readline()\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\")\n prefix_len = len(fields) #number of fields in original file\n mc_type = expand_nucleotide_code(mc_type)\n g.write(\"\\t\".join(fields[:prefix_len])+\"\\t\"+\"\\t\".join([\"nsite_\"+sample for sample in samples])+\"\\n\")\n prev_chrom = \"\"\n prev_end = \"\"\n dmr_lines=[]\n methylation_levels = {}\n for line in f:\n line = line.rstrip(\"\\n\")\n dmr_lines.append(line)\n if num_procs == 1:\n for sample in samples:\n methylation_levels[sample]=get_nsite_DMRfind_worker(dmr_lines,mc_type,sample,path_to_allc,output,min_cov,use_mc_status=False)\n else:\n pool = Pool(num_procs)\n results = {}\n for sample in samples:\n results[sample]=pool.apply_async(get_nsite_DMRfind_worker,(dmr_lines,mc_type,sample,path_to_allc,output,min_cov),{\"use_mc_status\":False})\n pool.close()\n pool.join()\n for sample in results:\n methylation_levels[sample]=results[sample].get()\n temp_files = {}\n for sample in samples:\n temp_files[sample]=open(output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\",'r')\n\n for index,line in enumerate(dmr_lines):\n g.write(line)\n for sample in samples:\n #g.write(\"\\t\"+methylation_levels[sample][index])\n g.write(\"\\t\"+temp_files[sample].readline().rstrip(\"\\n\"))\n g.write(\"\\n\")\n for sample in samples:\n temp_files[sample].close()\n subprocess.check_call(shlex.split(\"rm \"+output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\"))", "def lookup_ifproc_file(obsnum, path='/data_lmt/ifproc/', debug=False):\n paths = [path]\n\n if 'ifproc' not in path:\n paths += ['/data_lmt/ifproc/']\n if 'lmtttpm' not in path:\n paths += ['/data_lmt/lmttpm/']\n if 'tel' not in path:\n paths += ['/data_lmt/tel/']\n\n if debug:\n print(paths)\n\n for path in paths:\n filenames = glob.glob(os.path.join(path, '*_%06d_*.nc' % obsnum))\n if len(filenames) > 0:\n if debug:\n print('found %s' % (filenames[0]))\n return filenames[0]\n return ''\n #filename = ''\n #for file in os.listdir(path):\n # if fnmatch.fnmatch(file,'*_%06d_*.nc'%(obsnum)):\n # print('found %s'%(file))\n # filename = path+file\n #if filename == '':\n #print('lookup_ifproc_file: no file for obsnum ', obsnum)\n #if 'lmttpm' not in path:\n # print('look in lmttpm')\n # return lookup_ifproc_file(obsnum,path='/data_lmt/lmttpm/')\n #return(filename)", "def diff_files(self):\n pdup = []\n # Print out files that are only found in the DB\n if self.comparison_info['dbonly']:\n print(\"Files only found in the database --------- \")\n for fname in sorted(self.comparison_info['dbonly']):\n fdb = self.files_from_db[fname]\n print(f\"\\t{fdb['path']}/{fname}\")\n\n # 
print out files that are only found on disk\n if self.comparison_info['diskonly']:\n print(\"\\nFiles only found on disk --------- \")\n for fname in sorted(self.comparison_info['diskonly']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fdisk['relpath']}/{fname}{addon}\")\n if self.comparison_info['pathdup']:\n print(\"\\n The following files had multiple paths on disk (path filesize):\")\n listing = {}\n for fname in self.comparison_info['pathdup']:\n pdup.append(fname)\n listing[self.comparison_info['pathdup']['relpath']] = self.comparison_info['pathdup']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different paths on disk and in the DB\n if self.comparison_info['path']:\n print(\"\\nPath mismatch (file name, db path, disk path) --------- \")\n for fname in sorted(self.comparison_info['path']):\n addon = \"\"\n if fname in self.duplicates:\n addon = \" *\"\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname}\\t{fdb['path']}\\t{fdisk['relpath']}{addon}\")\n if self.comparison_info['duplicates']:\n print(\" The following files have multiple disk paths on disk (path filesize):\")\n for fname in self.comparison_info['duplicates']:\n pdup.append(fname)\n listing[self.comparison_info['duplicates']['relpath']] = self.comparison_info['duplicates']['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if fname in self.files_from_db and self.files_from_db[fname]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{fname} {listing[pth]:d}{addon}\")\n\n # Print files that have different file sizes on disk and in the DB\n if self.comparison_info['filesize']:\n print(\"\\nFilesize mismatch (File name, size in DB, size on disk) --------- \")\n for fname in sorted(self.comparison_info['filesize']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['filesize']} {fdisk['filesize']}\")\n\n # Print files that have different md5sum on disk and in DB\n if self.md5sum and 'md5sum' in self.comparison_info and self.comparison_info['md5sum']:\n print(\"\\nmd5sum mismatch (File name, sum in DB, sum on disk) --------- \")\n for fname in sorted(self.comparison_info['md5sum']):\n fdb = self.files_from_db[fname]\n fdisk = self.files_from_disk[fname]\n print(f\"\\t{fname} {fdb['md5sum']} {fdisk['md5sum']}\")\n\n # Print out files that have multiple paths on disk\n if len(self.duplicates) > len(pdup):\n print(\"\\nThe following files have multiple disk paths on disk (path filesize):\")\n for dup in sorted(self.duplicates):\n if dup not in pdup:\n listing = {}\n for fls in self.duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_db and self.files_from_db[dup]['path'] == pth:\n addon = \" (DB Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")\n\n # Print out files that have multiple endtries in the DB\n if self.db_duplicates:\n print(\"\\nThe following files have multiple entries in the database (path filesize):\")\n for 
dup in sorted(self.db_duplicates):\n listing = {}\n for fls in self.db_duplicates[dup]:\n listing[fls['relpath']] = fls['filesize']\n first = True\n for pth in sorted(listing):\n start = \" \"\n if first:\n start = \"*\"\n first = False\n addon = \"\"\n if dup in self.files_from_disk and self.files_from_disk[dup]['path'] == pth:\n addon = \" (Disk Match)\"\n print(f\" {start} {pth}/{dup} {listing[pth]:d}{addon}\")", "def test_013_find_files(self):\n HEADING()\n db = self.db\n\n db.connect()\n\n # Clear all jobs currently in the database to ensure a correct final assertion\n db.clear()\n\n # Add the jobs outlined in the YAML file\n db.add_from_yaml(\"etc/jobs.yaml\")\n inputs, outputs = db.find_jobs_with_file(\"in1.txt\")\n\n # Assert that the lengths of the inputs and outputs arrays are correct\n count_fgrep = len(Shell.fgrep(\"in1.txt\", \"etc/jobs.yaml\").strip().split(\"\\n\"))\n assert(len(inputs) == count_fgrep)", "def check_file_references(name):\n references = []\n for root, dirs, files in walk(Path(\".\")):\n for file_name in files:\n if is_notebook(file_name):\n nb_markdown_cells = markdown_cells(os.path.join(root, file_name))\n for cell in nb_markdown_cells:\n for line in cell:\n if name in line:\n references.append(file_name)\n else:\n with open(os.path.join(root, file_name), encoding=\"utf8\", errors='ignore') as non_nb_file:\n if name in non_nb_file.read():\n references.append(file_name)\n return references", "def add_references_to_papers(infile, dir):\n papers = json.load(open(infile))\n for paper in papers:\n for file in os.listdir(dir):\n if file.split(\".txt\")[0] == paper['doi']: # Must find the correct file to parse\n filename = TEXT_DIR+file\n refs =extract_references_from_txt(filename) #Uses the text files to find references\n paper['references']=refs\n return papers", "def XPLMFindDataRef(inDataRefName):\n return int", "def get_refpaths_from_filename(filename, reference_file_types, observatory=None):\n from .. 
import datamodels\n with datamodels.open(filename) as model:\n refpaths = get_multiple_reference_paths(model, reference_file_types, observatory)\n return refpaths", "def test_get_output_filepaths(self):\r\n\r\n actual_fna_fp, actual_log_fp = get_output_filepaths(\".\",\r\n '/home/tests/seqs.fna')\r\n\r\n expected_fna_fp = \"./seqs_rev_primer_truncated.fna\"\r\n expected_log_fp = \"./rev_primer_truncation.log\"\r\n\r\n self.assertEqual(actual_fna_fp, expected_fna_fp)\r\n self.assertEqual(actual_log_fp, expected_log_fp)", "def get_reffile(self, refs, detector):\n for key in refs:\n if detector in key:\n return refs[key]\n self.logger.error(\"WARNING: no file found for detector {} in {}\"\n .format(detector, refs))", "def openfile(self,files):\n for f in files:\n if f in self.fmap:\n continue\n try:\n fd=open(f,'r');\n self.files.append(fd)\n self.fmap[f]=fd\n if len(self.handle)<2:\n self.handle.append(len(self.files)-1)\n self.fname.append(f)\n self.total+=[0]\n self.inst+=[{}]\n self.excl+=[{}]\n self.incl+=[{}]\n self.caller_callee+=[{}]\n self.loadfile(fd)\n except IOError:\n pass\n print('%s not exist!!'%(f))", "def identify_primary_reference_datasets(conn, log):\n\n primary_ref = {}\n\n primary_ref['refimg_id_ip'] = phot_db.find_primary_reference_image_for_field(conn)\n\n query = 'SELECT facility, filter, software FROM reference_images WHERE refimg_id=\"'+str(primary_ref['refimg_id_ip'])+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n\n primary_ref['facility_id'] = t['facility'][0]\n primary_ref['software_id'] = t['software'][0]\n\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"ip\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref['ip'] = t['filter_id'][0]\n\n for f in ['rp', 'gp']:\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"'+f+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref[f] = t['filter_id'][0]\n\n query = 'SELECT refimg_id FROM reference_images WHERE facility=\"'+str(primary_ref['facility_id'])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND filter=\"'+str(t['filter_id'][0])+'\"'\n qs = phot_db.query_to_astropy_table(conn, query, args=())\n\n if len(qs) > 0:\n primary_ref['refimg_id_'+f] = qs['refimg_id'][0]\n else:\n log.info('WARNING: Database contains no primary reference image data in filter '+f)\n\n log.info('Identified the primary reference datasets for this field as:')\n for key, value in primary_ref.items():\n log.info(str(key)+' = '+str(value))\n\n return primary_ref", "def pavs (dirName,pat,dx,dy):\r\n ntotpat=0\r\n\r\n tabf=np.zeros((dx,dy),np.uint8)\r\n _tabsroi=np.zeros((dx,dy,3),np.uint8)\r\n _tabscan = np.zeros((dx,dy),np.int16)\r\n\r\n (top,tail)=os.path.split(dirName)\r\n print 'pav :',tail,'pattern :',pat\r\n patpickle=[]\r\n nampadir=os.path.join(patchpath,pat)\r\n nampadirl=os.path.join(nampadir,locabg)\r\n if not os.path.exists(nampadir):\r\n os.mkdir(nampadir)\r\n os.mkdir(nampadirl)\r\n\r\n pathpicklepat=os.path.join(picklepathdir,pat)\r\n# print pathpicklepat\r\n pathpicklepatl=os.path.join(pathpicklepat,locabg)\r\n patchpicklenamepatient=tail+'_'+patchpicklename\r\n\r\n pathpicklepatfile=os.path.join(pathpicklepatl,patchpicklenamepatient)\r\n if not os.path.exists(pathpicklepat):\r\n os.mkdir(pathpicklepat)\r\n if not os.path.exists(pathpicklepatl):\r\n os.mkdir(pathpicklepatl)\r\n if os.path.exists(pathpicklepatfile):\r\n os.remove(pathpicklepatfile)\r\n\r\n for scannumb in range (0,dy):\r\n tabp = 
np.zeros((dx, dy), dtype=np.uint8)\r\n tabf=np.copy(tabroipat3d[pat][scannumb])\r\n\r\n tabfc=np.copy(tabf)\r\n nbp=0\r\n if tabf.max()>0:\r\n vis=contour2(tabf,pat,dx,dy)\r\n if vis.sum()>0:\r\n _tabsroi = np.copy(tabsroi3d[scannumb])\r\n imn=cv2.add(vis,_tabsroi)\r\n imn=tagview(imn,pat,0,20)\r\n tabsroi3d[scannumb]=imn\r\n imn = cv2.cvtColor(imn, cv2.COLOR_BGR2RGB)\r\n\r\n sroifile='tr_'+str(scannumb)+'.'+typeroi\r\n filenamesroi=os.path.join(sroidir,sroifile)\r\n cv2.imwrite(filenamesroi,imn)\r\n\r\n np.putmask(tabf,tabf>0,1)\r\n\r\n atabf = np.nonzero(tabf)\r\n\r\n xmin=atabf[1].min()\r\n xmax=atabf[1].max()\r\n ymin=atabf[0].min()\r\n ymax=atabf[0].max()\r\n\r\n\r\n _tabscan=tabscan3d[scannumb]\r\n\r\n i=xmin\r\n while i <= xmax:\r\n j=ymin\r\n while j<=ymax:\r\n tabpatch=tabf[j:j+dimpavy,i:i+dimpavx]\r\n\r\n area= tabpatch.sum()\r\n targ=float(area)/pxy\r\n\r\n if targ >thrpatch:\r\n imgray = _tabscan[j:j+dimpavy,i:i+dimpavx]\r\n imagemax= cv2.countNonZero(imgray)\r\n min_val, max_val, min_loc,max_loc = cv2.minMaxLoc(imgray)\r\n\r\n if imagemax > 0 and max_val - min_val>2:\r\n nbp+=1\r\n patpickle.append(imgray)\r\n x=0\r\n #we draw the rectange\r\n while x < dimpavx:\r\n y=0\r\n while y < dimpavy:\r\n tabp[y+j][x+i]=150\r\n if x == 0 or x == dimpavx-1 :\r\n y+=1\r\n else:\r\n y+=dimpavy-1\r\n x+=1\r\n #we cancel the source\r\n tabf[j:j+dimpavy,i:i+dimpavx]=0\r\n j+=dimpavy-1\r\n j+=1\r\n i+=1\r\n\r\n if nbp>0:\r\n tabfc =tabfc+tabp\r\n ntotpat=ntotpat+nbp\r\n if scannumb not in listsliceok:\r\n listsliceok.append(scannumb)\r\n stw=tail+'_slice_'+str(scannumb)+'_'+pat+'_'+locabg+'_'+str(nbp)\r\n stww=stw+'.txt'\r\n flw=os.path.join(jpegpath,stww)\r\n mfl=open(flw,\"w\")\r\n mfl.write('#number of patches: '+str(nbp)+'\\n')\r\n mfl.close()\r\n stww=stw+'.'+typej\r\n flw=os.path.join(jpegpath,stww)\r\n scipy.misc.imsave(flw, tabfc)\r\n pickle.dump(patpickle, open(pathpicklepatfile, \"wb\"),protocol=-1)\r\n\r\n return ntotpat", "def lfp_extract(files):\r\n \r\n if 'lfpdata' in locals():\r\n del lfpdata\r\n \r\n for i, file in enumerate(files):\r\n \r\n ### load data\r\n matdat = sio.loadmat(file, variable_names = ['lfpsegs', 'lfpdata', 'fs', 'chnAreas'], \r\n struct_as_record = False, squeeze_me = True) \r\n \r\n \r\n \r\n ### extract the noused channels, only calculate once\r\n if i == 0:\r\n \r\n # chnAreas\r\n chnAreas = matdat['chnAreas'].tolist()\r\n \r\n # fs: sample rate\r\n fs = matdat['fs'] \r\n \r\n \r\n\r\n ### dealing lfp data\r\n \r\n # lfp (np.ndarray): nareas * ntemp * ntrials or ntemp * nareas * ntrials\r\n if 'lfpdata' in matdat.keys():\r\n lfpdata_1file = matdat['lfpdata']\r\n elif 'lfpsegs' in matdat.keys():\r\n lfpdata_1file = matdat['lfpsegs']\r\n\r\n n1, n2, n3 = lfpdata_1file.shape\r\n if n1 > n2: # ntemp * nareas * ntrials\r\n lfpdata_1file = np.transpose(lfpdata_1file, (1, 0, 2))\r\n \r\n # concatenate to lfpdata for all files\r\n if 'lfpdata' not in locals():\r\n lfpdata = lfpdata_1file\r\n else:\r\n lfpdata = np.concatenate((lfpdata, lfpdata_1file), axis = 2)\r\n \r\n \r\n return lfpdata, chnAreas, fs", "def findFileForRun(self,run,time=0):\n graphid = 0\n if time:\n query = \"SELECT graphid FROM Version WHERE timeStamp=%s AND maxRunNumber>=%s AND minRunNumber<=%s\"%(time,run,run)\n tup = self.fetchOne(query)\n if tup and tup[0]:\n graphid = tup[0]\n\n\tquery = \"SELECT locationFileId FROM Location WHERE run=%s\"%(run)\n if graphid:\n query+=\" AND graphid=%s\"%graphid\n elif not graphid and time:\n print \"No matched timeStamp found, continue searching in all 
graphs.\"\n\ttup = self.fetchAll(query)\n print \"For given run %s\"%(run,)\n\tif not len(tup):\n\t print \"No files found\"\n\t return\n\tfor x in tup:\n\t locId = x[0]\n\t query = \"SELECT fileName FROM FileID WHERE fileId=%s\"%locId\n\t res = self.fetchOne(query)\n locFileName = res[0]\n\t # locInfo=[streamNames,pdsIDList,oDict,recordSize,positionOfFirstRecord]\n\t locInfo = lpds_dump.locationFileParser(locFileName)\n for pdsId in locInfo[1]:\n\t query = \"SELECT fileName FROM FileID WHERE fileId=%s\"%pdsId\n\t\tresult= self.fetchOne(query)\n print result[0]", "def get_reference_properties(filename):\n try: # Hopefully it's a nice new standard filename, easy\n return decompose_newstyle_name(filename)\n except AssertionError: # cryptic legacy paths & names, i.e. reality\n pass\n # If not, dig inside the FITS file, slow\n return ref_properties_from_header(filename)", "def locate(tgt_fpath, survey):\n flen = os.stat(tgt_fpath).st_size\n fpaths = survey.get(flen, ())\n if not fpaths:\n return None\n\n for fbase_path in fpaths:\n # print(' '*5, tgt_fpath, fbase_path)\n if not filecmp.cmp(tgt_fpath, fbase_path, shallow=True):\n continue # early reject, try other candidates\n if filecmp.cmp(tgt_fpath, fbase_path, shallow=False):\n # identically equal\n return fbase_path\n\n return None", "def load_isprc(isprc_filename, ref_name, primer_hits):\n with open(isprc_filename) as handle:\n for title, seq in SimpleFastaParser(handle):\n amp_region, name, length, left, right = title.split()\n assert (\n left,\n right,\n ) in primer_hits, f\"Stale cache? Why {name}, {left}, {right}\"\n assert (\n length == f\"{len(seq)}bp\"\n ), f\"Expected length {len(seq)} from sequence, yet {length}\"\n seq = seq.upper()\n # chrom, rest = region.rsplit(\":\", 1)\n # if \"+\" in rest:\n # strand = \"+\"\n # start, end = rest.split(\"+\")\n # else:\n # strand = \"-\"\n # start, end = rest.split(\"-\")\n primer_hits[left, right].append((ref_name, seq))", "def filename(i):\n rand_name = os.path.join(os.getcwd(), \"input-%d.txt\" % i)\n ref_name = os.path.join(os.getcwd(), \"input-%d.ref\" % i)\n return rand_name, ref_name", "def prepare_final(fastq_in, refs, aligndir, outdir):\n base = os.path.join(aligndir, os.path.splitext(os.path.basename(fastq_in))[0])\n for ref in refs:\n base_ref = os.path.basename(ref[\"file\"])\n if ref.get(\"chr_dist\", False):\n _copy_galaxy_files(base, base_ref, outdir)\n if ref.get(\"feature_prep\", False):\n _copy_igv_files(base, base_ref, ref[\"file\"], outdir)", "def readWrite_gpt2_1w(xdir, station, site_lat, site_lon):\n# this should use the environment variable\n outfile = xdir + '/input/' + station + '_refr.txt'\n if os.path.isfile(outfile):\n print('refraction file for this station already exists')\n else:\n print('refraction output file will be written to ', outfile)\n\n# change to radians\n dlat = site_lat*np.pi/180 \n dlon = site_lon*np.pi/180 \n\n# read VMF gridfile in pickle format \n pname = xdir + '/input/' + 'gpt_1wA.pickle'\n print('large refraction file is stored here:', pname)\n try:\n f = open(pname, 'rb')\n [All_pgrid, All_Tgrid, All_Qgrid, All_dTgrid, All_U, All_Hs, All_ahgrid, All_awgrid, All_lagrid, All_Tmgrid] = pickle.load(f)\n f.close()\n except:\n print('I did not find the large refraction file where it is supposed to be, but I will try looking in your home directory')\n try:\n pname = 'gpt_1wA.pickle'\n f = open(pname, 'rb')\n [All_pgrid, All_Tgrid, All_Qgrid, All_dTgrid, All_U, All_Hs, All_ahgrid, All_awgrid, All_lagrid, All_Tmgrid] = pickle.load(f)\n f.close()\n 
except:\n print('hmm, failed again. Go into gnssIR_lomb.py, set RefractionCorrection to false, and rerun the code.... ')\n sys.exit()\n\n# print(np.shape(All_pgrid))\n# really should e zero to four, but whatever\n indx = np.zeros(4,dtype=int)\n indx_lat = np.zeros(4,dtype=int)\n indx_lon = np.zeros(4,dtype=int)\n\n\n#figure out grid index\n# % only positive longitude in degrees\n if (dlon < 0):\n plon = (dlon + 2*np.pi)*180/np.pi;\n else:\n plon = dlon*180/np.pi \n#\n# transform to polar distance in degrees\n ppod = (-dlat + np.pi/2)*180/np.pi \n\n#% find the index (line in the grid file) of the nearest point\n# % changed for the 1 degree grid (GP)\n ipod = np.floor(ppod+1) \n ilon = np.floor(plon+1) \n \n# % normalized (to one) differences, can be positive or negative\n# % changed for the 1 degree grid (GP)\n diffpod = (ppod - (ipod - 0.5)) \n difflon = (plon - (ilon - 0.5)) \n# % added by HCY\n# % changed for the 1 degree grid (GP)\n if (ipod == 181):\n ipod = 180 \n if (ilon == 361):\n ilon = 1 \n if (ilon == 0):\n ilon = 360\n\n# get the number of the corresponding line\n#\t changed for the 1 degree grid (GP)\n indx[0] = (ipod - 1)*360 + ilon \n# save the lat lon of the grid points\n indx_lat[0] = 90-ipod+1 \n indx_lon[0] = ilon-1 \n# % near the poles: nearest neighbour interpolation, otherwise: bilinear\n# % with the 1 degree grid the limits are lower and upper (GP)\n\n bilinear = 0 \n max_ind = 1 \n if (ppod > 0.5) and (ppod < 179.5):\n bilinear = 1 \n if (bilinear == 1):\n max_ind =4 \n\n# % bilinear interpolation\n# % get the other indexes \n \n ipod1 = ipod + np.sign(diffpod) \n ilon1 = ilon + np.sign(difflon) \n# % changed for the 1 degree grid (GP)\n if (ilon1 == 361):\n ilon1 = 1 \n if (ilon1 == 0):\n ilon1 = 360 \n# get the number of the line\n# changed for the 1 degree grid (GP)\n# four indices ???\n indx[1] = (ipod1 - 1)*360 + ilon; # % along same longitude\n indx[2] = (ipod - 1)*360 + ilon1;# % along same polar distance\n indx[3] = (ipod1 - 1)*360 + ilon1;# % diagonal\n#\n# save the lat lon of the grid points lat between [-90 ;90] lon [0 360] \n indx_lat[1] = 90 - ipod1+np.sign(diffpod) \n indx_lon[1] = ilon-1 \n indx_lat[2] = 90-ipod +1\n indx_lon[2] = ilon1 - np.sign(difflon) \n indx_lat[3] = 90 -ipod1+np.sign(diffpod) \n indx_lon[3] = ilon1- np.sign(difflon);\n\n# extract the new grid\n# will need to do 0-4 instead of 1-5 because stored that way in python\n# which values to use in the bigger array\n# assign the correct values\n indx = indx - 1\n indx_list = indx.tolist()\n# print(indx_list)\n# print(indx)\n#print(np.shape(indx_lat))\n#print(np.shape(indx_lon))\n w = 0\n# need to write values for a given station to a plain text file\n#\n fout = open(outfile, 'w+')\n for a in indx_list:\n for k in [0,1,2,3,4]:\n fout.write(\" {0:4.0f} {1:5.0f} {2:13.4f} {3:10.4f} {4:10.6f} {5:10.4f} {6:12.5f} {7:12.5f} {8:10.6f} {9:10.6f} {10:10.6f} {11:10.4f} \\n\".format( indx_lat[w], indx_lon[w],All_pgrid[a,k],All_Tgrid[a,k],All_Qgrid[a,k]*1000,All_dTgrid[a,k]*1000,All_U[a,0],All_Hs[a,0], All_ahgrid[a,k]*1000, All_awgrid[a,k]*1000, All_lagrid[a,k], All_Tmgrid[a,k] ))\n\n w+=1\n fout.close()\n print('file written')", "def findExtInDF(df, ext='abf', labrow=1, refCol=0, outfile=None):\n fils = []\n for i in range(df.shape[0]): # For every row\n for c in df.columns:\n try:\n temp_ = df.iloc[i][c]\n except:\n print(i, c)\n try:\n if temp_.split('.')[-1] == ext:\n try: # Could be multiple files\n temp_ = ''.join([u for u in temp_ if u != ' '])\n csplit = temp_.split(',')\n for ent in 
csplit:\n fils.append([df.iloc[i][df.columns[refCol]], ent, df[c].values[labrow]])\n except:\n fils.append([df.iloc[i][df.columns[refCol]], temp_, df[c].values[labrow]]) # there was only one file\n except:\n pass\n print('Found %i hits' %len(fils))\n \n # This part will save only the filenames in a txt doc\n # which can be passed to getPaths.sh to return the full paths to each file.\n if outfile is not None:\n with open(outfile, 'w') as fOut:\n for f in fils:\n fOut.write(','.join([str(i) for i in f]))\n fOut.write('\\n')\n return fils", "def fileCheck(path):\n print('[+] Checking For File patching ')\n for url in check_files:\n try:\n #File Rereive\n data = query(url)\n file_name = url.split(\"/\")[-1]\n _,tmp_file = tempfile.mkstemp(prefix=\"exitmap_%s_\" % file_name)\n with open(tmp_file, \"wb\") as fd:\n fd.write(data)\n for i in check_files_patch_results:\n if str(i.url) == str(url):\n if str(i.filehash) != str(sha512_file(tmp_file)):\n print('[+] ALERT File Patch FOUND !')\n print(' | exitnode : %s' % str(i.exitnode) )\n print(' |_________> url: %s' % str(i.url) )\n print(' |_________> filePath: %s' % str(i.filepath) )\n print(' |_________> fileHash: %s' % str(i.filehash) )\n #check_files_patch_results.append( File_Check_Results(url, file_name, tmp_file, path, sha512_file(tmp_file)) )\n else :\n print('[+] File (%s) seems to be ok' % i.url)\n break\n\n except Exception as err:\n print('[-] Error ! %s' % err)\n traceback.print_exc()\n pass\n return time.time()", "def FindFile(self, fd):\n hashes = self._HashFile(fd)\n if not hashes:\n return False\n\n hash_urn = self.PATH.Add(str(hashes.sha1))\n\n for data in aff4.FACTORY.Stat([hash_urn], token=self.token):\n return data[\"urn\"]\n\n return False", "def prepare_reference(reference_fp):\n # create .fai if doesn't yet exist\n if not os.path.isfile(re.sub(r'.[^.]*$', '.fai', reference_fp)):\n logging.info('preparing reference .fai')\n tool_args = ('samtools', 'faidx', reference_fp)\n logging.info(f'executing the following command: {\" \".join(tool_args)}')\n subprocess.check_output(tool_args)\n\n # create .dict file if doesn't yet exist\n if not os.path.isfile(re.sub(r'.[^.]*$', '.dict', reference_fp)):\n logging.info('preparing reference .dict')\n tool_args = ('picard', 'CreateSequenceDictionary',\n f'R={reference_fp}',\n 'O=' + re.sub(r'.[^.]*$', '.dict', reference_fp))\n logging.info(f'executing the following command: {\" \".join(tool_args)}')\n subprocess.check_output(tool_args)", "def edited_file_locations(self):", "def fileCheckOriginal():\n\n print('[+] Populating File Hasing for later check')\n for url in check_files:\n try:\n data = query(url)\n file_name = url.split(\"/\")[-1]\n _,tmp_file = tempfile.mkstemp(prefix=\"exitmap_%s_\" % file_name)\n\n with open(tmp_file, \"wb\") as fd:\n fd.write(data)\n print('[+] Saving File \\\"%s\\\".' % tmp_file)\n check_files_patch_results.append( File_Check_Results(url, file_name, tmp_file, \"NO\", sha512_file(tmp_file)) )\n print('[+] First Time we see the file..')\n print(' |_________> exitnode : None' )\n print(' |_________> :url: %s' % str(url) )\n print(' |_________> :filePath: %s' % str(tmp_file))\n print(' |_________> :file Hash: %s' % str(sha512_file(tmp_file)))\n except Exception as err:\n print('[-] Error ! 
%s' % err)\n traceback.print_exc()\n pass\n return time.time()", "def run_prepfold(filenm, outfile, errfile):\n # Open candfile\n f = open(filenm, 'r')\n \n i = 0\n for line in f:\n # This just skips down to where the files are\n if line.startswith('#'):\n i = 1\n continue\n if i==1:\n namecand = line.split()[0]\n #namesplit = namecand.split(\"_\"+str(params.zmax)+\":\")\n namesplit = namecand.rsplit(\"_\", 1)\n if len(namesplit) != 2:\n continue\n else:\n bname = namesplit[0]\n cnum = namesplit[1].split(\":\")[1]\n psname = bname + \"_Cand_\" + cnum + \".pfd.ps\"\n if os.path.exists(psname):\n print \"File \"+psname+\" already made, skipping\"\n else:\n candfile = namecand.split(':')[0] + '.cand'\n datfile = namecand.split('_ACCEL_')[0] + '.dat'\n outname = namecand.split('_ACCEL_')[0]\n if ( os.path.exists(candfile) and os.path.exists(datfile) ):\n cmd = \"prepfold -noxwin -accelcand %s -accelfile %s -o %s %s\"\\\n %(cnum, candfile, outname, datfile)\n try_cmd(cmd, stdout=outfile, stderr=errfile)\n else:\n print \"Could not find %s\" %candfile\n print \"and/or %s\" %datfile\n # Close file, we're done\n f.close()", "def pool_test_dataset_dir(obs_dir_fp,exp_dir_fp,file_name_delimiter=\"--\",\\\n file_name_field_order=\\\n {'file_type':0,\"prediction_method\":1,\"weighting_method\":2,\"holdout_method\":3,\\\n \"distance\":4,\"organism\":5},strict=False, verbose=True,pool_by=['distance']):\n trials = defaultdict(list)\n #We'll want a quick unzip fn for converting points to trials\n #TODO: separate out into a 'get_paired_data_from_dirs' function\n\n pooled_observations = {}\n pooled_expectations = {}\n pairs = iter_prediction_expectation_pairs(obs_dir_fp,exp_dir_fp,file_name_field_order,file_name_delimiter,verbose=verbose)\n file_number = 0\n for obs_table,exp_table,filename in pairs:\n #print \"analyzing filename:\",filename \n filename_metadata= get_metadata_from_filename(filename,file_name_field_order,\\\n file_name_delimiter,verbose=verbose)\n \n #base_tag = '%s\\t%s\\t' %(filename_metadata['holdout_method'],filename_metadata['prediction_method'])\n #tags = [base_tag+'all_results']\n if 'file_type' in pool_by:\n pool_by.remove('file_type') #we do this manually at the end\n combined_tag = ['all']*len(file_name_field_order.keys())\n for field in file_name_field_order.keys():\n #print combined_tag\n #print file_name_field_order\n idx = file_name_field_order[field]\n #print idx\n if field in pool_by:\n combined_tag[idx] = filename_metadata[field]\n tags=[file_name_delimiter.join(combined_tag)]\n \n if verbose:\n print \"Pooling by:\", pool_by\n print \"Combined tags:\",tags\n \n pooled_observations,pooled_expectations =\\\n update_pooled_data(obs_table,exp_table,tags,pooled_observations,\\\n pooled_expectations,str(file_number),verbose=verbose)\n file_number += 1\n\n return pooled_observations,pooled_expectations", "def get_data_in_paths(dfile, paths):\n for pth in paths:\n for f in os.listdir(pth):\n if f == dfile:\n return os.path.abspath(os.path.join(pth, dfile))", "def test_load_ref():\n\n itraj = os.path.join(path, \"alanine_dipeptide.nc\")\n iref = os.path.join(path, \"alanine_dipeptide.ncrst\")\n itop = os.path.join(path, \"alanine_dipeptide.parm7\")\n\n with pytest.raises(ValueError):\n ref = tools.load_ref(itraj, itop)\n\n ref = tools.load_ref(iref, itop)\n\n assert ref.n_frames == 1\n assert ref.n_atoms == 1912", "def add_reffile_overrides(self):\n all_obs_info, unique_obs_info = self.info_for_all_observations()\n\n # Add empty placeholders for reference file entries\n empty_col = 
np.array([' ' * 500] * len(self.info['Instrument']))\n superbias_arr = deepcopy(empty_col)\n linearity_arr = deepcopy(empty_col)\n saturation_arr = deepcopy(empty_col)\n gain_arr = deepcopy(empty_col)\n distortion_arr = deepcopy(empty_col)\n photom_arr = deepcopy(empty_col)\n ipc_arr = deepcopy(empty_col)\n transmission_arr = deepcopy(empty_col)\n badpixmask_arr = deepcopy(empty_col)\n pixelflat_arr = deepcopy(empty_col)\n\n # Loop over combinations, create metadata dict, and get reffiles\n for status in unique_obs_info:\n updated_status = deepcopy(status)\n (instrument, detector, filtername, pupilname, readpattern, exptype) = status\n\n if instrument == 'FGS':\n if detector in ['G1', 'G2']:\n detector = detector.replace('G', 'GUIDER')\n updated_status = (instrument, detector, filtername, pupilname, readpattern, exptype)\n\n # If the user entered reference files in self.reffile_defaults\n # use those over what comes from the CRDS query\n #sbias, lin, sat, gainfile, dist, ipcfile, pam = self.reffiles_from_dict(status)\n manual_reffiles = self.reffiles_from_dict(updated_status)\n for key in manual_reffiles:\n if manual_reffiles[key] == 'none':\n manual_reffiles[key] = 'crds'\n\n # Identify entries in the original list that use this combination\n match = [i for i, item in enumerate(all_obs_info) if item==status]\n\n # Populate the reference file names for the matching entries\n superbias_arr[match] = manual_reffiles['superbias']\n linearity_arr[match] = manual_reffiles['linearity']\n saturation_arr[match] = manual_reffiles['saturation']\n gain_arr[match] = manual_reffiles['gain']\n distortion_arr[match] = manual_reffiles['distortion']\n photom_arr[match] = manual_reffiles['photom']\n ipc_arr[match] = manual_reffiles['ipc']\n transmission_arr[match] = manual_reffiles['transmission']\n badpixmask_arr[match] = manual_reffiles['badpixmask']\n pixelflat_arr[match] = manual_reffiles['pixelflat']\n\n self.info['superbias'] = list(superbias_arr)\n self.info['linearity'] = list(linearity_arr)\n self.info['saturation'] = list(saturation_arr)\n self.info['gain'] = list(gain_arr)\n self.info['astrometric'] = list(distortion_arr)\n self.info['photom'] = list(photom_arr)\n self.info['ipc'] = list(ipc_arr)\n self.info['transmission'] = list(transmission_arr)\n self.info['badpixmask'] = list(badpixmask_arr)\n self.info['pixelflat'] = list(pixelflat_arr)", "def get_hash_curr_files(self):\n temp = None\n for f in self.file_list:\n if not os.stat(f).st_size:\n self.print_to_log('Skipping Zero Length File: ' + f)\n else:\n try:\n\n batch_file = open(f,'U')\n time_stamp = self.get_timestamp()\n temp = ['pass',\n time_stamp,\n self.get_hash(batch_file),\n '1',\n time_stamp,\n batch_file.name[batch_file.name.rfind('\\\\') + 1 :]]\n\n batch_file.close()\n self.hash_curr_files[temp[2]] = temp\n self.print_to_log(\"successfully hashed file: \" + temp[5])\n except IOError:\n self.print_to_log('Cannot Open File: ' + f)\n except:\n self.print_to_log('Unknown Error, Exiting')\n raise", "def _get_references_by_species(self) -> dict:\n fasta_parser = FastaParser()\n references_by_species = {}\n for (\n species,\n reference_files,\n ) in self._pathcreator.ref_seq_paths_by_species.items():\n references_by_species[species] = []\n for reference_file in reference_files:\n with open(reference_file, \"r\") as fasta_fh:\n for header, sequence in fasta_parser.entries(fasta_fh):\n header_id = fasta_parser.header_id(header)\n references_by_species[species].append(header_id)\n return references_by_species", "def _get_filepaths(self):\n 
self._printer(str(self.__len__()) + \" file paths have been parsed in \" + str(self.timer.end))\n if self._hash_files:\n return pool_hash(self.filepaths)\n else:\n return self.filepaths", "def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0):\r\n lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files\r\n print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5])\r\n cfped = []\r\n coped = []\r\n cfgeno = []\r\n cogeno = []\r\n allrsa = {}\r\n ignorers = {}\r\n for i,basename in enumerate(bnlist):\r\n fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i])\r\n print '%s gave %d fgeno' % (basename,len(fgeno))\r\n for rs in trsadict.keys():\r\n tk = trsadict[rs].keys()\r\n if len(tk) > 2:\r\n print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs])\r\n if not allrsa.get(rs,None):\r\n allrsa[rs] = {}\r\n for a in tk:\r\n if not allrsa[rs].get(a,None):\r\n allrsa[rs][a] = trsadict[rs][a]\r\n else:\r\n allrsa[rs][a] += trsadict[rs][a]\r\n tk = allrsa[rs].keys()\r\n if len(tk) > 2 and not ignorers.get(rs,None): # new\r\n #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs])\r\n ignorers[rs] = rs\r\n cfped += fped\r\n coped += oped\r\n cfgeno += fgeno\r\n cogeno += ogeno\r\n print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno))\r\n # now have offspring and founder rows in lcdmap order\r\n # write map file\r\n print '### found %d markers > 2 alleles' % (len(ignorers.keys()))\r\n keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)]\r\n newmap = ['\\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs\r\n f = file('%s.map' % newbasename,'w')\r\n f.write('%s\\n' % '\\n'.join(newmap))\r\n f.close()\r\n for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno[i] = g # replace\r\n print 'cfgeno converted'\r\n if not fo: # not founders only - note arrays are not lists!\r\n cfped += copy.copy(coped) #\r\n del coped\r\n for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno.append(g) # extend founders\r\n del cogeno\r\n print 'after if not fo now have %d cfgeno' % (len(cfgeno))\r\n f = file('%s.ped' % newbasename,'w')\r\n for n,ped in enumerate(cfped):\r\n l = ' '.join(ped + list(cfgeno[n]))\r\n if n % 100 == 0 and n > 0:\r\n print 'writing line %d' % n\r\n f.write(l)\r\n f.write('\\n')\r\n f.close()\r\n print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)", "def identify_file(self, file):", "def _resolve_xml_and_data_paths(self):\n\n supported_extensions = ['.dat', '.lfp', '.eeg']\n self.filename = Path(self.filename)\n self.binary_file = Path(self.binary_file) if self.binary_file is not None else None\n \n if self.filename.suffix == '.xml':\n xml_file_path = self.filename\n data_file_path = self.binary_file \n elif self.filename.suffix == '':\n xml_file_path = self.filename.with_suffix(\".xml\")\n data_file_path = self.binary_file\n elif self.filename.suffix in supported_extensions:\n xml_file_path = self.filename.with_suffix(\".xml\")\n data_file_path = self.filename\n else:\n raise KeyError(f\"Format {self.filename.suffix} not supported, filename format 
should be {supported_extensions} or .xml\")\n \n if data_file_path is None:\n possible_file_paths = (xml_file_path.with_suffix(extension) for extension in supported_extensions)\n data_file_path = next((path for path in possible_file_paths if path.is_file()), None)\n if data_file_path is None:\n raise FileNotFoundError(f\"data binary not found for file {xml_file_path.stem} with supported extensions: {supported_extensions}\")\n\n \n assert xml_file_path.is_file(), f\"xml file not found at the expected location {xml_file_path}\"\n assert data_file_path.is_file(), f\"binary file not found at the expected location {data_file_path}\"\n\n self.xml_file_path = xml_file_path\n self.data_file_path = data_file_path", "def read_refcopy(f):\n\n d_refcopy = {}\n for n, line in enumerate(open(f)):\n if line.startswith('#'):\n continue\n\n line = line.rstrip()\n if line == '':\n continue\n\n _lis = line.split('\\t')\n taxa, num, = _lis[:2]\n skip = False\n for word in EXCLUDE:\n if word in taxa:\n skip = True\n break\n\n if skip:\n continue \n\n # the parsing of taxa works for both mothur output and this\n taxa = taxa.rstrip(';') # for mothur classfy.seqs output\n lis = taxa.split(';')\n lis2 = []\n for item in lis:\n item = item.strip() # for copyrigher copy table ' ;' separater\n if item.endswith(')'):\n item = item.rsplit('(', 1)[0].strip()\n\n # remove taxon level prefix, e.g. 'p__Firmicutes'\n if '__' in item:\n item = item.split('__', 1)[1]\n\n #item = item.strip('\"')\n\n # green gene taxonomy has sapce\n item = item.replace(' ', '_')\n\n item = item.lower()\n if item in ['', 'unknown', 'other', 'unassigned']:\n item = 'Unclassifed'\n\n item = item.capitalize()\n lis2.append(item)\n\n length = len(lis2)\n assert length <= LEVELS, '> {} levels found ({})'.format(\n LEVELS, length)\n if length != LEVELS:\n lis2 = lis2 + ['Unclassified']*(LEVELS - length)\n\n tu = tuple(lis2)\n d_refcopy[tu] = float(num)\n\n return d_refcopy", "def reffile(self):\n return os.path.join(self.__folder, self.__name + '.ref')", "def parse_geno_file(folder,return_flag):\n\n perc_alt = defaultdict(list)\n perc_ref = defaultdict(list)\n abs_alt = defaultdict(list)\n abs_ref = defaultdict(list)\n\n perc_alt_inv = defaultdict(dict)\n perc_ref_inv = defaultdict(dict)\n abs_alt_inv = defaultdict(dict)\n abs_ref_inv = defaultdict(dict)\n\n for geno_file in glob.glob(folder+'*_test_summary.tsv'):\n strain = geno_file.split('/')[-1].split('_')[0]\n #print strain\n prev_coordinate = \"0\"\n count = 0\n alt_allele = {}\n amb_allele = {}\n ref_allele = {}\n flag = 0 \n\n TEMP_HANDLE = open(geno_file,'r')\n for line in TEMP_HANDLE:\n line = line.rstrip('\\n')\n\n if(line[0]!='v'): ## Skip the header\n coordinate = line.split('\\t')[0].split('::')[-1]\n if(coordinate != prev_coordinate):\n #prev_coordinate = coordinate\n count = count + 1\n if(count == 1):\n if(line.split('\\t')[-3]!='alt'): ## No reads supporting the alternate allele\n flag = 1 \n alt_allele[coordinate] = 0\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n #print line\n else:\n alt_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 2):\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 3):\n if(line.split('\\t')[-3]!='ref'): ## No reads supporting the reference allele (all are ambiguous)\n ref_allele[coordinate] = 0\n else:\n ref_allele[coordinate] = int(line.split('\\t')[-1])\n prev_coordinate = coordinate\n count = 0\n if(flag == 1): ## The case where there are no alternate allele reads, counter is incremented to account for changed 
numbering\n count = count + 1 \n flag = 0 \n\n \n for key in alt_allele:\n if(alt_allele[key]+ref_allele[key]!= 0): ## Check to see if the denominator is not zero\n abs_alt[strain].append(float(alt_allele[key]))\n abs_ref[strain].append(float(ref_allele[key]))\n perc_alt[strain].append(float(alt_allele[key])/(alt_allele[key]+ref_allele[key]))\n perc_ref[strain].append(float(ref_allele[key])/(alt_allele[key]+ref_allele[key]))\n\n\n abs_alt_inv[strain][key] = float(alt_allele[key])\n abs_ref_inv[strain][key] = float(ref_allele[key])\n perc_alt_inv[strain][key] = float(alt_allele[key])/(alt_allele[key]+ref_allele[key])\n perc_ref_inv[strain][key] = float(ref_allele[key])/(alt_allele[key]+ref_allele[key])\n \n \n\n ## Keep only the common inversions, i.e. those between MC and the rest \n all_inversions = []\n common_inversions = []\n abs_alt_set = defaultdict(list)\n perc_alt_set = defaultdict(list)\n\n abs_alt_inv_set = defaultdict(dict)\n perc_alt_inv_set = defaultdict(dict)\n abs_ref_inv_set = defaultdict(dict)\n perc_ref_inv_set = defaultdict(dict)\n\n Rock = ['AC', 'CL','CM','CN','TI','PN','MC']\n Sand = ['MZ','DC','LF','MP','MS','CV']\n\n\n sand_inversions = []\n rock_inversions = []\n\n for strain in abs_alt_inv.keys():\n for inversion in abs_alt_inv[strain].keys():\n if(strain in Rock):\n rock_inversions.append(inversion)\n else:\n sand_inversions.append(inversion)\n all_inversions.append(inversion)\n \n \n common_inversions_sand = Counter(sand_inversions)\n common_inversions_rock = Counter(rock_inversions)\n #count_sand = 0\n common_inversions = Counter(all_inversions)\n return_inversions = []\n \n \n #print common_inversions\n for inversion in common_inversions.keys():\n if(common_inversions[inversion]==13):\n return_inversions.append(inversion)\n for strain in abs_alt_inv.keys():\n abs_alt_set[strain].append(abs_alt_inv[strain][inversion])\n perc_alt_set[strain].append(perc_alt_inv[strain][inversion])\n\n abs_alt_inv_set[strain][inversion] = abs_alt_inv[strain][inversion]\n perc_alt_inv_set[strain][inversion] = perc_alt_inv[strain][inversion]\n abs_ref_inv_set[strain][inversion] = abs_ref_inv[strain][inversion]\n perc_ref_inv_set[strain][inversion] = perc_ref_inv[strain][inversion]\n\n\n for inversion in abs_alt_inv_set['MC']:\n alternate_allele_sum_rock = 0\n reference_allele_sum_rock = 0\n alternate_allele_sum_sand = 0\n reference_allele_sum_sand = 0 \n for strain in Rock:\n alternate_allele_sum_rock = alternate_allele_sum_rock + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_rock = reference_allele_sum_rock + abs_ref_inv_set[strain][inversion]\n\n for strain in Sand:\n alternate_allele_sum_sand = alternate_allele_sum_sand + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_sand = reference_allele_sum_sand + abs_ref_inv_set[strain][inversion]\n\n abs_alt_set['Rock'].append(alternate_allele_sum_rock)\n perc_alt_set['Rock'].append(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock)))\n \n abs_alt_set['Sand'].append(alternate_allele_sum_sand)\n perc_alt_set['Sand'].append(float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand)))\n \n with open('log_file.txt','a') as LOG_FILE:\n if(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock))>float(sys.argv[2]) or float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand))>float(sys.argv[2])):\n print >> LOG_FILE,inversion \n \n\n print \"Sand : \"+str(count_sand)\n\n if return_flag == 
True:\n #print len([abs_alt_inv_set,abs_ref_inv_set,perc_alt_inv_set,perc_ref_inv_set])\n return perc_alt_inv_set\n else:\n return [abs_alt_set,perc_alt_set]", "def getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, workdir, dbh, DBReleaseIsAvailable, \\\n scope_dict, pfc_name=\"PoolFileCatalog.xml\", filesizeIn=[], checksumIn=[], thisExperiment=None):\n\n fileInfoDic = {} # FORMAT: fileInfoDic[file_nr] = (guid, pfn, size, checksum, filetype, copytool) - note: copytool not necessarily the same for all file (e.g. FAX case)\n replicas_dic = {} # FORMAT: { guid1: [replica1, .. ], .. } where replica1 is of type replica\n surl_filetype_dictionary = {} # FORMAT: { sfn1: filetype1, .. } (sfn = surl, filetype = DISK/TAPE)\n copytool_dictionary = {} # FORMAT: { surl1: copytool1, .. }\n totalFileSize = 0L\n ec = 0\n pilotErrorDiag = \"\"\n\n tolog(\"Preparing to build paths for input files\")\n\n # Get the site information object\n si = getSiteInformation(thisExperiment.getExperiment())\n\n # In case we are staging in files from an object store, we can do a short cut and skip the catalog lookups below\n copytool, dummy = getCopytool(mode=\"get\")\n if \"objectstore\" in copytool:\n tolog(\"Objectstore stage-in: cutting a few corners\")\n\n # Format: fileInfoDic[file_nr] = (guid, gpfn, size, checksum, filetype, copytool)\n # replicas_dic[guid1] = [replica1, ..]\n\n espath = si.getObjectstorePath(\"eventservice\") #getFilePathForObjectStore(filetype=\"eventservice\")\n logpath = si.getObjectstorePath(\"logs\") #getFilePathForObjectStore(filetype=\"logs\")\n\n i = 0\n try:\n for lfn in lfns:\n if \".log.\" in lfn:\n fullpath = os.path.join(logpath, lfns[i])\n else:\n fullpath = os.path.join(espath, lfns[i])\n fileInfoDic[i] = (guids[i], fullpath, filesizeIn[i], checksumIn[i], 'DISK', copytool) # filetype is always DISK on objectstores\n replicas_dic[guids[i]] = [fullpath]\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on objectstores\n i += 1\n except Exception, e:\n tolog(\"!!WARNING!!2233!! 
Failed to create replica and file dictionaries: %s\" % (e))\n ec = -1\n tolog(\"fileInfoDic=%s\" % str(fileInfoDic))\n tolog(\"replicas_dic=%s\" % str(replicas_dic))\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # If the pilot is running on a Tier 3 site, then neither LFC nor PFC should be used\n if si.isTier3():\n tolog(\"Getting file info on a Tier 3 site\")\n\n # Create file path to local SE (not used for scope based paths)\n path = sitemover.getTier3Path(dsname, DN) # note: dsname will only be correct for lib files, otherwise fix dsdict, currently empty for single lib file input?\n file_nr = -1\n for lfn in lfns:\n file_nr += 1\n\n # Use scope based path if possible\n# #if scope_dict and readpar('useruciopaths').lower() == \"true\":\n# if scope_dict and (\"/rucio\" in readpar('seprodpath') or \"/rucio\" in readpar('sepath')):\n# se_path = sitemover.getRucioPath(file_nr, tokens, scope_dict, lfn, path, analysisJob)\n# else:\n# se_path = os.path.join(path, lfn)\n se_path = os.path.join(path, lfn)\n\n # Get the file info\n ec, pilotErrorDiag, fsize, fchecksum = sitemover.getLocalFileInfo(se_path, csumtype=\"default\")\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Fill the dictionaries\n fileInfoDic[file_nr] = (guids[file_nr], se_path, fsize, fchecksum, 'DISK', copytool) # no tape on T3s, so filetype is always DISK\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on T3s\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n else:\n # Get the PFC from the proper source\n ec, pilotErrorDiag, xml_from_PFC, xml_source, replicas_dic, surl_filetype_dictionary, copytool_dictionary = \\\n getPoolFileCatalog(ub, guids, lfns, pinitdir, analysisJob, tokens, workdir, dbh,\\\n DBReleaseIsAvailable, scope_dict, filesizeIn, checksumIn,\\\n sitemover, pfc_name=pfc_name, thisExperiment=thisExperiment)\n\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n tolog(\"Using XML source %s\" % (xml_source))\n if xml_from_PFC == '':\n pilotErrorDiag = \"Failed to get PoolFileCatalog\"\n tolog(\"!!FAILED!!2999!! 
%s\" % (pilotErrorDiag))\n tolog(\"Mover get_data finished (failed)\")\n return error.ERR_NOPFC, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n xmldoc = minidom.parseString(xml_from_PFC) \n fileList = xmldoc.getElementsByTagName(\"File\")\n\n # Extracts the guids from the file list\n guids_filelist = getGuids(fileList)\n fileInfoDictionaryFromDispatcher = getFileInfoDictionaryFromDispatcher(lfns, filesizeIn, checksumIn) \n file_nr = -1\n for thisfile in fileList:\n file_nr += 1\n # Get the SURL and GUID from the XML\n gpfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n guid = guids_filelist[file_nr]\n\n # Get the filesize and checksum from the primary location (the dispatcher)\n _lfn = getLFN(gpfn, lfns) #os.path.basename(gpfn)\n\n # Remove any __DQ2 substring from the LFN if necessary\n if \"__DQ2\" in _lfn:\n _lfn = stripDQ2FromLFN(_lfn)\n fsize, fchecksum = getFileInfoFromDispatcher(_lfn, fileInfoDictionaryFromDispatcher)\n\n # Get the file info from the metadata [from LFC]\n if not fsize or not fchecksum:\n ec, pilotErrorDiag, fsize, fchecksum = getFileInfoFromMetadata(thisfile, guid, replicas_dic, region, sitemover, error)\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Even though checksum and file size is most likely already known from LFC, more reliable file\n # info is stored in Rucio. Try to get it from there unless the dispatcher has already sent it to the pilot\n if dsdict == {}:\n _dataset = dsname\n else:\n _dataset = getDataset(os.path.basename(gpfn), dsdict)\n _filesize, _checksum = sitemover.getFileInfoFromRucio(scope_dict[_lfn], _dataset, guid)\n if _filesize != \"\" and _checksum != \"\":\n if _filesize != fsize:\n tolog(\"!!WARNING!!1001!! Catalog file size (%s) not the same as Rucio file size (%s) (using Rucio value)\" % (fsize, _filesize))\n if _checksum != fchecksum:\n tolog(\"!!WARNING!!1001!! 
Catalog checksum (%s) not the same as Rucio checksum (%s) (using Rucio value)\" % (fchecksum, _checksum))\n fsize = _filesize\n fchecksum = _checksum\n\n # Get the filetype for this surl\n filetype = getFiletypeFromDictionary(gpfn, surl_filetype_dictionary)\n\n # Extract the copytool for this PFN\n _copytool = extractCopytoolForPFN(gpfn, copytool_dictionary)\n\n # Store in the file info dictionary\n fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, _copytool)\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic", "def test_main(data, tmp_path):\n\n main(data, tmp_path)\n\n FILES = (\n \"gd32f888x(0-1)xx-pinctrl.h\",\n \"gd32f888x(2-3)xx-pinctrl.h\",\n \"gd32f888y(0-1)xx-pinctrl.h\",\n \"gd32f999x(0-1)xx-pinctrl.h\",\n \"gd32f999x(2-3)xx-pinctrl.h\",\n \"gd32f999y(0-1)xx-pinctrl.h\",\n )\n\n for file in FILES:\n ref_file = data / file\n gen_file = tmp_path / file\n\n assert gen_file.exists()\n\n with open(ref_file) as ref, open(gen_file) as gen:\n assert ref.read() == gen.read()", "def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None", "def ProcessEntryFile(fd):\n\tglobal reference\n\n\tname = ''\n\tfilename = ''\n\tdd = {}\n\teof = False\n\twhile not eof:\n\t\tline = fd.readline()\n\t\tif len(line) == 0:\n\t\t\teof = True\n\t\t\tif name in reference.keys():\n\t\t\t\treference[name] = dd\n\t\t\telif name != '':\n\t\t\t\treference[name] = dd\n\t\t\t#if verbose: print reference\n\t\telse:\n\t\t\tline = line.strip()\n\t\t\tif line.startswith('name'):\n\t\t\t\tif name in reference.keys() or name != '':\n\t\t\t\t\treference[name] = dd\n\t\t\t\ttokens = line.split()\n\t\t\t\tnn = tokens[0].split('=')\n\t\t\t\tname = nn[1]\n\t\t\t\tdd = {}\n\t\t\telif line.startswith('file'):\n\t\t\t\tfilename = line[len('file='):]\n\t\t\t\tif name in reference.keys():\n\t\t\t\t\tdd \t= reference[name]\n\t\t\t\t\tif dd.has_key(filename):\n\t\t\t\t\t\tfilename = ''\n\t\t\telse:\n\t\t\t\tif filename != '':\n\t\t\t\t\ttokens = line.split()\n\t\t\t\t\tlength = len(tokens)\n\t\t\t\t\t#print tokens\n\t\t\t\t\tfirst = True\n\t\t\t\t\tfor t in range(0,length,2):\n\t\t\t\t\t\tpos = tokens[t].find('=')\n\t\t\t\t\t\tcountline = int(tokens[t][pos+1:])\n\t\t\t\t\t\tpos = tokens[t+1].find('=')\n\t\t\t\t\t\tref = tokens[t+1][pos+1:]\n\t\t\t\t\t\ttline = (countline,ref)\n\t\t\t\t\t\tif first:\n\t\t\t\t\t\t\tdd[filename] = [tline]\n\t\t\t\t\t\t\tfirst = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tff = dd[filename] #list of tuples (line,ref)\t\t\t\t\n\t\t\t\t\t\t\tff.append(tline)\n\t\t\t\t\t\t\tdd[filename] = ff", "def load_files_to_compare(self):\n self.first_source_data = load_path(self.path1)\n self.second_source_data = load_path(self.path2)", "def process_one_file(guid, key, env):\n logging.info(f'Processing file: {key}')\n try:\n query_seq = read_s3_file(key).seq\n results = []\n for name, reference_seq in REFERENCE_RECORDS.items():\n offset = reference_seq.seq.find(query_seq)\n if 
offset != -1:\n result = {'filename': name,\n 'offset': offset,\n 'name': reference_seq.name,\n 'desc': reference_seq.description\n }\n results.append(result)\n logging.info(f'found in {name} at {offset}')\n update_database(guid, 'done', env, results)\n logging.info(f'Update succeeded for guid={guid} in env={env}')\n except Exception as err:\n report = {'time': str(datetime.utcnow()),\n 'guid': guid,\n 'env': env,\n 'key': key,\n 'trace' : traceback.format_exc()\n }\n results = [{'error' : report}]\n update_database(guid, 'error', env, results)\n raise", "def extract_all_references(dig_parent_dir, readfile):\n dig_parent_path_obj = Path(dig_parent_dir)\n extracted = {\"refs\": {}, \"hrefsToRefs\": {}}\n for split_page_num in [282, 283, 284]:\n split_page_dir = dig_parent_path_obj / \"dig/html/split\"\n refs_html = readfile(\n \"report\" + str(split_page_num) + \"b.html\", split_page_dir\n )\n data = extract_references_page(refs_html)\n extracted['refs'].update(data['refs'])\n extracted['hrefsToRefs'].update(data['hrefsToRefs'])\n\n return extracted", "def data_file(self, file_name):\n\n fpattern = os.path.join(self.data_dir, file_name)\n fpaths = glob.glob(fpattern)\n for fpath in fpaths:\n self.pytex.add_dependencies(fpath)\n\n if not fpaths:\n raise ValueError(\"No files found matching this name or pattern.\")\n elif len(fpaths) == 1:\n return fpaths[0]\n else:\n return fpaths", "def _group_references_by_file(self, references: List[ReferenceDict]\n ) -> Dict[str, List[Tuple[Point, str]]]:\n grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]\n for reference in references:\n file_path = uri_to_filename(reference[\"uri\"])\n point = Point.from_lsp(reference['range']['start'])\n\n # get line of the reference, to showcase its use\n reference_line = get_line(self.view.window(), file_path, point.row)\n\n if grouped_references.get(file_path) is None:\n grouped_references[file_path] = []\n grouped_references[file_path].append((point, reference_line))\n\n # we don't want to cache the line, we always want to get fresh data\n linecache.clearcache()\n\n return grouped_references", "def getFilenamesAndGuid(thisfile):\n\n pfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n filename = os.path.basename(pfn)", "def validate_matching_cif_files():\n for refcode in FRAMEWORKS_DF['CSD refcode'].str:\n assert Path(CIF_DIR / (str(refcode) + '.cif')).is_file", "def grep_data(cutoff, files):\n res = {}\n for file in files:\n if is_gaussian(file) and is_fluorescence(file):\n res, name = update_dict_with_name(file, res)\n res, root = find_root(file, res, name)\n res = find_spectral_data(file, res, name, root, cutoff)\n return res", "def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):\n print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))\n dirlist = os.listdir(dirname)\n for filep in dirlist:\n filep = os.path.join(dirname,filep) \n if os.path.islink(filep):\n pass\n elif os.path.isdir(filep):\n m = re.search(notdir , filep)\n if m is None:\n self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )\n elif os.path.isfile(filep) and '.dat' in filep: \n nm = re.search(notsearch, filep)\n m = re.search(search , filep)\n #print m , nm\n if m is not None and nm is None:\n self.plotfiles.append(filep)\n else:\n pass", "def _find_histfile_var(file_list, default=None):\n for f in file_list:\n f = expanduser_abs_path(f)\n if not os.path.isfile(f):\n continue\n with open(f, 
'r') as rc_file:\n for line in rc_file:\n if line.startswith('HISTFILE='):\n hist_file = line.split('=', 1)[1].strip('\\'\"\\n')\n hist_file = expanduser_abs_path(hist_file)\n if os.path.isfile(hist_file):\n return hist_file\n else:\n if default:\n default = expanduser_abs_path(default)\n if os.path.isfile(default):\n return default", "def findgene(fname, dbpaths=dbpaths):\n scaf = []\n gbeg = []\n gend = []\n gfor = []\n gsta = []\n gdif = []\n cuffgenes = {}\n\n fobj = open(fname)\n for line in fobj:\n col = line.split()\n scaf.append( re.search('[sCcafold]*[0-9]+', col[3]).group() )\n gbeg.append( int(re.search(':(.*)-', col[3]).groups()[0]) )\n gend.append( int(re.search('-(.*)', col[3]).groups()[0]) )\n gfor.append(float(col[7]))\n gsta.append(float(col[8]))\n gdif.append(float(col[9]))\n\n fobj.close()\n print \"Significant transcripts read\"\n\n\n for result in range(len(scaf)):\n cur_scaf = scaf[result]\n cur_gbeg = gbeg[result]\n cur_gend = gend[result]\n cur_gfor = gfor[result]\n cur_gsta = gsta[result]\n cur_gdif = gdif[result]\n fobj = open(dbpaths['gff'])\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gfor, cur_gsta, cur_gdif)\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes", "def processFileLocInfo(self):\n\t\t# If bit 1 of the flags field is set\n\t\tif int(self.header['flags']) & 2 > 0:\n\n\t\t\t# Read size of file location info\n\t\t\ttxt = self.fpLnk.read(4)\n\t\t\tself.file_loc = {}\n\t\t\tself.file_loc['size'] = struct.unpack(\"<I\", txt)[0]\n\t\t\t\t\n\t\t\t# Read size of file location info and prepend the previous read value.\n\t\t\t# Txt was prepended to remove a special condition case need to skip\n\t\t\t# the re-reading of the size field.\n\t\t\tfile_loc_raw = txt + self.fpLnk.read(self.file_loc['size'] - 4)\n\n\t\t\t# Loop throuh predefine file format, extracting field into a new data\n\t\t\t# file location header dictionary.\n\t\t\t# XXX: do we really want to clobber the dictionary we just created\n\t\t\t# and not self.file_loc.update(parseStructuredData())?\n\t\t\tself.file_loc = parseStructuredData(file_loc_raw, FILE_LOC_HEADER)\n\t\t\n\t\t\t# Process local volume info if flag is set\n\t\t\tif (self.file_loc['flags'] & 1) > 0:\n\t\t\t\tlocalVolTbl = processVolTbl(file_loc_raw, \n\t\t\t\t\tself.file_loc['local_vol_info_offset'], LOCAL_VOL_TBL)\n\t\t\t\tself.file_loc['localVolTbl'] = localVolTbl\n\t\t\t\toffset = self.file_loc['local_base_path_offset']\n\t\t\t\tbasePathname = file_loc_raw[offset:].split('\\x00')[0]\n\t\t\t\tself.file_loc['basePathname'] = basePathname\n\t\t\telse:\n\t\t\t\tself.file_loc['localVolTbl'] = None\n\n\t\t\t# Process net volume info if flag is set\n\t\t\tif (self.file_loc['flags'] & 2) > 0:\n\t\t\t\tnetVolTbl = processVolTbl(file_loc_raw, \n\t\t\t\t\tself.file_loc['net_vol_info_offset'], NET_VOL_TBL)\n\t\t\t\tself.file_loc['netVolTbl'] = netVolTbl\n\t\t\telse:\n\t\t\t\tself.file_loc['netVolTbl'] = None\n\n\t\t\t# Process remaining portion of pathname\n\t\t\toffset = self.file_loc['remain_pathname_offset']\n\t\t\tremainPathname = file_loc_raw[offset:].split('\\x00')[0]\n\t\t\tself.file_loc['remainPathname'] = remainPathname", "def _find_named_files(self):\n for name, description in self.named_files.iteritems():\n name = name.format(job_name=self.job_name)\n f_path = 
'{}/{}'.format(self.rism3d_folder, name)\n if os.path.isfile(f_path):\n self.file_path_dic[description] = f_path\n else:\n self._not_found_error(f_path)", "def get_file_data(filename):", "def _sort_data ( self, resample_opts ):\n self._data_pntr = []\n for refl_file in self.atcorr_refl:\n if os.path.exists ( os.path.join ( self.datadir, refl_file ) ):\n if resample_opts is None:\n fname = os.path.join ( self.datadir, refl_file )\n else:\n fname = reproject_cut ( os.path.join ( self.datadir, refl_file ),\n **resample_opts )\n self._data_pntr.append (\n gdal.Open ( fname ) )\n else:\n\n raise IOError, \"GDAL cannot open this file: %s\" % ( os.path.join (\n self.datadir, refl_file) )\n self.resample_opts = resample_opts", "def _sort_data ( self, resample_opts ):\n self._data_pntr = []\n for refl_file in self.atcorr_refl:\n if os.path.exists ( os.path.join ( self.datadir, refl_file ) ):\n if resample_opts is None:\n fname = os.path.join ( self.datadir, refl_file )\n else:\n fname = reproject_cut ( os.path.join ( self.datadir, refl_file ),\n **resample_opts )\n self._data_pntr.append (\n gdal.Open ( fname ) )\n else:\n\n raise IOError, \"GDAL cannot open this file: %s\" % ( os.path.join (\n self.datadir, refl_file) )\n self.resample_opts = resample_opts", "def map_files(key):\n \n datadir=os.path.join(os.path.dirname(__file__),'ncnr_sample_data')\n filedict={'empty_1m':os.path.join(datadir,'SILIC001.SA3_SRK_S101'),\n 'empty_4m':os.path.join(datadir,'SILIC002.SA3_SRK_S102'),\n 'empty_cell_1m':os.path.join(datadir,'SILIC003.SA3_SRK_S103'),\n 'blocked_1m':os.path.join(datadir,'SILIC004.SA3_SRK_S104'),\n 'trans_empty_cell_4m':os.path.join(datadir,'SILIC005.SA3_SRK_S105'),\n 'trans_sample_4m':os.path.join(datadir,'SILIC006.SA3_SRK_S106'),\n 'blocked_4m':os.path.join(datadir,'SILIC007.SA3_SRK_S107'),\n 'empty_cell_4m':os.path.join(datadir,'SILIC008.SA3_SRK_S108'),\n 'sample_1m':os.path.join(datadir,'SILIC009.SA3_SRK_S109'),\n 'sample_4m':os.path.join(datadir,'SILIC010.SA3_SRK_S110'),\n 'mask':os.path.join(datadir,'DEFAULT.MASK'),\n 'div':os.path.join(datadir,'PLEX_2NOV2007_NG3.DIV'),\n }\n return filedict[key]", "def _process_dir( d, files, columns, _file, mapping ):\n\t\tfhandles = {}\n\t\t\n\t\tif( d == \"template\"):\n\t\t\treturn\n\n\t\tfor f in files:\n\t\t\ttry:\n\t\t\t\tfhandles[f] = open(d + \"/\" + mapping[\"alias\"][f], 'r')\n\t\t\texcept IOError:\n\t\t\t\tprint (\"Error on opening file: \" + d + \"/\" + mapping[\"alias\"][f])\n\t\t\t\tsys.exit(1)\n\n\t\tflag = True\t\t\n\n\t\twhile( flag ):\n\n\t\t\tfor h in fhandles.values():\n\t\t\t\tline = h.readline()\n\n\t\t\t\tif( not line ):\n\t\t\t\t\tflag = False\n\t\t\t\t\tbreak\n\n\t\t\t\tline = line.rstrip()\n\n\t\t\t\trlist = re.findall(r\"-?\\d*\\.{0,1}\\d+\", line)\n\n\t\t\t\tfileName = os.path.split(h.name)[1]\n\n\t\t\t\trkeys = mapping[\"columns\"][fileName]\n\n\t\t\t\trobj = row()\n\t\t\t\tlocals()[mapping[\"reversealias\"][fileName]] = robj\t\t\t\n\n\t\t\t\ti = 0\t\t\t\n\t\t\t\tfor k in rkeys:\t\t\t\t\t\n\t\t\t\t\tif(rkeys[i] == \"timestamp\"):\n\t\t\t\t\t\tsetattr(robj, rkeys[i], int(rlist[i]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif( rkeys[i].endswith(\"*\")):\n\t\t\t\t\t\t\tcol = rkeys[i].replace(\"*\",\"\")\n\n\t\t\t\t\t\t\tif(not hasattr(robj, col)):\n\t\t\t\t\t\t\t\tl = []\n\t\t\t\t\t\t\t\tsetattr(robj, col, l)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tl = getattr(robj, col)\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tl.append(float(rlist[i]))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsetattr(robj, rkeys[i], float(rlist[i]))\t\n\t\t\t\t\ti = i + 1\n\n\t\t\tif( 
not flag ):\n\t\t\t\tbreak\n\n\t\t\tfor c in columns:\n\t\t\t\tval = eval(c.strip())\n\t\t\t\tprint( float(val) , end = ' ', file = _file)\n\t\t\t\t\n\t\t\tprint('', file = _file)\n\t\t\t\n\t\t\t_file.flush()\n\n\t\tfor h in fhandles.values():\n\t\t\th.close()", "def load_pfile(self, **kwargs):\r\n pfile = kwargs['pfile']\r\n filetype = kwargs['filetype']\r\n\r\n # Loads the pfile and finds the indices, still need to sync and parse.\r\n self.pfile = PFILE(pfile, filetype=filetype)\r\n # self.pfile.sync(tstep='auto')\r", "def seqff(self):\r\n\r\n start = time.time()\r\n\r\n # load bininfo\r\n bininfo = load_bininfo(self.bininfodata_loc)\r\n\r\n # load input files\r\n if os.path.isdir(self.input_loc):\r\n input_list = [self.input_loc + x for x in os.listdir(self.input_loc)]\r\n\r\n elif os.path.isfile(self.input_loc):\r\n input_list = [self.input_loc]\r\n\r\n else:\r\n raise FileNotFoundError(\"error occurred : inputData is not a Directory or File\")\r\n\r\n for i, file in enumerate(input_list):\r\n filetype = file.split(\".\")[-1]\r\n # filetype : 'sam' or 'bam' or 'newtemp'\r\n if 'sam' in filetype:\r\n bincount = load_sam(file)\r\n\r\n elif 'newtemp' in filetype:\r\n bincount = load_counts(file)\r\n file = file.replace(\".newtemp\", \"\") # TEMP .newtemp -> .bam\r\n\r\n elif 'bam' in filetype:\r\n bincount = load_bam(file)\r\n\r\n else:\r\n continue\r\n\r\n #CREATE newtemp file in \"output_loc\"/newtemp/\r\n create_newtemp(bincount, file, self.newtemp_loc)\r\n\r\n newtemp = pd.DataFrame.from_dict(bincount, orient='index')\r\n newtemp.reset_index(level=0, inplace=True)\r\n newtemp.rename(columns={'index': 'binName', 0: 'counts'}, inplace=True)\r\n\r\n temp_bininfo = bininfo.copy(deep=True)\r\n temp_bininfo = temp_bininfo.merge(newtemp, on='binName',\r\n how='left') # missing value : NaN, not NA in pandas\r\n temp_bininfo['counts'] = temp_bininfo['counts'].fillna(0)\r\n\r\n temp_bininfo.sort_values(by='binorder', inplace=True)\r\n temp_bininfo.reset_index(drop=True)\r\n\r\n ####DATA PROCESSING #######################\r\n autosomebinsonly = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != 'NA') and \\\r\n (float(temp_bininfo['GC'][index]) > 0.316) and \\\r\n (temp_bininfo['CHR'][index] != 'chrX') and \\\r\n (temp_bininfo['CHR'][index] != 'chrY')\r\n autosomebinsonly.append(boolean)\r\n autosomebinsonly = pd.Series(autosomebinsonly)\r\n\r\n alluseablebins = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != \"NA\") and (float(temp_bininfo['GC'][index]) > 0.316)\r\n alluseablebins.append(boolean)\r\n alluseablebins = pd.Series(alluseablebins)\r\n\r\n #CREATE alluseablebins file in \"output_loc\"/alluseablebins\r\n #create_alluseablebins(alluseablebins, file, self.alluseablebins_loc)\r\n\r\n sum_counts = pd.Series(temp_bininfo['counts'])\r\n sum_counts = sum_counts[autosomebinsonly].sum(skipna=True)\r\n\r\n autoscaledtemp = pd.Series(temp_bininfo['counts'].loc[(autosomebinsonly)],\r\n copy=True) / sum_counts # NA-related code removed\r\n allscaledtemp = pd.Series(temp_bininfo['counts'].loc[(alluseablebins)], copy=True) / sum_counts\r\n\r\n gc_index = {}\r\n cnt = 0\r\n for index, isauto in enumerate(autosomebinsonly):\r\n if isauto:\r\n if temp_bininfo['GC'].iat[index] in gc_index:\r\n gc_index[temp_bininfo['GC'].iat[index]].append(float(autoscaledtemp.iat[cnt]))\r\n cnt += 1\r\n\r\n else:\r\n gc_index[temp_bininfo['GC'].iat[index]] = [float(autoscaledtemp.iat[cnt])]\r\n cnt += 1\r\n\r\n key_list = []\r\n val_list = []\r\n for key, val 
in gc_index.items():\r\n key_list.append(key)\r\n val_list.append(np.median(val))\r\n\r\n loess_var = loess(key_list, val_list) # default span : 0.75\r\n loess_var.fit()\r\n # y = loess.loess_prediction(newData, loessVar)\r\n # temp_loessPredict.loess_debugging(loessVar)\r\n\r\n ###prediction###\r\n loess_x = [float(gc) for index, gc in enumerate(temp_bininfo['GC']) if (alluseablebins[index])]\r\n # print(temp_bininfo['GC'])\r\n loess_fitted = loess_var.predict(loess_x)\r\n loess_fitted = list(loess_fitted.values)\r\n # print(loess_fitted)\r\n\r\n median_autoscaledtemp = np.median(autoscaledtemp)\r\n median_autoscaledtemp = float(median_autoscaledtemp) # for fixed constant\r\n\r\n normalizedbincount = [(x + (median_autoscaledtemp - loess_fitted[index])) for index, x in\r\n enumerate(allscaledtemp)]\r\n\r\n #CREATE normalizedbincount in \"output_loc\"/normalizedbincount\r\n create_normalizedbincount(normalizedbincount, file, self.normalizedbincount_loc)\r\n\r\n bincounts = pd.Series(data=np.repeat(a=0.0, repeats=61927), index=temp_bininfo['binName'], dtype=np.float64)\r\n\r\n sum_normalizedbincount = sum([val for val in normalizedbincount if not math.isnan(val)])\r\n sum_normalizedbincount = float(sum_normalizedbincount) # deep copy temporarily\r\n\r\n cnt = 0\r\n for index, x in enumerate(alluseablebins):\r\n if x == True:\r\n data = (normalizedbincount[cnt] / sum_normalizedbincount) * len(normalizedbincount)\r\n bincounts.iat[index] = data\r\n cnt += 1\r\n\r\n #CREATE bincounts in \"output_loc\"/bincounts\r\n create_bincounts(bincounts, file, self.bincounts_loc)\r\n\r\n wrsc = self.prediction(bincounts, self.B, self.mu, self.parameter_1, self.parameter_2)\r\n enet = np.dot(bincounts, (self.elnetbeta)) + (self.elnetintercept)\r\n ff = (wrsc+enet) / 2\r\n\r\n result_lines = list()\r\n result_lines.append(\"SeqFF\\tEnet\\tWRSC\")\r\n result_lines.append(\"{}\\t{}\\t{}\".format(ff, enet, wrsc))\r\n\r\n #CREATE results of seqff (seqff paper result covered) in \"output_loc\"/results\r\n create_results(result_lines, file, self.results_loc)\r\n\r\n end = time.time()\r\n elapsed = end - start\r\n h = int(elapsed) // 3600\r\n m = (int(elapsed) - (h * 3600)) // 60\r\n s = (int(elapsed) % 60)\r\n print(\"elapsed time: %d hr %d min %d sec\" % (h, m, s))\r\n print(\"elapsed :\", elapsed)\r\n print(\"progress : {} / {}\".format(i + 1, self.progress))", "def _get_best_ref(self, header_in):\n header_in = dict(header_in)\n log.verbose(\"Getting bestrefs:\", self.basename, verbosity=55)\n expr_header = utils.condition_header_keys(header_in)\n self.check_rmap_omit(expr_header) # Should bestref be omitted based on rmap_omit expr?\n self.check_rmap_relevance(expr_header) # Should bestref be set N/A based on rmap_relevance expr?\n # Some filekinds, .e.g. 
ACS biasfile, mutate the header\n header = self._precondition_header(self, header_in) # Execute type-specific plugin if applicable\n header = self.map_irrelevant_parkeys_to_na(header) # Execute rmap parkey_relevance conditions\n try:\n bestref = self.selector.choose(header)\n except Exception as exc:\n # Check conditions for Do Not Reprocess dataset parameters, set to NA if True\n dnr = self.dnr_check(header)\n if dnr is True:\n log.verbose(\"DNR dataset identified - setting reference to NA\", str(exc), verbosity=55)\n raise crexc.IrrelevantReferenceTypeError(\"Reference type not required for DNR dataset.\") from exc\n\n log.verbose(\"First selection failed:\", str(exc), verbosity=55)\n header = self._fallback_header(self, header_in) # Execute type-specific plugin if applicable\n try:\n if header:\n header = self.minimize_header(header)\n log.verbose(\"Fallback lookup on\", repr(header), verbosity=55)\n header = self.map_irrelevant_parkeys_to_na(header) # Execute rmap parkey_relevance conditions\n bestref = self.selector.choose(header)\n else:\n raise\n except Exception as exc:\n log.verbose(\"Fallback selection failed:\", str(exc), verbosity=55)\n if self._reffile_required in [\"YES\", \"NONE\"]:\n log.verbose(\"No match found and reference is required:\", str(exc), verbosity=55)\n raise\n else:\n log.verbose(\"No match found but reference is not required:\", str(exc), verbosity=55)\n raise crexc.IrrelevantReferenceTypeError(\"No match found and reference type is not required.\") from exc\n log.verbose(\"Found bestref\", repr(self.instrument), repr(self.filekind), \"=\", repr(bestref), verbosity=55)\n if MappingSelectionsDict.is_na_value(bestref):\n raise crexc.IrrelevantReferenceTypeError(\"Rules define this type as Not Applicable for these observation parameters.\")\n if MappingSelectionsDict.is_omit_value(bestref):\n raise crexc.OmitReferenceTypeError(\"Rules define this type to be Omitted for these observation parameters.\")\n return bestref", "def find_new_files(self):\r\n in_file_system = self._find_data_files(self.basepath,\r\n self.data_file_extension, [])\r\n status_msg('Search for data files. Found: {0}'.format(\r\n len(in_file_system)), True\r\n )\r\n status_msg('Check that there are no duplicate filenames', True)\r\n in_db = self._get_files_in_db()\r\n status_msg('Get list of files in data base. Found: {0}'.format(\r\n len(in_db)), True\r\n )\r\n for filename in in_file_system:\r\n # Filename has been added to the database both where a part of the\r\n # path was a part of the unique name and where it was not,\r\n # therefore we need to check for both AND we ignore filenames that\r\n # start with IGNORE\r\n if form_unique_name(filename) not in in_db and\\\r\n os.path.basename(filename) not in in_db and not\\\r\n os.path.basename(filename).startswith('IGNORE'):\r\n self.new_data_files.append(filename)\r\n\r\n print # Adds a new line to the statusses before parsing\r\n status_msg('Collecting new files. 
Found: {0}'.format(\r\n len(self.new_data_files)), True\r\n )", "def getFileLoc(self):\n\t\trval = []\n\t\tlocalVolTbl = self.file_loc['localVolTbl']\n\t\tnetVolTbl = self.file_loc['netVolTbl']\n\t\t\n\t\tif localVolTbl != None:\n\t\t\trval.extend((FILE_LOC[0],\n\t\t\t\tFILE_LOC[1] + self.file_loc['basePathname'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname']))\n\t\n\t\t\tfor ii in range(len(VOL_TYPE)):\n\t\t\t\tif (self.header['file_attributes'] & (2 ** (ii + 1))) > 0:\n\t\t\t\t\trval.append(VOL_TYPE[ii])\n\t\t\t\t\n\t\t\trval.extend((FILE_LOC[2] + localVolTbl['volume_label'],\n\t\t\t\tFILE_LOC[3] + str(localVolTbl['vol_serial_num'])))\t\t\n\t\n\t\tif netVolTbl != None:\n\t\t\trval.append(FILE_LOC[4] + netVolTbl['net_sharename'] + \\\n\t\t\t\t\"\\\\\" + self.file_loc['remainPathname'])\n\t\treturn rval", "def repair(self,fileRefs):\n #--Progress/Logging\n log = self.log\n logBDD = _('BAD DELETE>>DELETED %d %d %s')\n logBRR = _('BAD REF>>REMATCHED %d %d %s %d')\n logBRN = _('BAD REF>>NO MASTER %d %d %s')\n logBRD = _('BAD REF>>DOUBLED %d %d %s')\n #----\n isMod = (fileRefs.fileInfo.isMod())\n reObjNum = re.compile('[0-9A-Z]{8}$')\n emptyDict = {}\n cellRefIds = self.cellRefIds\n cntRepaired = 0\n cntDeleted = 0\n cntUnnamed = 0\n for cell in fileRefs.cells:\n #--Data arrays\n usedKeys = []\n badDeletes = []\n badObjects = []\n doubleObjects = []\n refMods = {}\n #--Cell Id\n cellId = cell.getId()\n log.setHeader(cellId)\n #--Debris cell name?\n if not isMod:\n cellName = cell.cellName\n if not (cell.flags & 1) and cellName and (cellName not in self.extCellNames):\n log(_(\"Debris Cell Name: \")+cellName)\n cell.flags &= ~32\n cell.cellName = ''\n cell.setChanged()\n cntUnnamed += 1\n refIds = cellRefIds.get(cellId,emptyDict) #--Empty if cell is new in fileRefs.\n objects = cell.getObjects()\n for object in objects.list():\n (iMod,iObj,objId,objRecords) = object[:4]\n refKey = (iMod,iObj)\n #--Used Key?\n if refKey in usedKeys:\n log(logBRD % object[:3])\n objects.remove(object)\n doubleObjects.append(object)\n cell.setChanged()\n #--Local object?\n elif not iMod:\n #--Object Record\n for objRecord in objRecords:\n #--Orphan delete?\n if objRecord.name == 'DELE':\n log(logBDD % object[:3])\n objects.remove(object)\n badDeletes.append(object)\n cntDeleted += 1\n cell.setChanged()\n break\n #--Not Deleted?\n else: #--Executes if break not called in preceding for loop.\n usedKeys.append(refKey)\n #--Modified object?\n else:\n refId = refIds.get(refKey,None)\n objIdBase = reObjNum.sub('',objId) #--Strip '00001234' id num from object\n #--Good reference?\n if refId and (isMod or (refId == objIdBase)):\n usedKeys.append(refKey)\n #--Missing reference?\n else:\n badObjects.append(object)\n cell.setChanged()\n #--Fix bad objects.\n if badObjects:\n #--Build rematching database where iMod = refMods[(iObj,objId)]\n refMods = {}\n repeatedKeys = []\n for refId in refIds.keys():\n (iMod,iObj) = refId\n objId = refIds[refId]\n key = (iObj,objId)\n #--Repeated Keys?\n if key in refMods: \n repeatedKeys.append(key)\n else:\n refMods[key] = iMod\n #--Remove remaps for any repeated keys\n for key in repeatedKeys:\n if key in refMods: del refMods[key]\n #--Try to remap\n for object in badObjects:\n (iMod,iObj,objId) = object[:3]\n objIdBase = reObjNum.sub('',objId) #--Strip '00001234' id num from object\n refModsKey = (iObj,objIdBase)\n newMod = refMods.get(refModsKey,None)\n #--Valid rematch?\n if newMod and ((newMod,iObj) not in usedKeys):\n log(logBRR % (iMod,iObj,objId,newMod))\n 
usedKeys.append((newMod,iObj))\n objects.replace(object,fileRefs.remapObject(object,newMod))\n cntRepaired += 1\n elif not newMod:\n log(logBRN % tuple(object[:3]))\n objects.remove(object)\n cntDeleted += 1\n else:\n log(logBRD % tuple(object[:3]))\n objects.remove(object)\n cntDeleted += 1\n #--Done\n fileRefs.updateScptRefs()\n return (cntRepaired,cntDeleted,cntUnnamed)", "def lookup(file, category='undefined'):\n path = os.path.join(self.base_path, doc, file)\n existing_path = os.path.exists(path) and path\n link = doc+'/'+file\n self.log.debug(' %s file %s' % (category, existing_path or\n path+\" (not found)\"))\n return existing_path, link", "def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj", "def get_patches(df, data_dir, img_type = \"ADC\", data_type = \"Train\"):\n print(\"Cleaning Output Directory\")\n os.system(\"rm -rf %s\" % (data_dir + \"/patches/\" + img_type ) )\n os.system(\"mkdir %s\" % (data_dir+\"/patches/\" + img_type) )\n os.system(\"mkdir %s\" % (data_dir+\"/patches/\" + img_type + \"/POS\") )\n os.system(\"mkdir %s\" % (data_dir+\"/patches/\" + img_type + \"/NEG\") )\n\n hlen = 113\n header = img_type + \" - \" + data_type\n prl = (hlen//2-len(header)//2) - 1\n prr = hlen - prl - len(header) - 2\n print(\"-\"*hlen)\n print(\"#\" + \" \"*prl + header + \" \"*prr + \"#\")\n print(\"-\"*hlen)\n\n print(\"{0:<15s}\\t{1:>5s}\\t{2:>10s}\\t{3:>10s}\\t{4:>6s}\\t{5:>6s}\\t{6:>6s}\\t{7:>6s}\"\\\n .format(\"PID\", \"#Find\", \"#Patch\", \"#Positive\", \"#Negative\", \"Load time\", \"Extract Time\", \"Save Time\"))\n overall = time.time()\n for pid in df.ProxID.unique():\n load_start = time.time()\n print(\"{0:<15s}\"\n .format(\"Loading\"), end = \"\\r\", flush = True)\n\n temp = df.loc[df.ProxID == pid]\n try:\n # Get ADC, BVAL Dirs\n names = temp.Name.values\n adc_dir = temp[[name.endswith(\"ADC0\") for name in names]].DCMSerUID.values[0]\n bval_dir = temp[[name.endswith(\"BVAL0\") for name in names]].DCMSerUID.values[0]\n dicom_find = list(temp[[name.endswith(\"BVAL0\") for name in names]].ijk.unique())\n dicom_find = [[int(coord) for coord in f.split(\" \")] for f in dicom_find]\n\n except:\n continue\n\n dicom_path = data_dir + \"/\".join([\"/raw/dicom\", pid])\n # ADC Processing\n if img_type == \"ADC\":\n imgs, findings = load_dicom(dicom_path, adc_dir, dicom_find)\n elif img_type == \"BVAL\":\n imgs, findings = load_dicom(dicom_path, bval_dir, dicom_find)\n elif img_type == \"KTRANS\":\n ktrans_path = data_dir + \"/\".join([\"/raw/ktrans\", pid, pid])\n imgs = load_ktrans(ktrans_path)\n findings = []\n else:\n print(\"image type not defined! 
ABORT\")\n return -1\n\n ltime = time.time() - load_start\n\n ext_start = time.time()\n fcount = len(dicom_find)\n print(\"{0:<15s}\\t{1:>5d}\".format(\"Extracting\", fcount), end = \"\\r\", flush = True)\n if img_type == \"ADC\" or img_type == \"BVAL\":\n # Patches without, Pathces with findings\n pwof, pwf = extract_patches(imgs, PATCH_SHAPE, findings)\n elif img_type == \"KTRANS\":\n print(\"KTRANS PROCESSING NOT YET IMPLEMENTED! ABORT\")\n return -1\n etime = time.time() - ext_start\n\n\n save_start = time.time()\n\n # Printing\n p = len(pwf)\n n = len(pwof)\n\n print(\"{0:<15s}\\t{1:>5d}\\t{2:>10s}\\t{3:>10s}\\t {4:>6s}\\t {5:>6s}\\t {6:>6s}\"\\\n .format(\"Saving\", fcount, str(p+n), str(p), str(n), str(ltime)[:6], str(etime)[:6]), end = \"\\r\", flush = True)\n\n pwf = np.array(pwf)\n pwof = np.array(pwof)\n n = save_patches(pid, fcount, data_dir, img_type, pwf, pwof)\n\n stime = time.time() - save_start\n print(\"{0:15s}\\t{1:>5d}\\t{2:>10s}\\t{3:>10s}\\t {4:>6s}\\t {5:>6s}\\t {6:>6s}\\t {7:>6s}\"\\\n .format(pid, fcount, str(n+n), str(n), str(n), str(ltime)[:6], str(etime)[:6], str(stime)[:6]))\n\n #break\n\n otime = time.time() - overall\n\n print(\"-\"*hlen)\n header = \"time taken:\" + \"{0:4.5f}\".format(otime)\n prl = (hlen//2-len(header)//2) - 1\n prr = hlen - prl - len(header) - 2\n print(\"#\" + \" \"*prl + header + \" \"*prr + \"#\")\n print(\"-\"*hlen)\n\n return 0", "def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))", "def makeSNPMap(snpfile, referencemap):\n\tbimfile = open(snpfile, \"r\") # open the input file\n\tmapfile = open(referencemap, \"r\")\n\toutfilename = re.sub(r'\\.bim', '.markerpos', snpfile)\n\tposfilename = re.sub(r'\\.bim', '.snp_locations', snpfile)\n\toutfile = open(outfilename, \"w\")\n\tposfile = open(posfilename, \"w\")\n\t# Initialize variables \n\tpreviousCM = 0\n\tpreviousPos = 0\n\ti=0\n\tbimline = bimfile.readline().strip().split() # Pos 1 is rsID, Pos 3 is location\n\tfor mapline in mapfile:\n\t\tif len(bimline) == 0:\n\t\t\tbreak\t\t\n\t\tif i==0:\n\t\t\ti+=1\n\t\t\tcontinue\n\t\tmapline = mapline.strip().split()\n\t\t# Three cases: 1. 
SNP pos gt map pos\n\t\twhile int(bimline[3]) < int(mapline[0]): # This means that the BIM file is behind the map file, so need to write output here with the interopolation\n\t\t# of the previous values\n\t\t\tdiffCM = float(mapline[2]) - float(previousCM)\n\t\t\tdiffpos = float(mapline[0]) - float(previousPos)\n\t\t\tmulti = (float(bimline[3]) - float(previousPos))/diffpos\n\t\t\tcmout = multi*diffCM + float(previousCM)\n\t\t\tif cmout < 0: # this should not happen so if it does dump data and quit\n\t\t\t\tprint i\n\t\t\t\tprint cmout\n\t\t\t\tprint diffCM\n\t\t\t\tprint diffpos\n\t\t\t\tprint previousCM\n\t\t\t\tprint previousPos\n\t\t\t\tprint bimline\n\t\t\t\tprint mapline\n\t\t\t\texit()\n\n\t\t\toutfile.write( str(cmout) +\"\\n\")\n\t\t\tposfile.write( str(bimline[3]) + \"\\t\" + str(cmout) + \"\\n\")\n\t\t\tbimline = bimfile.readline().strip().split()\n\t\t\tif len(bimline) == 0:\n\t\t\t\tbreak\t\t\n\t\tif len(bimline) ==0:\n\t\t\tbreak\n\t\tif bimline[3] == mapline[0]: # write out genetic position\n\t\t\toutfile.write( mapline[2]+ \"\\n\")\n\t\t\tposfile.write( str(bimline[3]) + \"\\t\" + mapline[2] + \"\\n\")\n\t\t\tbimline = bimfile.readline().strip().split()\n\t\n\t\t#if bimline[3] > mapline[0]: # read next line in the map file\n\t\t#\tpreviousCM = mapline[2]\n\t\t#\tpreviousPos = mapline[0]\n\t\t#\tcontinue\n\t\t# Hits this and continues if bimline is above mapline\n\t\tpreviousCM = mapline[2]\n\t\tpreviousPos = mapline[0]\n\t\ti += 1\n\toutfile.close()\n\treturn(outfile.name)", "def readdata(self, filepaths):\n pass", "def test_outpath_multi(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths]\n\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def process_file(file_name):\n _,fn = os.path.split(file_name)\n pdb_id = get_pdb_id(fn)\n # Get files in pdb format even when at LBL, to avoid issues with phenix.reduce\n if ('_mirror' in file_name) or (file_name == pdb_id):\n file_name = pdb_id + '.pdb'\n file_to_clean = []\n if not os.path.isfile(file_name):\n # leave file in folder if it was already there\n # cmd = 'phenix.fetch_pdb {} --all'.format(pdb_id)\n cmd = 'phenix.fetch_pdb {}'.format(pdb_id)\n r = easy_run.go(cmd,join_stdout_stderr=False)\n for fn in r.stdout_lines:\n fn = os.path.split(fn)[-1]\n if '.pdb' in fn: file_name = fn\n file_to_clean.append(fn)\n fn = fn.replace('.pdd','_with_h.pdb')\n file_to_clean.append(fn)\n return file_name,pdb_id,file_to_clean", "def _findfile(self, path):\n return DataSource._findfile(self, self._fullpath(path))", "def get_fasta_fps(fasta_dir, fasta_files):\r\n\r\n fasta_filepaths = []\r\n for curr_file in fasta_files:\r\n curr_fp = join(fasta_dir, curr_file)\r\n try:\r\n file_test = open(curr_fp, \"U\")\r\n file_test.close()\r\n except IOError:\r\n raise IOError(\"Unable to open %s\" % curr_fp)\r\n fasta_filepaths.append(curr_fp)\r\n\r\n return fasta_filepaths", "def find_data_files(source, target, patterns):\r\n if glob.has_magic(source) or glob.has_magic(target):\r\n raise ValueError(\"Magic not allowed in src, target\")\r\n ret = {}\r\n for pattern in patterns:\r\n pattern = os.path.join(source, pattern)\r\n for filename in glob.glob(pattern):\r\n if 
os.path.isfile(filename):\r\n targetpath = os.path.join(target, os.path.relpath(filename,source))\r\n path = os.path.dirname(targetpath)\r\n ret.setdefault(path, []).append(filename)\r\n return sorted(ret.items())", "def loadfile(self,fd):\n pat=re.compile(r'!')\n f=self.files.index(fd)\n index=0\n newstack=0\n fnc={}\n inc={}\n thisline=[]\n for line in fd:\n line=line.strip()\n if pat.search(line):\n if newstack>0 and index>1:\n count=int(thisline[index-1])\n for i in range(index-1):\n fn=thisline[i]\n fn=re.sub('^.*(: |`)','',fn)\n fn=re.sub('\\/.*$','',fn)\n inc[fn]=inc.get(fn,0)+1\n fn=re.sub('\\+.*$','',fn)\n fnc[fn]=fnc.get(fn,0)+1\n if i==0:\n self.excl[f][fn]=self.excl[f].get(fn,0)+count\n else:\n fn=fn+\"+\"+prefunc\n prefunc=fn\n self.total[f]+=count\n for i in fnc:\n self.incl[f][i]=self.incl[f].get(i,0)+count*fnc[i]\n for i in inc:\n self.inst[f][i]=self.inst[f].get(i,0)+count*inc[i]\n self.caller_callee[f][fn]=self.caller_callee[f].get(fn,0)+count\n fnc.clear()\n inc.clear()\n del thisline[:]\n index=0\n\n newstack+=1\n continue\n\n if newstack>0:\n thisline += [line]\n index+=1", "def relatedFiles(fname):\n\n # We want to return all files in the same\n # directory which have the following name:\n\n #\n # [prefix].*.[type].gii\n #\n # where\n # - prefix is the file prefix, and which\n # may include periods.\n #\n # - we don't care about the middle\n #\n # - type is func, shape, label, or time\n\n # We determine the unique prefix of the\n # given file, and back-up to the most\n # recent period. Then search for other\n # files which have that same (non-unique)\n # prefix.\n prefix = fslpath.uniquePrefix(fname)\n lastdot = prefix.rfind('.')\n prefix = prefix[:lastdot]\n\n if lastdot == -1:\n return []\n\n funcs = list(glob.glob('{}*.func.gii' .format(prefix)))\n shapes = list(glob.glob('{}*.shape.gii'.format(prefix)))\n labels = list(glob.glob('{}*.label.gii'.format(prefix)))\n times = list(glob.glob('{}*.time.gii' .format(prefix)))\n\n return funcs + shapes + labels + times", "def test_get_output_filepaths(self):\r\n\r\n output_dir = \".\"\r\n\r\n fasta_fp = \"seqs.fna\"\r\n\r\n qual_fp = \"seqs.qual\"\r\n\r\n expected_fasta_fp = \"./seqs_filtered.fasta\"\r\n expected_qual_fp = \"./seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)\r\n\r\n # Test for relative paths\r\n output_dir = \"test/\"\r\n\r\n fasta_fp = \"../seqs.fna\"\r\n\r\n qual_fp = \"quality_scores/seqs.qual\"\r\n\r\n expected_fasta_fp = \"test/seqs_filtered.fasta\"\r\n expected_qual_fp = \"test/seqs_filtered.qual\"\r\n\r\n actual_fasta_fp, actual_qual_fp =\\\r\n get_output_filepaths(output_dir, fasta_fp, qual_fp)\r\n\r\n self.assertEqual(actual_fasta_fp, expected_fasta_fp)\r\n self.assertEqual(actual_qual_fp, expected_qual_fp)", "def match_files(gold_folder, sys_folder):\n\n print \"Compiling files...\"\n # Get a list of files in the folders supplied.\n gold_files = compile_files(gold_folder) # nnnnG.xml\n sys_files = compile_files(sys_folder) # nnnnXXN.xml\n\n print \"%d gold files found in %s\" % (len(gold_files), base_name(gold_folder))\n print \"%d system files found in %s\\n\" % (len(sys_files), base_name(sys_folder))\n\n print \"Matching system files to gold files...\"\n # Match them up, where nnnn must be common in a pair.\n pairs = [(f1, f2) for f1 in gold_files for f2 in sys_files\n if 
base_name(f2).startswith(base_name(f1).split(\"GE.\")[0])]\n\n return pairs" ]
[ "0.5827411", "0.55954766", "0.5543815", "0.5515996", "0.5477217", "0.5447455", "0.5424407", "0.54009223", "0.5372652", "0.5370227", "0.5364059", "0.5320857", "0.5294063", "0.52870816", "0.5261717", "0.5260605", "0.5242285", "0.52146846", "0.51936823", "0.51909316", "0.51843077", "0.51814646", "0.51709306", "0.5165626", "0.5162636", "0.514549", "0.51454705", "0.5145386", "0.5136396", "0.51355034", "0.5132417", "0.5129304", "0.5118344", "0.5082502", "0.50729793", "0.50727063", "0.5069143", "0.5057468", "0.50521314", "0.5040488", "0.5029407", "0.5028556", "0.50043476", "0.49936298", "0.4989722", "0.49844822", "0.4982958", "0.49807712", "0.49725777", "0.49673232", "0.49513447", "0.49499515", "0.49469876", "0.49438235", "0.49404913", "0.49401146", "0.4925999", "0.4924784", "0.49238768", "0.49235725", "0.4923556", "0.49222544", "0.4901", "0.48992905", "0.48983315", "0.48972124", "0.48942253", "0.48932406", "0.48826376", "0.48797518", "0.48784354", "0.48677474", "0.48417178", "0.48352572", "0.482947", "0.4827602", "0.4827602", "0.4821091", "0.4819069", "0.48187828", "0.48168805", "0.48153335", "0.48102313", "0.48023677", "0.4796121", "0.47904325", "0.4783611", "0.47828487", "0.47779816", "0.477778", "0.47713557", "0.47703037", "0.47682384", "0.4765062", "0.4759154", "0.4758387", "0.47567472", "0.47513777", "0.47509578", "0.47465235" ]
0.6289985
0
Order the epis and assign names defined in the template files.
def _GetEpiOrder(self): self.epi_series.sort() for series in self.epi_series: self.GetEpiAcqTimes(series) self.AssignEpiNames()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def get_template_names(self): \n product = self.get_object()\n names = ['%s/detail-for-upc-%s.html' % (self.template_folder, product.upc), \n '%s/detail-for-class-%s.html' % (self.template_folder, product.item_class.name.lower()),\n '%s/detail.html' % (self.template_folder)]\n return names", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = 
os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def reverseName(self, locatorGroup):\r\n locatorList = cmds.listRelatives(locatorGroup)\r\n\r\n eyeLocators = []\r\n earLocators = []\r\n\r\n for i in locatorList:\r\n if \"Eye_Coord\" in i:\r\n eyeLocators.append(i)\r\n if \"Ear_Coord\" in i:\r\n earLocators.append(i)\r\n\r\n\r\n # We first check if there is more then one eye or not. If there is, we have to reorder\r\n points = 8\r\n TempRename = []\r\n if len(eyeLocators) > points:\r\n # We first rename all the eye locators to a default name to prevent name clashing\r\n for i in range(0, len(eyeLocators)):\r\n RenameObj = cmds.rename(eyeLocators[i], 'TempEyeCoord#')\r\n TempRename.append(RenameObj)\r\n\r\n # We reorder the eye from right to left\r\n for i in range((len(eyeLocators)/points)-1 , -1 , -1):\r\n for j in range(0, points):\r\n cmds.rename(TempRename[j + (i * points)], 'Eye_Coord#')\r\n\r\n # We then check if there is more then one ear or not. 
If there is, we have to reorder\r\n points = 5\r\n TempRename = []\r\n if len(earLocators) > points:\r\n # We first rename all the ear locators to a default name to prevent name clashing\r\n for i in range(0, len(earLocators)):\r\n RenameObj = cmds.rename(earLocators[i], 'TempEarCoord#')\r\n TempRename.append(RenameObj)\r\n\r\n # We reorder the ear from right to left\r\n for i in range((len(earLocators) / points) - 1, -1, -1):\r\n for j in range(0, points):\r\n cmds.rename(TempRename[j + (i * points)], 'Ear_Coord#')", "def content(tmp_loc, ref_names_dict, order):\n \n fl = '[Content_Types].xml'\n inp_path = '/'.join([tmp_loc, fl])\n out_path = '/'.join([output_path, fl])\n \n cnt_lst = []\n asset_lst = []\n def_att = []\n d = dict()\n \n root1,tree1 = gen_tree(inp_path)\n root2,tree2 = gen_tree(out_path)\n \n # get all the extensions belongs to \"Default\" tag\n for relation in root2:\n if 'Default' in relation.tag:\n def_att.append(relation.attrib['Extension'])\n else:\n break\n \n for relation in root1:\n if 'Override' in relation.tag:\n attrib = relation.attrib['PartName'][1:]\n try:\n cnt = attrib.split('ppt/')[-1]\n ini = '/ppt/'\n except:\n cnt = attrib\n ini = '/'\n if cnt in ref_names_dict.keys():\n relation.attrib['PartName'] = f'{ini}{ref_names_dict[cnt]}'\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['PartName'])\n else:\n cnt_lst.append(relation)\n if relation.attrib['PartName'] not in asset_lst:\n asset_lst.append(relation.attrib['PartName'])\n else:\n attrib = relation.attrib['Extension']\n if attrib not in def_att:\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['Extension'])\n # deal with the assest_lst\n # print(\"AA: \", asset_lst)\n cnt_lst = natsort.natsorted(cnt_lst)\n for ele in cnt_lst:\n prev = tree2.find(ele.tag)\n prev.addnext(ele)\n \n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n unq_attr = []\n for relation in root2:\n if 'Override' in relation.tag:\n if relation.attrib['PartName'] not in unq_attr:\n unq_attr.append(relation.attrib['PartName'])\n else:\n root2.remove(relation)\n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)", "def testTitleTemplateFindNames(self):\n\n\t\ttests = {\n\t\t\t'${abc.def.1}-$abc-${123}': {\n\t\t\t\t'abc.def.1': ['abc', 'def', 1],\n\t\t\t\t'123': [123]\n\t\t\t},\n\t\t\t'${abc..def} $$ ${qwe}': {'qwe': ['qwe']}\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.getFieldNames(), tests[test])", "def __rename_images(self):\n for idx, image in enumerate(self._values):\n image.partname = '/ppt/media/image%d%s' % (idx+1, image.ext)", "def _set_templates(spm_dir=SPM_DIR):\n global EPI_TEMPLATE, T1_TEMPLATE, GM_TEMPLATE, WM_TEMPLATE, CSF_TEMPLATE\n\n spm_version = _get_version_spm(SPM_DIR)\n\n # Set the tpm and template paths according to SPM version\n if spm_version == 'spm12':\n template_path = 'toolbox/OldNorm'\n tpm_path = 'toolbox/OldSeg'\n else:\n template_path = 'templates'\n tpm_path = 'tpm'\n\n # configure template images\n EPI_TEMPLATE = os.path.join(SPM_DIR, template_path, 'EPI.nii')\n SPM_T1_TEMPLATE = os.path.join(SPM_DIR, template_path, 'T1.nii')\n T1_TEMPLATE = \"/usr/share/data/fsl-mni152-templates/avg152T1.nii\"\n if not os.path.isfile(T1_TEMPLATE):\n T1_TEMPLATE += '.gz'\n if not os.path.exists(T1_TEMPLATE):\n T1_TEMPLATE = SPM_T1_TEMPLATE\n GM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'grey.nii')\n WM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 
'white.nii')\n CSF_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'csf.nii')", "def __rename_slides(self):\n for idx, slide in enumerate(self._values):\n slide.partname = '/ppt/slides/slide%d.xml' % (idx+1)", "def _extract_template_events(self):\n\t\ttry:\n\t\t\ttable = self.hdf5file[fastq_paths[self.version]['template'] % self.group]\n\t\t\tself.template_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.template_events = []", "def inject_hosts_files(self):\n self.log.info(\"Injecting host files\")\n hosts = dict()\n for i in self.all_nodes:\n hosts[i.name] = i.get_public_addr()\n #add the host names to etc/hosts\n orchestrator.inject_hostnames(hosts, delete=self.cluster_name)\n for i in self.all_nodes:\n i.inject_hostnames(hosts, delete=self.cluster_name)\n self.all_nodes[0].run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)\n orchestrator.run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)", "def _create_namelist_ncep_post(case, confdir, config, infile, nmlgen, nmlgen_model_configure, namelist_user):\n####################################################################################\n #----------------------------------------------------\n # Clear out old data.\n #----------------------------------------------------\n data_list_path = os.path.join(case.get_case_root(), \"Buildconf\", \"ufsatm.input_data_list\")\n\n #----------------------------------------------------\n # Initialize namelist defaults\n #----------------------------------------------------\n nmlgen.init_defaults(infile, config)\n\n #----------------------------------------------------\n # Write out namelist groups\n #----------------------------------------------------\n groups=['nampgb']\n\n # Make input format for post-processing consistent with model output\n output_file = nmlgen_model_configure.get_value('output_file')\n if 'netcdf' in output_file:\n nmlgen.set_value('ioform', 'netcdf')\n elif 'nemsio' in output_file:\n nmlgen.set_value('ioform', 'binarynemsiompiio')\n\n # Query start date and time\n run_start_date = case.get_value('RUN_STARTDATE').split('-')\n yy = run_start_date[0]\n mm = run_start_date[1]\n dd = run_start_date[2]\n run_start_tod = int(case.get_value('START_TOD'))\n hh = run_start_tod//3600\n mi = (run_start_tod-hh*3600)//60\n ss = run_start_tod-hh*3600-mi*60\n\n # Overwrite user_nl_ufsatm changes\n nmlgen = nmlOverwrite(namelist_user, nmlgen)\n\n # Create namelist file for first time step / template script will update it for specific date\n namelist_file = os.path.join(confdir, \"itag.tmp\")\n nmlgen.write_output_file(namelist_file, data_list_path, groups=groups, sorted_groups=False)\n\n # Add header section to namelist\n with open(namelist_file, 'r+') as f:\n content = f.read()\n f.seek(0,0)\n f.write(nmlgen.get_value('filename')+\"\\n\")\n f.write(nmlgen.get_value('ioform')+\"\\n\")\n f.write(nmlgen.get_value('outform')+\"\\n\")\n f.write(\"{}-{}-{}\".format(yy,mm,dd)+\"_\"+\"{hh:02d}:{mm:02d}:{ss:02d}\".format(hh=hh,mm=mi,ss=ss)+\"\\n\")\n f.write(nmlgen.get_value('modelname')+\"\\n\")\n f.write(nmlgen.get_value('filenameflux')+\"\\n\")\n f.write(content)\n\n # Check/correct task count used for post-processing\n atm_grid = case.get_value(\"ATM_GRID\").replace('r', '')\n mach = case.get_value(\"MACH\")\n\n # Specific fix for Stampede2\n tasks_per_node = int(case.get_value(\"MAX_TASKS_PER_NODE\"))\n if (\"C384\" in atm_grid or \"C768\" in atm_grid) and \"stampede2\" in mach:\n tasks_per_node = 
24\n case.set_value(\"tasks_per_node\", str(tasks_per_node), subgroup=\"case.gfs_post\")\n case.flush()\n logger.info(\"NCEP Post tasks per node is changed to {}!\".format(tasks_per_node))\n\n task_count = {\"C96\": tasks_per_node, \"C192\": tasks_per_node, \"C384\": tasks_per_node*2, \"C768\": tasks_per_node*4}\n if atm_grid in task_count.keys():\n case.set_value(\"task_count\", str(task_count[atm_grid]), subgroup=\"case.gfs_post\")\n case.flush()\n logger.info(\"NCEP Post task count is changed to {}!\".format(task_count[atm_grid]))", "def emit_tep(self, tep, typ=\"all\", **context):\n e = self.get_all_tep.get(tep) or dict(HTMLFile=[], HTMLString=[])\n #: Disposable template sequence\n if self.stpl is True:\n e[\"HTMLFile\"] = map(lambda tpl: tpl.split('@')[-1], sorted(e['HTMLFile'], key=lambda x: x.split('@')[0], reverse=self.stpl_reverse))\n e[\"HTMLString\"] = map(lambda tpl: tpl.split('@')[-1], sorted(e['HTMLString'], key=lambda x: x.split('@')[0], reverse=self.stpl_reverse))\n typ = \"all\" if not typ in (\"fil\", \"cod\") else typ\n mtf = jinja2.Markup(\"\".join([render_template(i, **context) for i in e[\"HTMLFile\"]]))\n mtc = jinja2.Markup(\"\".join(e[\"HTMLString\"]))\n if typ == \"fil\":\n return mtf\n elif typ == \"cod\":\n return mtc\n else:\n return mtf + mtc", "def test_templater(self):\n\n # Set a global templater for all items\n self.site.template(r\"(.*)\", lambda item: \"ALL\")\n # Set another templater on the index item\n self.site.template(r\"index.html\", lambda item: \"INDEX\")\n\n # Since an item can only have one templater, the index templater should have been overwritten\n self.assertEqual(\"INDEX\", self.site.items[\"index.html\"].templated)\n self.assertEqual(\"ALL\", self.site.items[\"test/test.html\"].templated)", "def rename_slides(self):\n for idx, slide in enumerate(self):\n partname_str = '/ppt/slides/slide%d.xml' % (idx+1)\n slide.partname = PackURI(partname_str)", "def get_template_names(self):\n name = self.__class__.__name__.replace(\"DatatableView\", \"\")\n name = re.sub(r'([a-z]|[A-Z]+)(?=[A-Z])', r'\\1_', name)\n return [\"demos/\" + name.lower() + \".html\", \"example_base.html\"]", "def _set_base_namelists(self):\n\n # Create namelists\n hydro_namelist = self.model.hydro_namelists\n hrldas_namelist = self.model.hrldas_namelists\n\n self.base_hydro_namelist = hydro_namelist.patch(self.domain.hydro_namelist_patches)\n self.base_hrldas_namelist = hrldas_namelist.patch(self.domain.hrldas_namelist_patches)", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' 
% tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def elastixTemplates():\n\t\ttransformations = []\n\t\tfileNames = os.listdir(AppVars.transformationsPath())\n\t\tfor fileName in fileNames:\n\t\t\tfullFileName = os.path.join(AppVars.transformationsPath(), fileName)\n\t\t\ttransformation = ParameterList()\n\t\t\tif transformation.loadFromFile(fullFileName):\n\t\t\t\ttransformations.append(transformation)\n\t\treturn transformations", "def test_create_namespaced_processed_template(self):\n pass", "def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")", "def sort_by_name(self):\n # sort_by_name_sitem = self.locator_finder_by_idx(self.sort_by_name_id)\n # sort_by_name_sitem = sort_by_name_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_name_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1)\n \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n name = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[2]/a/label'\n sort_by_name_sitem = self.locator_finder_by_xpath(name)\n else:\n sort_by_name_sitem = self.locator_finder_by_xpath(self.sort_by_name_id)\n sort_by_name_sitem.click()\n time.sleep(2)", "def get_ordered_templates() -> List[Tuple[Version, Path]]:\n all_templates = list(Path(\"./templates\").iterdir())\n\n fallback = None\n ordered_templates = []\n for template in all_templates:\n # `moved.py` isn't one of the templates to be used here.\n if template.name == \"moved.py\":\n continue\n if template.name == \"default.py\":\n fallback = template\n continue\n assert template.name.startswith(\"pre-\")\n\n version_str = template.name[4:-3] # \"pre-{version}.py\"\n version = Version(version_str)\n ordered_templates.append((version, template))\n\n # Use the epoch mechanism, to force the fallback to the end.\n assert fallback is not None\n assert fallback.name == \"default.py\"\n ordered_templates.append((Version(\"1!0\"), fallback))\n\n # Order the (version, template) tuples, by 
increasing version numbers.\n return sorted(ordered_templates, key=operator.itemgetter(0))", "def populate_names(apps, schema_editor):\n Distillery = apps.get_model('distilleries', 'Distillery')\n for distillery in Distillery.objects.filter(name__isnull=True):\n collection = distillery.collection\n warehouse = collection.warehouse\n distillery.name = '%s.%s.%s' % (warehouse.backend, warehouse.name,\n collection.name)\n distillery.save()", "def setUp(self):\n print \"Setting Up: %s\" % self.id()\n # render the template\n g.render_template(self.template_file,\n self.template_vars,\n self.output_file,\n self.search_path)\n\n # read the resulting config file built from template\n self.output_config = g.load_config(self.output_file)\n g.show_config(self.output_config)", "def bulk_rename(current_path,casetype):\n\tclick.echo(current_path)\n\tfilenames = os.listdir(current_path) \n\n\tfor filename in filenames:\n\t\tif filename != 'file_organizer0.03.py':\n\t\t\tif casetype == 'lower':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\t\t\telif casetype == 'upper':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.upper())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").upper())\n\t\t\t\t\n\t\t\telif casetype == 'title':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.title)\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").title())\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\n\tclick.secho('Finished Renaming to {} case!!'.format(casetype),bg='blue',fg='white')", "def add_user_templates(self):\n\n # get all the user's templates\n user_templates = self.find_user_templates()\n\n # loop through the templates\n for template in user_templates:\n # create a template object and add it to the database\n local_template = PhishingTemplate(template)\n self._templates[template] = local_template", "def setup():\n if not os.path.isfile(etymology_file):\n page = re.compile(r'index.php\\?l=\\w+&p=\\d+&allowed_in_frame=0.html')\n pages = list(find_files(directory=site, pattern=page, recursive=False))\n etymology = etymologies(pages)\n dump(etymology, etymology_file)\n for affix, dictionary in affixes(etymology):\n affix_file = os.path.join('resources', '{}.json'.format(affix))\n if not os.path.isfile(affix_file):\n dump(dictionary, affix_file)", "def _setup(self):\n self._raw_top_dir = os.path.join(self._snippets_dir,\"raw\",\"dynamic\")\n if not os.path.exists(self._raw_top_dir):\n os.mkdir(self._raw_top_dir)\n\n self._trec_top_dir = os.path.join(self._snippets_dir,\"trec\",\"dynamic\")\n if not os.path.exists(self._trec_top_dir):\n os.mkdir(self._trec_top_dir)\n\n self._temp_top_dir = os.path.join(self._snippets_dir,\"temp\",\"dynamic\")\n if not os.path.exists(self._temp_top_dir):\n os.mkdir(self._temp_top_dir)\n\n self._snippet_result_top_dir = os.path.join(self._snippets_dir,\"result\",\"dynamic\")\n if not os.path.exists(self._snippet_result_top_dir):\n os.mkdir(self._snippet_result_top_dir)\n\n self._snippet_index_top_dir = os.path.join(self._snippets_dir,\"index\",\"dynamic\")\n if not 
os.path.exists(self._snippet_index_top_dir):\n os.mkdir(self._snippet_index_top_dir)\n\n self._para_top_dir = os.path.join(self._snippets_dir,\"para\",\"dynamic\")\n if not os.path.exists(self._para_top_dir):\n os.mkdir(self._para_top_dir)", "def set_task_order(self, order):\n for task in self.tasks:\n task.order = order", "def createExtnNodes(self):\n for parent, dirs, files in os.walk(self.destndir):\n for fname in files:\n filename = os.path.join(parent, fname)\n if os.path.isfile(filename):\n direntry=parent\n #direntry=parent.replace(self.destndir,'',len(self.destndir))\n #direntry = os.path.basename(os.path.abspath(parent))\n self.appendSrcType(direntry, fname)", "def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)", "def _initNames(self):\n self.outselect = os.path.join(self.workpath, 'FT1_selected'+self.suffix+'.fits')\n self.outmktime = os.path.join(self.workpath, 'FT1_filtered'+self.suffix+'.fits')\n self.outltcube = os.path.join(self.workpath, 'LtCube'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCube'+self.suffix+'.fits')\n self.outbinmap = os.path.join(self.workpath, 'CMAP'+self.suffix+'.fits')\n self.outbinexp = os.path.join(self.workpath, 'BinExpMap'+self.suffix+'.fits')\n self.outexpmap = os.path.join(self.workpath, 'ExpMap'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMaps'+self.suffix+'.fits')\n self.outgtlike = os.path.join(self.workpath, 'Results'+self.suffix+'.dat')\n self.outmodel = os.path.join(self.workpath, 'OutModel'+self.suffix+'.xml')\n self.outapert = os.path.join(self.workpath, 'LC_ApPhoto'+self.suffix+'.fits')\n self.outgtmod = os.path.join(self.workpath, 'GtModel'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'Resid'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigma'+self.suffix+'.fits')\n self.outtsmap = os.path.join(self.workpath, 'TSMmap'+self.suffix+'.fits')\n return\n # self.outfind = self.dir + self.src + '_FindSrc'+self.suffix+'.txt'", "def load_templates(self):\n\n self.templates = []\n\n if os.path.exists(\"question_templates.txt\"):\n for line in open(\"question_templates.txt\", \"r\"):\n self.templates.append(line.replace(\"\\n\", \"\"))", "def set_institutes(self):\n\n if develope_mode:\n print(help(self.set_institutes))\n\n for (dirpath, dirnames, filenames) in os.walk(\"../Finanzexplorer-Git-data/Institute\"):\n filenames = [x for x in filenames\n if \"Haushaltsb\" in x\n and \".xlsx\" in x\n and \"Template\" not in x\n and \"Haushaltsbücher_MPG_gesamt.xlsx\" not in x\n and \"_All\" not in x]\n for f in 
filenames:\n if f.split(\"_\")[1] == \"MPI\":\n name = f[21:len(f)-5].lower()\n else:\n name = f[17:len(f) - 5].lower()\n name = name.capitalize()\n\n self.institute[name][\"path\"] = os.path.join(dirpath, f)\n self.institute[name][\"file\"] = f", "def set_coefs_order(self, order):\n # Attach an epistasis model.\n self.order = order\n self.add_epistasis()\n self.epistasis.data.values = np.zeros(self.epistasis.n)\n self.epistasis.data.values[0] = 1\n return self", "def main():\n with open(\"page_data.yaml\", 'r') as inputstr:\n config_data = yaml.safe_load(inputstr)\n ointf = OutputInterface('template.txt')\n table_data = get_song_artist_matches()\n ofilen = config_data['directory'] + os.sep + 'common_songs.html'\n title = 'Song Titles and Band Name Overlap'\n header = ['No.', 'Artist', 'Peak', 'Date', 'Song/Artist', 'Peak',\n 'Date', 'Song']\n ointf.build_page(ofilen, title, header, fmt_table(table_data))\n ointf.inject(XTRAEDIT)\n ointf.output()", "def test_create_template_for_all_namespaces(self):\n pass", "def main():\n extension_choices = {}\n os.chdir(\"FilesToSort.old\")\n for file_name in os.listdir('.'):\n if os.path.isdir(file_name):\n continue\n\n file_extension = file_name.split('.')[-1]\n if file_extension not in extension_choices:\n choice = input(\"What file type would you like to sort {} files into? \".format(file_extension))\n extension_choices[file_extension] = choice\n try:\n os.mkdir(choice)\n except FileExistsError:\n pass\n\n os.rename(file_name, \"{}/{}\".format(extension_choices[file_extension], file_name))", "def reorder_rules(self):\n new_order = sorted(self.rules, key=attrgetter(\"pci_order\"))\n for idx, r in enumerate(new_order):\n r.dev_rename(\"%s%s\" % (r.dev_name_prefix, idx))", "def sorting_hat(soup=''):\n\n\ttemplate = \"\"\n\tcode = \"unknown\"\n\n\tfor comment in soup.find_all(text=lambda text: isinstance(text, Comment)):\n\t\tif 'template' in comment:\n\t\t\tif ' id:' in comment:\n\t\t\t\tif ' path:' in comment:\n\t\t\t\t\tcode = comment.strip().split('id:')[1].split(' path:')[0]\n\t\t\telif 'rm-msp-template-1' in comment:\n\t\t\t\tcode = 'rm-msp-template-1'\n\n\tfor lpe_div in soup.find_all(\"div\", {\"id\" : re.compile('^lpeCDiv')}):\n\t\tif soup.find_all(\"span\", {\"mktoname\" : \"Hero Styling\"}):\n\t\t\tcode = 'Hero Styling'\n\t\telif soup.find_all(\"div\", {\"class\" : \"sitecontainer\"}):\n\t\t\tcode = '1642402159'\n\t\telse:\n\t\t\tcode = 'lpeCDiv'\n\n\tif soup.find(\"div\", {\"class\": \"background-sun\"}):\n\t\tcode = 'background-sun'\n\n\ttemplate = templates[code]()\n\treturn template", "def process_tempita(fromfile):\n if not fromfile.endswith('.in'):\n raise ValueError(\"Unexpected extension: %s\" % fromfile)\n\n from_filename = tempita.Template.from_filename\n template = from_filename(fromfile,\n encoding=sys.getdefaultencoding()) \n\n content = template.substitute()\n\n outfile = os.path.splitext(fromfile)[0]\n with open(outfile, 'w') as f:\n f.write(content)", "def initialise_templates(self, tel_type):\n for t in tel_type:\n if tel_type[t] in self.prediction.keys():\n continue\n\n self.prediction[tel_type[t]] = \\\n TableInterpolator(self.root_dir + \"/\" +\n self.file_names[tel_type[t]])\n\n return True", "def defineProcessTemplates(histos):\n\n templates=[]\n\n #nominal\n templates.append( histos[0] )\n nomStats=templates[-1].Integral()\n\n #systematic variations\n #if Up/Down already in the name store directly updating the name\n #if not, mirror the variation given \n for i in xrange(1,len(histos)): \n templates.append( histos[i] 
)\n key=templates[-1].GetName()\n if not 'Up' in key and not 'Down' in key :\n templates[-1].SetName(key+'Up')\n templates.append( histos[i].Clone(key+'Down') )\n for xbin in range(templates[0].GetNbinsX()):\n templates[-1].SetBinContent(xbin+1,2*templates[0].GetBinContent(xbin+1)-templates[-2].GetBinContent(xbin+1))\n \n #don't leave bins with 0's\n for h in templates:\n h.SetDirectory(0)\n iStats=h.Integral()\n if iStats>0: h.Scale(nomStats/iStats)\n for xbin in range(h.GetNbinsX()):\n if h.GetBinContent(xbin+1)>0: continue\n h.SetBinContent(xbin+1,1e-6)\n \n return templates", "def reorder(self, new_order):\n #TODO doesn't work probably CRA 3/2019\n for field in [\"atoms\", \"xyz\"]:\n self.__dict__[field] = self.__dict__[field][list(new_order)]\n self.atoms = [self.atoms[i] for i in new_order]", "def get_template_names(self):\n tpl = super(Group_codeView, self).get_template_names()[0]\n app = self.model._meta.app_label\n mdl = 'group_code'\n self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))\n return [self.template_name]", "def main():\n parser = ArgumentParser(description=\"pre-process nexus templates\")\n parser.add_argument(\n \"nexus_templates\",\n nargs=\"+\",\n help=\"Nexus template files to process\",\n )\n args = parser.parse_args()\n\n for template_file in args.nexus_templates:\n preprocess_template(template_file)", "def _setup(self):\n self._raw_dir = os.path.join(self._snippets_dir,\"raw\",\"static\")\n if not os.path.exists(self._raw_dir):\n os.mkdir(self._raw_dir)\n\n self._trec_dir = os.path.join(self._snippets_dir,\"trec\",\"static\")\n if not os.path.exists(self._trec_dir):\n os.mkdir(self._trec_dir)\n\n self._temp_dir = os.path.join(self._snippets_dir,\"temp\",\"static\")\n if not os.path.exists(self._temp_dir):\n os.mkdir(self._temp_dir)\n\n self._para_dir = os.path.join(self._snippets_dir,\"para\",\"static\")\n if not os.path.exists(self._para_dir):\n os.mkdir(self._para_dir)\n\n self._snippet_result_dir = os.path.join(self._snippets_dir,\"result\",\"static\")\n if not os.path.exists(self._snippet_result_dir):\n os.mkdir(self._snippet_result_dir)\n\n self._snippet_index_dir = os.path.join(self._snippets_dir,\"index\",\"static\")\n if not os.path.exists(self._snippet_index_dir):\n os.mkdir(self._snippet_index_dir)\n\n \n\n\n self._index_para = os.path.join(self._para_dir,\"index_para\")\n\n self._temp_query_para = os.path.join(self._para_dir,\"temp_query_para\")\n\n self._index_list = os.path.join(self._para_dir,\"static_index_list\")\n \n self._orf = os.path.join(self._snippet_result_dir,\"orf\")\n\n self._oqf = os.path.join(self._temp_dir,\"oqf\")\n \n self._temp_output = os.path.join(self._temp_dir,\"temp_output\")\n\n with open(self._index_list,\"w\") as f:\n f.write(self._snippet_index_dir+\"\\n\")\n\n self._temp_query_builder = IndriQueryFactory(count=10000,\n rule=self._retrieval_method)\n\n self._oqf_builder = IndriQueryFactory(count=30,\n rule=self._retrieval_method)", "def _custom_template_names(self, template):\n splitted = template.rsplit('/', 1)\n name = 'custom_' + splitted[-1]\n ret = [name]\n if len(splitted) == 2:\n ret.append(splitted[0] + '/' + name)\n return ret", "def __write_epics_env(self, path, template_name, macros):\n file = \"{}mps.env\".format(path)\n template = \"{}epics_env/{}\".format(self.template_path, template_name)\n self.__write_file_from_template(file=file, template=template, macros=macros)", "def attach_spm_pet_grouptemplate(main_wf, wf_name=\"spm_pet_template\"):\n # Dependency workflows\n pet_wf = 
main_wf.get_node(\"spm_pet_preproc\")\n\n in_files = get_input_node(main_wf)\n datasink = get_datasink(main_wf, name='datasink')\n\n # The base name of the 'pet' file for the substitutions\n pet_fbasename = remove_ext(op.basename(get_input_file_name(in_files, 'pet')))\n\n # the group template datasink\n base_outdir = datasink.inputs.base_directory\n grp_datasink = pe.Node(DataSink(parameterization=False,\n base_directory=base_outdir,),\n name='{}_grouptemplate_datasink'.format(pet_fbasename))\n grp_datasink.inputs.container = '{}_grouptemplate'.format(pet_fbasename)\n\n # the list of the raw pet subjects\n warped_pets = pe.JoinNode(interface=IdentityInterface(fields=[\"warped_pets\"]),\n joinsource=\"infosrc\",\n joinfield=\"warped_pets\",\n name=\"warped_pets\")\n\n # the group template workflow\n template_wf = spm_create_group_template_wf(wf_name)\n\n # output node\n output = setup_node(IdentityInterface(fields=[\"pet_template\"]), name=\"group_template\")\n\n # group dataSink output substitutions\n regexp_subst = [\n (r\"/wgrptemplate{pet}_merged_mean_smooth.nii$\", \"/{pet}_grouptemplate_mni.nii\"),\n (r\"/w{pet}_merged_mean_smooth.nii$\", \"/{pet}_grouptemplate_mni.nii\"),\n ]\n regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)\n regexp_subst += extension_duplicates(regexp_subst)\n grp_datasink.inputs.regexp_substitutions = extend_trait_list(grp_datasink.inputs.regexp_substitutions,\n regexp_subst)\n\n # Connect the nodes\n main_wf.connect([\n # warped pets file list input\n (pet_wf, warped_pets, [(\"warp_output.warped_files\", \"warped_pets\")]),\n\n # group template wf\n (warped_pets, template_wf, [((\"warped_pets\", flatten_list), \"grptemplate_input.in_files\")]),\n\n # output node\n (template_wf, output, [(\"grptemplate_output.template\", \"pet_template\")]),\n\n # template output\n (output, grp_datasink, [(\"pet_template\", \"@pet_group_template\")]),\n ])\n\n # Now we start with the correction and registration of each subject to the group template\n do_petpvc = get_config_setting('spm_pet_template.do_petpvc')\n if do_petpvc:\n if main_wf.get_node('spm_anat_preproc') is None:\n raise AttributeError(\"Expected `spm_anat_preproc` workflow node to attach PETPVC.\")\n\n preproc_wf_name = \"spm_mrpet_grouptemplate_preproc\"\n main_wf = attach_spm_mrpet_preprocessing(main_wf, wf_name=preproc_wf_name, do_group_template=True)\n\n preproc_wf = main_wf.get_node(preproc_wf_name)\n main_wf.connect([(output, preproc_wf, [(\"pet_template\", \"pet_input.pet_template\".format(preproc_wf_name))]), ])\n else:\n # add the pet template to the preproc workflow\n reg_wf = spm_register_to_template_wf(wf_name=\"spm_pet_register_to_grouptemplate\")\n main_wf.connect([\n (output, reg_wf, [(\"pet_template\", \"reg_input.template\")]),\n (in_files, reg_wf, [(\"pet\", \"reg_input.in_file\"),]),\n\n (reg_wf, datasink, [(\"reg_output.warped\", \"pet.grp_template.@warped\"),\n (\"reg_output.warp_field\", \"pet.grp_template.@warp_field\"),\n ]),\n ])\n\n # per-subject datasink output substitutions\n regexp_subst = [\n (r\"/{pet}_sn.mat$\", \"/{pet}_grptemplate_params.mat\"),\n (r\"/wgrptemplate_{pet}.nii$\", \"/{pet}_grptemplate.nii\"),\n (r\"/w{pet}.nii\", \"/{pet}_grptemplate.nii\"),\n ]\n regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)\n regexp_subst += extension_duplicates(regexp_subst)\n datasink.inputs.regexp_substitutions = extend_trait_list(datasink.inputs.regexp_substitutions,\n regexp_subst)\n\n return main_wf", "def prepare(self):\n self.parse_template()\n 
self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()", "def group_by_filenames(self):\n package = self.container.config.output.package\n class_map = collections.group_by(self.container, key=get_location)\n groups = self.group_common_paths(class_map.keys())\n\n for keys in groups:\n if len(keys) == 1:\n common_path = os.path.dirname(keys[0])\n else:\n common_path = os.path.commonpath(keys)\n\n for key in keys:\n items = class_map[key]\n suffix = \".\".join(Path(key).parent.relative_to(common_path).parts)\n\n package_name = f\"{package}.{suffix}\" if suffix else package\n self.assign(items, package_name, module_name(key))", "def make_output_names(self):\n yaml_names = []\n fits_names = []\n\n if self.use_nonstsci_names:\n for i in range(len(self.info['Module'])):\n act = str(self.info['act_id'][i]).zfill(2)\n if self.info['Instrument'][i].lower() == 'niriss':\n det = 'NIS'\n elif self.info['Instrument'][i].lower() == 'fgs':\n det = 'FGS'\n else:\n det = self.info['detector'][i]\n mode = self.info['Mode'][i]\n dither = str(self.info['dither'][i]).zfill(2)\n\n yaml_names.append(os.path.abspath(os.path.join(self.output_dir, 'Act{}_{}_{}_Dither{}.yaml'\n .format(act, det, mode, dither))))\n fits_names.append('Act{}_{}_{}_Dither{}_uncal.fits'.format(act, det, mode, dither))\n\n else:\n for i in range(len(self.info['Module'])):\n if self.info['Instrument'][i].upper() == 'NIRCAM':\n fulldetector = 'nrc{}'.format(self.info['detector'][i].lower())\n else:\n fulldetector = self.info['detector'][i].lower()\n outfilebase = self.create_output_name(self.info, index=i)\n outfile = \"{}{}{}\".format(outfilebase, fulldetector, '_uncal.fits')\n yamlout = \"{}{}{}\".format(outfilebase, fulldetector, '.yaml')\n\n yaml_names.append(yamlout)\n fits_names.append(outfile)\n\n self.info['yamlfile'] = yaml_names\n self.info['outputfits'] = fits_names\n # Table([self.info['yamlfile']]).pprint()", "def setup(self):\n ### Set Names\n # Name of the pipeline reduction step\n self.name='sortobs'\n # Shortcut for pipeline reduction step and identifier for\n # saved file names.\n self.procname = 'RAW'\n # Set Logger for this pipe step\n self.log = logging.getLogger('pipe.step.%s' % self.name)\n ### Set Parameter list\n # Clear Parameter list\n self.paramlist = []\n # Append Parameters\n self.paramlist.append(['pattern', '(^.+_([gri]-band|oiii|sii|clear|h-alpha))',\n 'Regex pattern used to get name by matching name_filter'])\n # Confirm end of setup\n self.log.debug('Setup: done')", "def populate_titles(self,owner):\n if not owner in self.titles:\n try:\n a=self._get_plans_generator(owner)\n self.titles[owner]=[]\n for group in a:\n self.titles[owner].append(group[\"title\"])\n except:\n logging.warning(f\"could not get existing plans from groupId: {owner}\")\n self.titles[owner]=[]", "def refactor_post(self,post_name):\n for name in list(self.rules):\n related_post = \"{}.post.{}\".format(name,post_name)\n if related_post in self.rules:\n parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]\n self.rules[name] = self.MakeChoice([self.MakeSeq(parts)])", "def _replace_init_name(self, cr, uid, organisation_function_child_ids, context=None):\r\n for item in organisation_function_child_ids:\r\n if len(item) == 3 and item[2] and 'function_type_id' in item[2]:\r\n function_type_obj = self.pool.get('res.function.type').browse(cr,uid,item[2]['function_type_id'])\r\n #reformat name\r\n item[2]['name'] = function_type_obj.name\r\n yield item", "def load_template_files(self):\n 
templates = dict()\n template_path = settings.CUSTOM_VERTO_TEMPLATES\n templates.update(self.read_template_files(template_path))\n if hasattr(self, \"extra_converter_templates_directory\"):\n directory = self.extra_converter_templates_directory\n template_path = os.path.join(template_path, directory)\n templates.update(self.read_template_files(template_path))\n return templates", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def poststartup(self):\n if not self.genlocationevents():\n return\n with self._objslock:\n for n in sorted(self._objs.keys()):\n e = self._objs[n]\n for netif in e.netifs():\n (x, y, z) = netif.node.position.get()\n e.setnemposition(netif, x, y, z)", "def _po_packages(self):\n for name in self.distribution.po:\n source_dir = os.path.join(self.distribution.po_dir, name)\n build_dir = os.path.join(self.build_lib, name, 'locale')\n template = os.path.join(source_dir, name + '.pot')\n pkg = {'name': name,\n 'template': template,\n 'source_dir': source_dir,\n 'build_dir': build_dir}\n yield pkg", "def setUp(self):\n self.myfuncdesc = \"casper.demo.module.a_function_to_wrap\"\n self.mycloth = \"casper.demo.module.clothing\"\n self.mypipedesc = \"casper.demo.pipeline.xml\"\n self.myclothingdesc = \"casper.demo.clothing_pipeline.xml\"\n self.mypipexmldesc = \"casper.demo.xml_pipeline.xml\"\n self.mypyramiddesc = \"casper.demo.pyramid_pipeline.xml\"\n self.myswitchdesc = \"casper.demo.switch_pipeline.xml\"\n self.myiterativedesc = \"casper.demo.iterative_pipeline.xml\"\n self.myfile = os.path.abspath(__file__)\n self.mydir = os.path.dirname(self.myfile)", "def get_template_names(self):\n tpl = super(Teacher_professionalView, self).get_template_names()[0]\n app = self.model._meta.app_label\n mdl = 'teacher_professional'\n #self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))\n self.template_name = tpl[:8]+'teacher_professional/'+tpl[8:]\n return [self.template_name]", "def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')", "def test_multi_template():\n data = []\n data.extend([\"{}_data.json\".format(i) for i in range(50)])\n data.extend([\"{}_log.csv\".format(i) for i in range(50)])\n data.extend([\"filename_{}.py\".format(i) for i in range(50)])\n data.extend([\"stuff_{}.py\".format(i) for i in range(50)])\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def make_template(filenames):\n result = {}\n for fn in filenames:\n with open(fn) as f:\n conf = yaml.load(f)\n expand_horizons(result, conf)\n return result", "def add_to_pr_export(self, exp_template):", "def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam 
in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added all clouds for group: %s\", self.name)", "def reorder_expected_outputs():\n test_data_path = get_test_data_path()\n expected_output_files = sorted(glob(os.path.join(test_data_path, \"test_*_outputs.txt\")))\n for expected_output_file in expected_output_files:\n LOGGER.info(f\"Sorting {expected_output_file}\")\n\n with open(expected_output_file, \"r\") as fo:\n file_contents = fo.readlines()\n\n file_contents = sorted(file_contents)\n\n with open(expected_output_file, \"w\") as fo:\n fo.writelines(file_contents)", "def get_hosts(self, filename):\n\n data = parse_inventory(filename)\n\n for host in data['routers']['hosts']:\n self.router_names.append(str(host))\n for host in data['brokers']['hosts']:\n self.broker_names.append(str(host))", "def reorder(self):\n self.npages = len(self)\n self.pageindex = []\n for i, page in enumerate(self):\n page.prev = self[i-1].namext\n if i == self.npages-1: i = -1\n page.next = self[i+1].namext\n page.first = self[0].namext\n page.last = self[-1].namext\n page.document = self\n self.pageindex.append(page)", "def _substitute_template_parts(template_code):\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_parts = [\n 'v2/style',\n 'v2/styleMail',\n 'v2/stylePage',\n 'v2/stylePageCover',\n 'v2/stylePageDraft',\n 'v2/stylePageMail',\n 'v2/stylePageRegistration',\n 'v2/stylePageRegistrationDraft',\n 'stylePageMail',\n 'logo',\n 'macros',\n 'registrarSignature',\n 'registration/details',\n 'registration/givingNoticeParty',\n 'registration/location',\n 'registration/notes',\n 'registration/owners',\n 'registration/sections',\n 'registration/submittingParty',\n 'search-result/details',\n 'search-result/location',\n 'search-result/notes',\n 'search-result/owners',\n 'search-result/pprRegistrations',\n 'v2/search-result/selected',\n 'search-result/sections',\n 'v2/search-result/registration',\n 'search-result-ppr/financingStatement',\n 'search-result-ppr/amendmentStatement',\n 'search-result-ppr/changeStatement',\n 'search-result-ppr/renewalStatement',\n 'search-result-ppr/dischargeStatement',\n 'search-result-ppr/securedParties',\n 'search-result-ppr/courtOrderInformation',\n 'search-result-ppr/debtors',\n 'search-result-ppr/registeringParty',\n 'search-result-ppr/vehicleCollateral',\n 'search-result-ppr/generalCollateral'\n ]\n\n # substitute template parts - marked up by [[filename]]\n for template_part in template_parts:\n if template_code.find('[[{}.html]]'.format(template_part)) >= 0:\n template_part_code = Path(f'{template_path}/template-parts/{template_part}.html').read_text()\n for template_part_nested in template_parts:\n template_reference = '[[{}.html]]'.format(template_part_nested)\n if template_part_code.find(template_reference) >= 0:\n path = Path(f'{template_path}/template-parts/{template_part_nested}.html')\n template_nested_code = path.read_text()\n template_part_code = template_part_code.replace(template_reference, template_nested_code)\n template_code = template_code.replace('[[{}.html]]'.format(template_part), template_part_code)\n\n return template_code", "def get_template_names(self):\n tpl = super(School_upgradationView, 
self).get_template_names()[0]\n app = self.model._meta.app_label\n mdl = 'school_upgradation'\n #self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))\n self.template_name = tpl[:7]+'school_upgradation/'+tpl[7:]\n return [self.template_name]", "def get_template_names(self):\n templates = super(PartialResponseMixin, self).get_template_names()\n if self.get_use_partial():\n templates.insert(0, self.get_partial_template_name())\n return templates", "def initordering(cls):\n for i in range(len(clslist)):\n stages = cls.getConfigStages()\n for j in range(len(stages)):\n for k in range(len(slotlist)):\n cls.initorderingclsslot(clslist[i], stages[j], slotlist[k])\n # print(ordering)\n cls.log(1, ordering)", "def package():\n \n hou.hipFile.save()\n currentHip = hou.expandString(hou.hipFile.name())\n\n # create a temp directory we are going to fill with crap\n tempFilePath = tempfile.mkdtemp()\n \n otls = os.path.join(tempFilePath, \"otls\")\n os.mkdir(otls)\n files = os.path.join(tempFilePath, \"files\")\n os.mkdir(files)\n \n # Get all the external references to the hipfile\n fileOnDisk = hou.fileReferences()\n\n # loop and do what comes natural.\n for _file in fileOnDisk:\n\n parm = _file[0]\n filepath = _file[1]\n \n # if its a otl we need to store it.\n if filepath.endswith(\".otl\"):\n \n shutil.copy(hou.expandString(filepath), otls)\n \n else:\n \n if not os.path.isfile(hou.expandString(filepath)): \n \n continue\n \n # create a directory in files and save 1 file to that location\n tmpFileName = os.path.basename(hou.expandString(filepath))\n tmpFileDir = os.path.basename(os.path.dirname(hou.expandString(filepath)))\n path = os.path.join(files, tmpFileDir)\n \n if not os.path.isdir(path):\n \n os.mkdir(path)\n\n shutil.copy(hou.expandString(filepath), os.path.join(path, os.path.basename(hou.expandString(filepath))))\n\n try:\n \n if not parm.node().isLocked():\n \n parm.set(os.path.join(path.replace(tempFilePath, \"$HIP\"), tmpFileName))\n \n except hou.PermissionError: \n \n logging.warning(\"Error hardening parm :\" + str(parm.name()) + \"on node \" +parm.node().path())\n\n hou.hipFile.save(os.path.join(tempFilePath, os.path.basename(hou.expandString(hou.hipFile.name()))))\n # Load the source hipfile\n hou.hipFile.load(currentHip)\n \n # create a zipfile and package everything. 
then copy it to the home.\n zipfileLoc = zipdir(tempFilePath)\n shutil.move(zipfileLoc, os.path.join(hou.expandString(\"~\"), \"package.zip\"))\n shutil.rmtree(tempFilePath)", "def setLatticeOrder(self):\n\t\taccNodes = self.getNodes()\n\t\telemInLine = {}\n\t\tfor i in range(len(accNodes)):\n\t\t\telem = accNodes[i]\t\t\t\n\t\t\telemname = elem.getName()\n\t\t\tif(elemInLine.has_key(elemname)):\n\t\t\t\telemInLine[elemname] += 1\n\t\t\telse:\telemInLine[elemname] = 1\n\t\t\tnode = self.getNodes()[i]\n\t\t\tnode.setParam(\"TPName\",node.getName()+\"_\"+str(elemInLine[elemname]))\n\t\t\t#node.setParam(\"sequence\",i+1)\n\t\t\t#print \"debug node\",node.getName(),node.getParamsDict()", "def task_process_department_files():\n for dept in Department.list():\n for file_name, file in dept.files.items():\n yield {\n 'name': f'{dept}:{file_name}',\n 'file_dep': file.dependencies +\n [file.raw_path, util.path.CONFIG_PATH],\n 'targets': [file.processed_path],\n 'actions': [file.process],\n 'clean': True,\n }", "def setUp_extra(self):\n [self.testproject,\n self.root,\n self.projectadmin,\n self.participant,\n self.signedup_user] = self._create_dummy_project(\"test-project\")\n \n self.participant2 = self._create_random_user(\"participant2_\")\n self._register(self.participant2,self.testproject)\n \n \n from django.core.files.storage import default_storage\n #this fake file is included on test pages later to test rendering\n default_storage.add_fake_file(\"fakeinclude.html\",\"This is some fake include content:\" \n \"here is the content of fakecss\" \n \"<somecss>{% insert_file \"+default_storage.FAKE_DIRS[1]+\"/fakecss.css %} </somecss>and a \"\n \"non-existant include: <nonexistant>{% insert_file nothing/nonexistant.txt %}</nonexistant> Also\"\n \" try to include scary file path <scary>{% insert_file ../../../allyoursecrets.log %}</scary>\")", "def render_templates(self):\n\n # dockerfile\n try:\n t = self.templates.get_template(\n 'docker/dockerfiles/{}.dockerfile.template'.format(self.repo)\n )\n except TemplateNotFound:\n t = self.templates.get_template(\n 'docker/dockerfiles/default.dockerfile.template'\n )\n\n self.files.append({\n 'name': 'Dockerfile',\n 'content': t.render(commit=self.commit),\n })\n\n # gunicorn\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.conf.py'\n )\n self.files.append({\n 'name': 'gunicorn.conf.py',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.sh'\n )\n self.files.append({\n 'name': 'gunicorn.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # nginx\n t = self.templates.get_template(\n 'docker/nginx/app.nginx.conf'\n )\n self.files.append({\n 'name': 'app.nginx.conf',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/nginx/nginx.sh'\n )\n self.files.append({\n 'name': 'nginx.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # cron/, etc/ iif there exists a `self.repo` directory\n def _filter(p):\n return (\"cron/\" in p or \"etc/\" in p) and (self.repo in p) and \\\n (not os.path.basename(p).startswith('.'))\n\n for t in self.templates.list_templates(\n filter_func=_filter):\n\n self.files.append({\n 'name': os.path.basename(t),\n 'content': self.templates.get_template(t).render(),\n })", "def update_templates():\n logging.info(\"Copying english po files to %s\" % POT_PATH)\n\n # post them to exposed URL\n ensure_dir(POT_PATH)\n shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"django.po\"), os.path.join(POT_PATH, \"kalite.pot\"))\n 
shutil.copy(get_po_filepath(lang_code=\"en\", filename=\"djangojs.po\"), os.path.join(POT_PATH, \"kalitejs.pot\"))", "def create_init_files(self, app_label, model_names, models):\n model_name_slugs = [\"%s_views\" % (self.camel_to_slug(model_name)) for model_name in model_names]\n model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for\n model in models}\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/__init__.py\" % (app_label, folder_name)\n template_path = \"django_baker/__init__%s\" % folder_name\n self.create_file_from_template(file_path, template_path, {\"app_label\": app_label,\n \"model_name_slugs\": model_name_slugs,\n \"model_names_dict\": model_names_dict\n })", "def setUp(self):\n\n # This test suite needs actual depots.\n pkg5unittest.ManyDepotTestCase.setUp(self, [\"test1\", \"test1\",\n \"test2\", \"test2\"], start_depots=True)\n\n self.make_misc_files(self.misc_files)\n\n self.dpath1 = self.dcs[1].get_repodir()\n self.durl1 = self.dcs[1].get_depot_url()\n self.published = self.pkgsend_bulk(self.durl1, (self.amber10,\n self.amber20, self.bronze10, self.bronze20))\n\n # Purposefully republish bronze20 a second later so a version\n # exists that only differs in timestamp. Also publish tree\n # and scheme after that.\n time.sleep(1)\n self.published.extend(self.pkgsend_bulk(self.durl1,\n (self.bronze20, self.tree10, self.branch10, self.leaf10,\n self.scheme10)))\n\n self.dpath2 = self.dcs[2].get_repodir()\n self.durl2 = self.dcs[2].get_depot_url()\n self.tempdir = tempfile.mkdtemp(dir=self.test_root)\n\n self.durl3 = self.dcs[3].get_depot_url()\n self.durl4 = self.dcs[4].get_depot_url()", "def generate_direct_templates(self, write):\r\n PAGINATED_TEMPLATES = self.settings['PAGINATED_DIRECT_TEMPLATES']\r\n for template in self.settings['DIRECT_TEMPLATES']:\r\n paginated = {}\r\n if template in PAGINATED_TEMPLATES:\r\n paginated = {'articles': self.articles, 'dates': self.dates}\r\n save_as = self.settings.get(\"%s_SAVE_AS\" % template.upper(),\r\n '%s.html' % template)\r\n if not save_as:\r\n continue\r\n\r\n write(save_as, self.get_template(template),\r\n self.context, blog=True, paginated=paginated,\r\n page_name=os.path.splitext(save_as)[0])", "def write_template_body2(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n basin = template_filename.split('/')[-1].split('_')[1].replace('.php', '')\n template_file = open(template_filename, 'a')\n template_file.write('domains.push({\\n')\n template_file.write(' displayName: \"All\",\\n')\n template_file.write(' name: \"'+basin+'\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('variables.push({\\n')\n template_file.write(' displayName: \"Mean\",\\n')\n template_file.write(' name: \"<?php echo $LeadMean_name; ?>\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_AL.php\",\\n')\n template_file.write(' displayName: \"Atlantic\",\\n')\n template_file.write(' name: \"'+template_type+'_AL\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_CP.php\",\\n')\n template_file.write(' displayName: \"Central Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_CP\",\\n')\n template_file.write('});\\n')\n 
template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_EP.php\",\\n')\n template_file.write(' displayName: \"Eastern Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_EP\",\\n')\n template_file.write('});\\n')\n template_file.write('maptypes.push({\\n')\n template_file.write(' url: \"'+template_type+'_WP.php\",\\n')\n template_file.write(' displayName: \"Western Pacific\",\\n')\n template_file.write(' name: \"'+template_type+'_WP\",\\n')\n template_file.write('});\\n')\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Initialize the page\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//function for keyboard controls\\n')\n template_file.write('document.onkeydown = keys;\\n')\n template_file.write('\\n')\n template_file.write(\n '//Decare object containing data about the currently displayed map\\n'\n )\n template_file.write('imageObj = {};\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('initialize();\\n')\n template_file.write('\\n')\n template_file.write(\n '//Format initialized run date & return in requested format\\n'\n )\n template_file.write('function formatDate(offset,format){\\n')\n template_file.write(' var newdate = String(cycle);\\n')\n template_file.write(' var yyyy = newdate.slice(0,4)\\n')\n template_file.write(' var mm = newdate.slice(4,6);\\n')\n template_file.write(' var dd = newdate.slice(6,8);\\n')\n template_file.write(' var hh = newdate.slice(8,10);\\n')\n template_file.write(\n ' var curdate = new Date(yyyy,parseInt(mm)-1,dd,hh)\\n'\n )\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.write(' //Offset by run\\n')\n template_file.write(\n ' var newOffset = curdate.getHours() + offset;\\n'\n )\n template_file.write(' curdate.setHours(newOffset);\\n')\n template_file.write('\\n')\n template_file.write(\n ' var yy = String(curdate.getFullYear()).slice(2,4);\\n'\n )\n template_file.write(' yyyy = curdate.getFullYear();\\n')\n template_file.write(' mm = curdate.getMonth()+1;\\n')\n template_file.write(' dd = curdate.getDate();\\n')\n template_file.write(' if(dd < 10){dd = \"0\" + dd;}\\n')\n template_file.write(' hh = curdate.getHours();\\n')\n template_file.write(' if(hh < 10){hh = \"0\" + hh;}\\n')\n template_file.write('\\n')\n template_file.write(' var wkday = curdate.getDay();\\n')\n template_file.write(\n ' var day_str = [\"Sun\", \"Mon\", \"Tue\", \"Wed\", '\n +'\"Thu\", \"Fri\", \"Sat\"];\\n'\n )\n template_file.write('\\n')\n template_file.write(' //Return in requested format\\n')\n template_file.write(\" if(format == 'valid'){\\n\")\n template_file.write('//06Z Thu 03/22/18 (90 h)\\n')\n template_file.write(\n 'var txt = hh + \"Z \" + day_str[wkday] + \" \" + '\n +'mm + \"/\" + dd + \"/\" + yy;\\n'\n )\n template_file.write(' return txt;\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('//Initialize the page\\n')\n template_file.write('function initialize(){\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Set image object based on default variables\\n'\n )\n template_file.write(' imageObj = {\\n')\n template_file.write(\n ' variable: \"<?php echo $LeadMean_name; ?>\",\\n'\n )\n 
template_file.write(' domain: \"'+basin+'\"\\n')\n template_file.write(' };\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change domain based on passed argument, if any\\n'\n )\n template_file.write(' var passed_domain = \"\";\\n')\n template_file.write(' if(passed_domain!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_domain,domains)>=0){\\n'\n )\n template_file.write(\n ' imageObj.domain = passed_domain;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Change variable based on passed argument, if any\\n'\n )\n template_file.write(' var passed_variable = \"\";\\n')\n template_file.write(' if(passed_variable!=\"\"){\\n')\n template_file.write(\n ' if(searchByName(passed_variable,variables)>=0){\\n'\n )\n template_file.write(\n ' imageObj.variable = passed_variable;\\n'\n )\n template_file.write(' }\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(\n ' //Populate forecast hour and dprog/dt arrays for this '\n +'run and frame\\n'\n )\n template_file.write(\" populateMenu('variable');\\n\")\n template_file.write(\" populateMenu('domain');\\n\")\n template_file.write(\" populateMenu('maptype')\\n\")\n template_file.write('\\n')\n template_file.write(' //Populate the frames arrays\\n')\n template_file.write(' frames = [];\\n')\n template_file.write(\n ' for(i=minFrame;i<=maxFrame;i=i+incrementFrame)'\n +'{frames.push(i);}\\n'\n )\n template_file.write('\\n')\n template_file.write(\n ' //Predefine empty array for preloading images\\n'\n )\n template_file.write(' for(i=0; i<variables.length; i++){\\n')\n template_file.write(' variables[i].images = [];\\n')\n template_file.write(' variables[i].loaded = [];\\n')\n template_file.write(' variables[i].dprog = [];\\n')\n template_file.write(' }\\n')\n template_file.write('\\n')\n template_file.write(' //Preload images and display map\\n')\n template_file.write(' preload(imageObj);\\n')\n template_file.write(' showImage();\\n')\n template_file.write('\\n')\n template_file.write(' //Update mobile display for swiping\\n')\n template_file.write(' updateMobile();\\n')\n template_file.write('\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write('var xInit = null;\\n')\n template_file.write('var yInit = null;\\n')\n template_file.write('var xPos = null;\\n')\n template_file.write('var yPos = null;\\n')\n template_file.write('\\n')\n template_file.write('</script>\\n')\n template_file.write('\\n')\n template_file.write('</body>\\n')\n template_file.write('</html>\\n')\n template_file.close()", "def bootstrap(self):\n\n\t\t#---paths.yaml specifies directories which might be absent so make them\n\t\tif not os.path.isdir(self.postdir): os.mkdir(self.postdir)\n\t\tif not os.path.isdir(self.plotdir): os.mkdir(self.plotdir)\n\t\t#---parse the simulations found in each \"spot\"\n\t\tfor spot in self.spots: self.treeparser(spot)\n\t\t#---if there is a part named edr then we use it to get simulation times\n\t\t#---! edr files are required to infer times for slicing however we might also use xtc or trr later\n\t\tassert 'edr' in zip(*self.spots.keys())[1]\n\t\tself.treeparser_edr()\n\t\t#---data are stored in dictionaries by spot name\n\t\tall_top_keys = [i for j in [k.keys() for k in self.toc.values()] for i in j]\n\n\t\t#---! 
under development\n\t\tfor key in ['post','groups','slices']:\n\t\t\tif key not in self.members_with_specific_parts:\n\t\t\t\tself.__dict__[key] = {i:{} for i in all_top_keys}\n\t\t\telse: self.__dict__[key] = {(spot,i):{} \n\t\t\t\tfor spot in self.toc for i in self.toc[spot]}\n\t\tself.save()", "def order(self):\n raise NotImplementedError()", "def select_sort_by_name_ascendant(self):\n msg = \"The new order of the items is by ascendant name\"\n with self.allure.step(msg):\n self.__product_sort.select_by_text('Name (A to Z)')\n self.allure.attach_image(self.driver, msg)", "def setUp_extra(self):\n [self.testproject,\n self.root,\n self.projectadmin,\n self.participant,\n self.signedup_user] = self._create_dummy_project(\"linkreplacer-test\")\n\n self.replacer = HtmlLinkReplacer()", "def _load_personas(self, names, is_custom=False):\n names = names or [path.stem for path in\n self.persona_dir[is_custom].iterdir()\n if path.is_dir()]\n for name in names:\n try:\n self.update_persona_dicts(self.process_name(name),\n is_custom=is_custom)\n except:\n warnings.warn(f'Could not load files for {name}.')", "def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass", "def setESFiles(self, eSourceDir = None, verbose = False):\n\n print('\\n***Setting electronic structure files')\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc':\n # Check and set electronic structure file for packaging.\n if '***Missing' in self.nbDetails[key]['jobInfo'][2]:\n self.nbDetails[key]['elecStructure'] = None\n else:\n if eSourceDir is not None:\n # Copy electronic structure files to package using supplied path\n fileName = Path(self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\"))\n self.nbDetails[key]['elecStructure'] = Path(eSourceDir, fileName.name).as_posix()\n\n else:\n # Copy electronic structure files to package, based on full path from original job\n self.nbDetails[key]['elecStructure'] = self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\")\n\n checkList = self.checkFiles(self.nbDetails[key]['elecStructure'])\n\n # If file is missing, set to \"missing\"\n if not checkList[0]:\n self.nbDetails[key]['elecStructure'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n\n # If file is present, check also for corresponding files\n else:\n # Assuming above is molden file, check also for corresponding Gamess file\n gFile = Path(self.nbDetails[key]['elecStructure']).with_suffix('.log')\n checkList = self.checkFiles(gFile)\n if checkList[0]:\n # self.nbDetails[key]['elecStructure'].append(gFile.as_posix()) # Set here to append... 
hopefully works OK with arch update code...\n self.nbDetails[key]['elecStructureGamess'] = gFile.as_posix() # Set here as separate item\n else:\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {gFile.as_posix()}\"\n #\n\n if verbose:\n print(f\"Job {key}: {self.nbDetails[key]['title']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructure']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructureGamess']}\")", "def __init__(self, template_path, groups):\n self._groups = groups\n self._template_path = template_path", "def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n self.create_file_from_template(file_path, template_path, model_attributes)", "def setUp(self):\n # Used to initialize objects that should be re-initialized or\n # re-created for each individual test\n self.t = Task()\n\n self.t.config(\"alias.from\", \"to\")", "def test_template_name():\n for t in templates:\n assert len(t.name) > 0", "def setUp(self):\r\n capa_path = capa.__path__[0]\r\n self.template_path = os.path.join(capa_path,\r\n 'templates',\r\n self.TEMPLATE_NAME)\r\n with open(self.template_path) as f:\r\n self.template = MakoTemplate(f.read())", "def analyze_pptx(template_file):\n prs = Presentation(template_file)\n # Each powerpoint file has multiple layouts\n # Loop through them all and see where the various elements are\n slide_masters = prs.slide_masters\n for index, slide_master in enumerate(prs.slide_masters):\n print('------------------------------------')\n print('------------------------------------')\n print(f\"slide master indexed: {index}\")\n print(slide_master)\n print(\"text boxes:\")\n for shape in slide_master.shapes:\n try:\n dummystring = f\"shape name: {shape.name} - shape text: {shape.text}\"\n shape.text = shape.name\n print(dummystring)\n except:\n pass\n # shape.text = 'hahahaha'\n # for shape in slide_master.slideshapes:\n # print(shape)\n print('------------------------------------')\n for index, slide_layout in enumerate(slide_master.slide_layouts):\n print(f\"\\tslide layout: {slide_layout.name}\")\n slide = prs.slides.add_slide(slide_master.slide_layouts[index])\n # Not every slide has to have a title\n try:\n title = slide.shapes.title\n title.text = 'Title for Layout {}'.format(index)\n except AttributeError:\n print(\"No Title for Layout {}\".format(index))\n # Go through all the placeholders and identify them by index and type\n for shape in slide.placeholders:\n if shape.is_placeholder:\n phf = shape.placeholder_format\n # Do not overwrite the title which is just a special placeholder\n try:\n if 'Title' not in shape.text:\n shape.text = 'Placeholder index:{} type:{}'.format(phf.idx, shape.name)\n except AttributeError:\n print(\"{} has no text attribute\".format(phf.type))\n print(f\"\\t\\tid: {phf.idx} - name: {shape.name}\")\n # output_file = '..\\\\resources\\pptx\\\\template_names.pptx'\n # prs.save(output_file)" ]
[ "0.6324153", "0.5177771", "0.5155023", "0.5115554", "0.5099046", "0.50792223", "0.50491947", "0.5045727", "0.49775788", "0.49729112", "0.4915597", "0.49100342", "0.48777694", "0.48749626", "0.48587674", "0.48527986", "0.48417845", "0.48114392", "0.47574326", "0.47461927", "0.47124007", "0.47047317", "0.47027886", "0.46937972", "0.46674475", "0.4665849", "0.46637896", "0.46623066", "0.4659722", "0.46577945", "0.46507475", "0.4642071", "0.46388483", "0.4636217", "0.46333802", "0.46192503", "0.46109018", "0.46070233", "0.46026954", "0.45958158", "0.45856237", "0.4577187", "0.4568224", "0.45608437", "0.4558679", "0.45568934", "0.45437106", "0.45399663", "0.45352563", "0.45308027", "0.45299965", "0.45267305", "0.45191658", "0.4517779", "0.4516859", "0.4516518", "0.45073602", "0.45000187", "0.44994453", "0.44983956", "0.4497742", "0.44901627", "0.44834992", "0.448136", "0.44805506", "0.44794038", "0.44705087", "0.4467169", "0.44667464", "0.44640952", "0.44640008", "0.4460624", "0.44569135", "0.4456589", "0.44517198", "0.4440851", "0.44398507", "0.443841", "0.44329315", "0.44260833", "0.44217443", "0.44148564", "0.4412671", "0.44121894", "0.44079006", "0.44041887", "0.44022608", "0.439972", "0.4399079", "0.43974477", "0.4394788", "0.4392439", "0.43904454", "0.43901074", "0.4382428", "0.43791354", "0.43787843", "0.43778113", "0.4367596", "0.4367593" ]
0.50911015
5
Fill structure for sorting acquisition times.
def GetEpiAcqTimes(self, series): # Find minimum and maximum start times for each acquistion in series. self.epi_times = {} for entry in self.entry_map['epi']: # Loop through each file in this series. if self.info[entry]['series'] == series and \ self.info[entry]['tdim'] > 2: # Relate each entry to its time of acquisition. self.epi_times[self.info[entry]['acqtime']] = entry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sort_time(self):\n time = np.copy(self.data[\"time\"][:])\n ind_sorted = np.argsort(time)\n ind_valid: list[int] = []\n for ind in ind_sorted:\n if time[ind] not in time[ind_valid]:\n ind_valid.append(ind)\n n_time = len(time)\n for key, array in self.data.items():\n if not hasattr(array, \"shape\"):\n continue\n if array.ndim == 1 and array.shape[0] == n_time:\n self.data[key] = self.data[key][ind_valid]\n if array.ndim == 2 and array.shape[0] == n_time:\n self.data[key] = self.data[key][ind_valid, :]", "def __init__(self):\n self.timeMap = defaultdict(list)", "def __init__(self):\n self.timeMap = defaultdict(list)", "def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)", "def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"", "def process_timecards(self):\n timecard = open('timecards.txt','r')\n time_temp = []\n time = []\n for line in timecard:\n time_temp.append(line)\n for i in time_temp:\n time.append(i.split(','))\n for i in time:\n for q in range(len(i)):\n if q == 0:\n pass\n else:\n i[q] = float(i[q])\n for i in time:\n for q in range(len(i)):\n self.timecard[i[0]] = i[1:]\n #print(self.timecard)\n return self.timecard", "def _prep_times(self):\n self.test_times = 'diagonal'\n if hasattr(self, 'times'):\n self.train_times = self.times\n if hasattr(self, 'times_'):\n self.train_times_ = self.times_\n self.test_times_ = _DecodingTime()\n self.test_times_['slices'] = [[slic] for slic in\n self.train_times_['slices']]\n self.test_times_['times'] = [[tim] for tim in\n self.train_times_['times']]\n if hasattr(self, 'scores_'):\n self.scores_ = [[score] for score in self.scores_]\n if hasattr(self, 'y_pred_'):\n self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]", "def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]", "def test_sort_data_by_time():\n data = race.read_file_to_list()\n sorted_data = race.sort_data_by_time(data)\n assert data != sorted_data\n assert len(data) == len(sorted_data)\n assert type(sorted_data) == list\n for lines in sorted_data:\n assert type(lines) == dict", "def prepare(self):\n self.datelist = np.array(self.datelist)\n self.adulist = np.array(self.adulist)\n ast = np.argsort(self.datelist)\n return (self.datelist[ast], self.adulist[ast])", "def __init__(self):\n self.time = deque()\n self.lookup = defaultdict(int)\n self.now = 0", "def concat_and_sort(self):\n for link in self.to_concat:\n \n to_concat = self.to_concat[link]\n df = pd.concat(to_concat,axis=0)\n df=df.sort_values(by=['day','actualtime_arr_from'])\n for d in df['day'].unique():\n self.data[d][link] = {}\n temp = df[df['day']==d]\n \n for r in temp['routeid'].unique(): \n self.data[d][link][r] = 
temp[temp['routeid']==r][['actualtime_arr_from','actualtime_arr_to','routeid']].values \n del(temp)\n del(df)\n del(self.to_concat)", "def makeChronList(self):\n from operator import itemgetter\n ## make list of msg lists in the format accespted by reconstructLine\n self.outData_temp = [] # this will be in chronological order\n for sens in self.outData:\n if sens is not 'header':\n for meas in self.outData[sens]:\n for time in self.outData[sens][meas]:\n value = self.outData[sens][meas][time]\n thismsg = [time, sens, meas, str(value)] # leave time as float for sorting\n self.outData_temp.append(thismsg)\n self.outData_temp.sort(key=itemgetter(0)) # sort by first index\n for msg in self.outData_temp: # now we can make time a string\n msg[0] = str(msg[0])", "def _generate_time_values(self):\r\n # Populate time values\r\n log('writing times', 'INFO')\r\n d1970 = datetime(1970, 1, 1, tzinfo=utc)\r\n time_array = [[int((self.start_datetime - d1970).total_seconds())]]\r\n \r\n datetime_nc_start_simulation = self.start_datetime\r\n for raw_nc_index, raw_nc in enumerate(self.raw_nc_list):\r\n \r\n raw_nc_time = raw_nc.get_time_array(datetime_simulation_start=datetime_nc_start_simulation,\r\n simulation_time_step_seconds=self.time_step_array[raw_nc_index])\r\n \r\n time_array.append(raw_nc_time)\r\n datetime_nc_start_simulation = datetime.utcfromtimestamp(raw_nc_time[-1])\r\n \r\n self.cf_nc.variables['time'][:] = np.concatenate(time_array)\r\n end_date = datetime.utcfromtimestamp(self.cf_nc.variables['time'][-1])\r\n self.cf_nc.time_coverage_start = self.start_datetime.isoformat() + 'Z'\r\n self.cf_nc.time_coverage_end = end_date.isoformat() + 'Z'", "def _prepare(self):\n # Time list\n self.time_list = []\n # Distance array\n if self._fxn[0] is True:\n self.res_dists, self.res_keys = build_reslist_dict(self._rpl)\n\n # Distance between alpha carbons\n if self._fxn[1] is True:\n self.ca_dists, self.ca_keys = build_reslist_dict(self._rpl)\n\n # Distance between resid center of mass\n if self._fxn[2] is True:\n self.cm_dists, self.cm_keys = build_reslist_dict(self._rpl)\n\n # Distance between resid center of geometry\n if self._fxn[3] is True:\n self.cg_dists, self.cg_keys = build_reslist_dict(self._rpl)", "def init(self, start):\r\n\t\tself.start = start\r\n\t\tself.time = 0\r\n\t\tself.t = []\r\n\t\tself.ch = []", "def sort_auto(self):\n key = lambda buz1, buz2: buz1 if buz1.trip_duration <= buz2.trip_duration else buz2\n self.autobuze.sort(key=key)", "def sort_duration(self):\n self.sort('duration')", "def init(self):\n\t\t# Event list: time\n\t\tself.event_lst = [0, float('inf')]\n\t\t# Rate list of each edge edge->(time->rate)\n\t\t# \tRate here is the residual capacity\n\t\t#\tEnsure that there is one initial timestamp with full capacity and one infinite timestamp with no capacity\n\t\tself.rate_lst = {e:{0:self.topo.topo[e[0]][e[1]]['Capacity'], float('inf'):0} for e in self.topo.edges}", "def _finalize_cells(self):\n # Order by time (as path) and then drilldown dimension value (group)\n # The key[0] is a list of paths: time, another_drilldown\n\n order = lambda left, right: cmp(left[0], right[0])\n cells = self.time_cells.items()\n cells.sort(order)\n\n # compute the current datetime, convert to path\n current_time_path = time_to_path(\n pytz.timezone('UTC').localize(datetime.utcnow()).astimezone(self.browser.timezone).strftime(\"%Y-%m-%d %H:00:00\"), \n self.last_time_level, \n self.time_hierarchy)\n\n self.cells = []\n for key, cell in cells:\n # If we are aggregating at finer granularity 
than \"all\":\n time_key = key[0]\n if time_key:\n # if time_key ahead of current time path, discard\n if time_key > current_time_path:\n continue\n cell.update(zip(self.time_levels, time_key))\n\n # append the drilldown_on attribute ref\n if self.drilldown_on:\n cell[self.drilldown_on] = self.drilldown_on_value_func(key[1])\n\n self.cells.append(cell)", "def set_times(self):\n if self.anchor == \"P\":\n # specified pickup time, 5 minutes early.\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - 300\n # given pickup time, we are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) + 900\n # We are given pickup time, caluclate pickup time, and are 5 min early\n self.earliestDropoff = tools.time_to_seconds(self.times) - 300 + self.time_for_travel()\n # we are given pickup time, add travel time, and are 20 minutes\n self.latestDropoff = tools.time_to_seconds(self.times) + self.time_for_travel() + 900\n else:\n # this means the dropoff time is given. calculate the time it takes to drive, and then 5 minutes early\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel() - 1200\n # given dropoff time, we calucate when to arrive, and then are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel()\n # we are given dropoff time. It's earliest pickup time + travel time\n self.earliestDropoff = tools.time_to_seconds(self.times) - 1200\n self.latestDropoff = tools.time_to_seconds(self.times)", "def _sort_by_endtime(self) -> None:\n if self._instantiated:\n raise RuntimeError(\n 'SearchResults should not be overwritten once instantiated. '\n 'Instantiate new object with order_by_endtime=True.'\n )\n\n order = np.argsort(self._end_times)\n\n self.train_metric_dict = {name: [arr[idx] for idx in order] for name, arr in self.train_metric_dict.items()}\n self.opt_metric_dict = {name: [arr[idx] for idx in order] for name, arr in self.opt_metric_dict.items()}\n self.test_metric_dict = {name: [arr[idx] for idx in order] for name, arr in self.test_metric_dict.items()}\n\n self._fit_times = [self._fit_times[idx] for idx in order]\n self._end_times = [self._end_times[idx] for idx in order]\n self.status_types = [self.status_types[idx] for idx in order]\n self.budgets = [self.budgets[idx] for idx in order]\n self.config_ids = [self.config_ids[idx] for idx in order]\n self.is_traditionals = [self.is_traditionals[idx] for idx in order]\n self.additional_infos = [self.additional_infos[idx] for idx in order]\n\n # Don't use numpy slicing to avoid version dependency (cast config to object might cause issues)\n self.configs = [self.configs[idx] for idx in order]\n\n # Only rank_opt_scores is np.ndarray\n self.rank_opt_scores = self.rank_opt_scores[order]", "def sort_time(cls):\n CloudCtx.objCloudCtx.sort(key=lambda x: datetime.strptime(x.modTs, \"%d-%m-%Y %I:%M:%S %p\"), reverse=True)\n for elem in CloudCtx.objCloudCtx:\n print(elem.display_cloud_ctx())", "def sort_func(structure):\n return structure.timestamp", "def initializeCollection():\n return {SENSOR1:[], SENSOR2:[], SENSOR3:[],SENSOR4:[], DATE:[]}", "def _construct_all_positions(self):\n d = dict((s, 0) for s in self.symbol_list)\n d['datetime'] = self.backtest_date\n return [d]", "def __init__(self, jobReleaseDict):\n self.jobs = []\n\n releaseTimes = sorted(jobReleaseDict.keys())\n for time in releaseTimes:\n for job in jobReleaseDict[time]:\n self.jobs.append(job)\n\n self._sortQueue()", "def init_anime(self):\n for a in range(self.art_num):\n 
self.lines[a].set_data([], [])\n self.points[a].set_data([], [])\n self.time_text.set_text('')\n return self.lines + self.points + [self.time_text]", "def todo(self):\n # sort events with eventid using datetime string\n pass", "def _sort_by_endtime(self) -> None:\n if self._instantiated:\n raise RuntimeError(\n 'EnsembleResults should not be overwritten once instantiated. '\n 'Instantiate new object with order_by_endtime=True.'\n )\n\n order = np.argsort(self._end_times)\n\n self._train_scores = self.train_scores[order].tolist()\n self._test_scores = self.test_scores[order].tolist()\n self._end_times = self.end_times[order].tolist()", "def report_sort_key(self):\n return (self._start_time, self._end_time)", "def get_time_table(self,day):\n output = []\n for link in self.data[day]:\n df = self.data[link][day]\n for row in df:\n output.append({'actualtime_arr_from':row[0],'acutaltime_arr_to':row[1],\\\n 'routeid':row[2],'link':route})\n from operator import itemgetter\n return sorted(output, key=itemgetter('actualtime_arr_from'))", "def add_sort1(self, dt, eid, grid, angle, sc, sd, se, sf):\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.data[self.itime, self.itotal] = [angle, sc, sd, se, sf]\n self.element_node[self.itotal] = [eid, grid]\n #self.ielement += 1\n self.itotal += 1", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def stats():\r\n times_lst = []\r\n time_dict = {}\r\n for album, details in dbase().items():\r\n time_m = 0\r\n time_s = 0\r\n for songs, details_s in details[0].items():\r\n time = details_s[1].split(\":\")\r\n min = int(time[0])\r\n sec = int(time[1])\r\n time_m += min\r\n time_s += sec\r\n time_s = datetime.timedelta(seconds=time_s)\r\n time_m = datetime.timedelta(seconds=time_m)\r\n time = time_m + time_s\r\n time = str(time)\r\n times_lst.append(time)\r\n time_dict[album] = time\r\n\r\n time_dict = sorted(time_dict.items(), key=lambda x: x[1], reverse=True)\r\n return time_dict", "def __init__(self):\n self.key2value = {}\n self.key2time = {}", "def set_timepoints(self):\n unixtime = self.created.timestamp() # float\n self.timepoints = unixtime + self.points # TODO: calc a sort value!", "def organize_data(path_dir, accelerometer_file, accelerometer_data):\n\n accelerometer_df = pd.read_csv(os.path.join(path_dir, accelerometer_file), usecols=['UTC time', 'x', 'y', 'z'])\n\n x_list = accelerometer_df['x']\n y_list = accelerometer_df['y']\n z_list = accelerometer_df['z']\n UTC_times_list = accelerometer_df['UTC time']\n\n x_y_z_list_for_hour = [] # will contain 60*60 values, that every value is [x,y,z]\n\n curr_line_index = 0\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n for i in range(60):\n for j in range(60):\n if (curr_date_time.minute != i or curr_date_time.second != j) or curr_line_index + 1 == len(UTC_times_list): # the curr time is more or little then the wanted time, or we finished all the lines in the file --> there is a need to fulfill the values with 0,0,0\n continue\n else:\n x_y_z_list_for_hour.append([x_list[curr_line_index], y_list[curr_line_index], z_list[curr_line_index]])\n while curr_date_time.minute == i and curr_date_time.second <= j and curr_line_index + 1 != 
len(UTC_times_list):\n curr_line_index += 1\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n date = get_date_from_file_name(accelerometer_file)\n hour = curr_date_time.hour\n if date not in accelerometer_data.data_dic:\n accelerometer_data.data_dic[date] = {}\n accelerometer_data.data_dic[date][hour] = x_y_z_list_for_hour", "def add_sort1(self, dt, eid, grid, angle, sc, sd, se, sf, omax, omin, mst, msc):\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.data[self.itime, self.itotal] = [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.element_node[self.itotal] = [eid, grid]\n #self.ielement += 1\n self.itotal += 1", "def __init__(self, **fields):\r\n \r\n self._by_number = []\r\n self._names = []\r\n self._by_name = {}\r\n self._numbers = {}\r\n \r\n for name in sorted(fields.keys()):\r\n self.add(name, fields[name])", "def __init__(self):\n self.ts = dict()\n self.cache = dict()", "def setup(self):\n cls = type(\"Timings\", (Structure,),\n {\"_fields_\": [(n, c_double) for n in self._timers]})\n self._C_timings = cls()\n return byref(self._C_timings)", "def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)", "def main(iterator):\n\n entries = OrderedDict()\n for line in iterator:\n\n if \"START\" in line:\n entries.update({\"start_time\":int(re.search(r'\\d+', line).group())})\n if \"STOP\" in line:\n entries.update({\"end_time\":int(re.search(r'\\d+', line).group())})\n if \"NUMERIC SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"numeric_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n entries.update({\"numeric_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"STRING SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"string_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"STRING 
SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"string_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"BITFIELD\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"bitfield\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_ops_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"FP EMULATION\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"fp_emul\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_num_loops\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"FOURIER\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"fourier\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_num_coef\":int(re.search(r'\\d+', line).group())})\n\n if \"ASSIGNMENT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"assignment\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n 
entries.update({\"assignment_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_num_arrs\":int(re.search(r'\\d+', line).group())})\n\n if \"IDEA\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"idea\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"idea_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"idea_num_loops\":int(re.search(r'\\d+', line).group())})\n \n if \"HUFFMAN\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"huffman\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_num_loops\":int(re.search(r'\\d+', line).group())})\n\n\n if \"NEURAL NET\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"nnet\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_num_loops\":int(re.search(r'\\d+', line).group())})\n\n if \"LU DECOMPOSITION\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"lu_decomp\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_num_runs\":int(re.search(r'\\d+', line).group())})\n line = 
next(iterator)\n \n entries.update({\"lu_decomp_num_arrs\":int(re.search(r'\\d+', line).group())})\n\n if \"libc\" in line and \"Baseline\" not in line and \"*\" not in line:\n line = next(iterator)\n \n entries.update({\"memory_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"integer_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"float_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n\n #print(entries)\n return entries", "def __init__(self):\n self._keys = []\n self._sortKeys = []", "def format_time_sortkey(self, data):\n return self.input['start_time'].time().strftime('%H%M').lstrip('0')", "def getTimes():", "def getTimes():", "def getTimes():", "def __init__(self):\n self.vals = []\n self.mins = []", "def _start_times_to_visit_info(self):\n\n self.visit_plan = {\n 'exp_start_times': self.exp_start_times,\n # for visit trends\n 'orbit_start_index': tools.detect_orbits(self.exp_start_times),\n }", "def __getTimeToStart( self ):\n channelInfo = {}\n for channelID, value in self.bandwidths.items():\n channelDict = self.channels[channelID]\n channelFiles = channelDict['Files']\n channelSize = channelDict['Size']\n status = channelDict['Status']\n channelName = channelDict['ChannelName']\n channelInfo[channelName] = {'ChannelID': channelID}\n\n if status != 'Active':\n throughputTimeToStart = float( 'inf' ) # Make the channel extremely unattractive but still available\n fileTimeToStart = float( 'inf' ) #Make the channel extremely unattractive but still available\n else:\n channelThroughput = value['Throughput']\n channelFileput = value['Fileput']\n channelFileSuccess = value['SuccessfulFiles']\n channelFileFailed = value['FailedFiles']\n attempted = channelFileSuccess + channelFileFailed\n if attempted != 0:\n successRate = 100.0 * ( channelFileSuccess / float( attempted ) )\n else:\n successRate = 100.0\n if successRate < self.acceptableFailureRate:\n #print 'This channel is failing %s' % channelName\n throughputTimeToStart = float( 'inf' ) # Make the channel extremely unattractive but still available\n fileTimeToStart = float( 'inf' ) # Make the channel extremely unattractive but still available\n else:\n if channelFileput > 0:\n fileTimeToStart = channelFiles / float( channelFileput )\n else:\n fileTimeToStart = 0.0\n\n if channelThroughput > 0:\n throughputTimeToStart = channelSize / float( channelThroughput )\n else:\n throughputTimeToStart = 0.0\n\n if self.schedulingType == 'File':\n channelInfo[channelName]['TimeToStart'] = fileTimeToStart\n elif self.schedulingType == 'Throughput':\n channelInfo[channelName]['TimeToStart'] = throughputTimeToStart\n else:\n errStr = 'StrategyHandler.__dynamicThroughput: CS SchedulingType entry must be either File or Throughput'\n gLogger.error( errStr )\n return S_ERROR( errStr )\n\n return S_OK( channelInfo )", "def timing_default(self):\n\n return {\"runtimes\": [], \"dates\": []}", "def aggregate_data(sequence):\r\n data = defaultdict(timedelta)\r\n for item in sequence:\r\n _update_default_dict(data, item)\r\n return data", "def __init__(self):\n self.tic0=time.time()\n self.tic=self.tic0\n self.record=[]", "def generate_time_data(self):\n # generate random dates and append to a list\n sd = self.start_date\n ed = self.end_date\n dates = [random_date(start=sd, end=ed) for d in range(0, self.obs)]\n\n # convert to ISO 8601 
format and update \"Local Time\" field\n self.output['Local Time'] = map(lambda x: x.isoformat(), dates)", "def deterministic(timeTable):\n # dictionary voor roomslots\n sortedRoomSlotsDict = {\n 'mo' :[],\n 'tu' :[],\n 'we' :[],\n 'th' :[],\n 'fr' :[],\n }\n sortedRoomSlotsList = []\n \n #fill courses from max students to min students\n courses = timeTable.getCourses()\n lectureList = []\n seminarList = []\n practicumList = []\n sortCourses = []\n\n # Sort courses list\n for c in courses:\n sortCourses.append(c)\n sortCourses.sort(key=operator.attrgetter('getNumberOfStudents'))\n\n # sortCourses --> activitiesMaxMin Dictionary\n for c in sortCourses:\n act = c.getActivities()\n for a in act:\n aType = a.getType()\n if \"lecture\" in aType:\n lectureList.append(a)\n if \"seminar\" in aType:\n seminarList.append(a)\n if \"practicum\" in aType:\n practicumList.append(a)\n activitiesMaxMin = {'lecture':lectureList, 'seminar':seminarList, 'practicum':practicumList}\n\n # Activities MaxMin List\n activityList = []\n for c in sortCourses:\n act = c.getActivities()\n for a in act:\n activityList.append(a)\n \n # maak dictionary met roomslots per dag. 'mo': [roomslotsValues] \n for t in timeTable.timeSlots:\n roomSlots = t.getRoomSlots()\n for s in roomSlots:\n sortedRoomSlotsDict[t.getDay()].append(s)\n \n # sorteert roomslots op size\n for key in sortedRoomSlotsDict:\n sortedRoomSlotsDict[key].sort(key=operator.attrgetter('size'))\n \n # gesorteerde lijst met roomslots\n for key in sortedRoomSlotsDict:\n for roomslots in sortedRoomSlotsDict[key]:\n sortedRoomSlotsList.append(roomslots)\n sortedRoomSlotsList.sort(key=operator.attrgetter('size'))\n\n # Courselist gesorteerd op aantal activities\n sortCourses.sort(key = lambda x: len(x.getActivities()), reverse=True)\n\n # dict\n pointDict = {\n 5 : ['mo', 'tu', 'we', 'th', 'fr'],\n 4 : ['mo', 'tu', 'th', 'fr'],\n 3 : ['mo', 'we', 'fr'],\n 2 : ['th', 'mo'],\n }\n\n # random room slots\n randomRoomSlots = []\n for t in timeTable.getTimeSlots():\n randomRoomSlots += t.getRoomSlots()\n random.shuffle(randomRoomSlots)\n\n\n bestScore = 0\n count = 0\n stack = [[[\"mo\", activityList[0]]]]\n bookedActivities = []\n while stack != []:\n for child in stack:\n try: parent = stack[-1].pop()\n except:\n \n #lijst van children is leeg\n stack.pop()\n lBookedActivity = bookedActivities.pop()\n deleteActivity(lBookedActivity[1], timeTable)\n continue\n \n # Book Parent\n if depthBookActivity(parent[1], sortedRoomSlotsDict[parent[0]], sortedRoomSlotsList, timeTable) != False:\n bookedActivities.append(parent)\n # Maak children van parent\n try:\n children = generateAllChildren(parent[1], activityList)\n stack.append(children)\n except:\n # Calculeer maxpoints en vergelijk timetable met eerder resultaat\n currentScore = getPointsDeterministic(timeTable)\n count += 1\n lBookedActivity = bookedActivities.pop()\n if currentScore > bestScore: bestScore = currentScore\n if count > 1000:\n count = 0\n print bestScore\n deleteActivity(lBookedActivity[1], timeTable)", "def _build_sort1_table(key_itime, keys_map, header_dict,\n form, form_results, form_resultsi,\n disp_dict, stress_dict, strain_dict, force_dict,\n strain_energy_dict, gpstress_dict, log):\n is_results = False\n form_resultsi_subcase = []\n #for key, value in header_dict.items():\n #print(key, value)\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index) = key\n key_itime0 = key_itime[0]\n key0 = key_itime0[0]\n # (isubcase, analysis_code, sort_method,\n # count, ogs, 
superelement_adaptivity_index, pval_step) = key\n subcase_id_old = key0[0]\n count_old = key0[3]\n ogs_old = key0[4]\n subtitle_old = key0[5]\n subtitle_old, label_old, superelement_adaptivity_index_old, unused_pval_step_old = keys_map[key0]\n del label_old\n del superelement_adaptivity_index_old\n\n # now that we have the data built, we put it in the form\n # in sorted order\n #\n # TODO: consider pval_step\n for key, itime in key_itime:\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index, pval_step) = key\n #print('key =', key)\n subcase_id = key[0]\n count = key[3]\n ogs = key[4]\n #print('*ogs =', ogs)\n #subtitle = key[4]\n try:\n subtitle, unused_label, superelement_adaptivity_index, unused_pval_step = keys_map[key]\n except Exception:\n subcase_id = subcase_id_old\n subtitle = subtitle_old + '?'\n superelement_adaptivity_index = '?'\n raise\n\n #print('key =', key)\n if subcase_id != subcase_id_old or subtitle != subtitle_old or ogs != ogs_old:\n count_str = '' if count == 0 else ' ; opt_count=%s' % count_old\n ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old\n subcase_str = 'Subcase %s; %s%s%s%s' % (\n subcase_id_old, subtitle_old, superelement_adaptivity_index, count_str, ogs_str)\n #print(subcase_str)\n res = (\n subcase_str.rstrip('; '),\n None,\n form_resultsi_subcase\n )\n form_resultsi.append(res)\n form_resultsi_subcase = []\n subcase_id_old = subcase_id\n subtitle_old = subtitle\n count_old = count\n ogs_old = ogs\n\n\n try:\n header = header_dict[(key, itime)]\n except KeyError: # this hits for strain energy\n msg = 'Missing (key, itime) in header_dict\\n'\n msg += ' key=%s\\n' % str(key)\n\n (subcase, analysis_code, sort_method,\n count, ogs, superelement_adaptivity_index, pval_step) = key\n msg += f' subcase={subcase}\\n'\n msg += f' analysis_code={analysis_code}\\n'\n msg += f' sort_method={sort_method}\\n'\n msg += f' count={count}\\n'\n msg += f' ogs={ogs}\\n'\n msg += f' superelement_adaptivity_index={superelement_adaptivity_index!r}\\n'\n msg += f' pval_step={pval_step!r}\\n'\n\n msg += ' itime=%s\\n' % itime\n msg += ' %s\\n' % str((key, itime))\n msg += 'Possible (key, time):\\n'\n for keyi in header_dict:\n msg += ' %s\\n' % str(keyi)\n #print(msg.rstrip())\n #print('expected = (%s, %r)\\n' % (str(key), itime))\n log.error(msg.rstrip() + '\\n')\n #self.log.error('expected = (%s, %r)\\n' % (str(key), itime))\n continue\n #raise KeyError(msg)\n try:\n header = header.strip()\n except Exception:\n print('header = %r' % header)\n raise\n\n\n form_outi = []\n form_out = (header, None, form_outi)\n disp_formi = disp_dict[(key, itime)]\n stress_formi = stress_dict[(key, itime)]\n strain_formi = strain_dict[(key, itime)]\n force_formi = force_dict[(key, itime)]\n strain_energy_formi = strain_energy_dict[(key, itime)]\n gpstress_formi = gpstress_dict[(key, itime)]\n if disp_formi:\n form_outi += disp_formi\n #form_outi.append(('Disp', None, disp_formi))\n if stress_formi:\n form_outi.append(('Stress', None, stress_formi))\n is_results = True\n if strain_formi:\n form_outi.append(('Strain', None, strain_formi))\n is_results = True\n if force_formi:\n form_outi.append(('Force', None, force_formi))\n is_results = True\n if strain_energy_formi:\n form_outi.append(('Strain Energy', None, strain_energy_formi))\n is_results = True\n if gpstress_formi:\n form_outi.append(('Grid Point Stresses', None, gpstress_formi))\n is_results = True\n\n if form_outi:\n is_results = True\n form_resultsi_subcase.append(form_out)\n #break\n\n 
#print(\"subcase_id = \", subcase_id)\n if subcase_id:\n count_str = '' if count == 0 else ' ; opt_count=%s' % count_old\n ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old\n subcase_str = 'Subcase %s; %s%s%s' % (subcase_id, subtitle, count_str, ogs_str)\n #print('*', subcase_str)\n res = (\n subcase_str.strip('; '),\n None,\n form_resultsi_subcase\n )\n form_resultsi.append(res)\n assert len(form_out) > 0, form_out\n form_resultsi_subcase = []\n\n if is_results:\n form.append(form_results)\n assert len(form_out) > 0, form_out\n #print('formi =', formi)\n #print('form_out =', form_out)\n #print('form_resultsi =', form_resultsi)\n #print('form_results =', form_results)\n #print(form)\n #if len(formi):\n #form.append(form0)\n #print(form)\n #aa\n #print('form', form)\n #print('form_results =', form_results)\n return form", "def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. 
Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', 
attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)", "def __init__(self, ):\n self.starts = dict()\n self.ends = dict()", "def task4(self) ->list:\n user_readTimes = {}\n for entry in self.records:\n if(entry['event_type'] == 'pagereadtime'):\n if (entry['visitor_uuid'] in user_readTimes):\n user_readTimes[entry['visitor_uuid']] += entry['event_readtime']\n else:\n user_readTimes[entry['visitor_uuid']] = entry['event_readtime']\n readTimes = list(sorted(user_readTimes.items(), key=operator.itemgetter(1), reverse = True))[0:10]\n for times in readTimes:\n print(times)\n return readTimes", "def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)", "def initializeTimeIntegration(self,timeIntegration):\n pass", "def sort_by_time(pairs):\n pairs = sorted(pairs, key=lambda line: line[2], reverse=False)\n order = 0\n out = []\n for i in range(len(pairs)):\n if i != 0 and pairs[i][2] == pairs[i - 1][2]:\n out += [(pairs[i][0], pairs[i][1], order)]\n else:\n order += 1\n out += [(pairs[i][0], pairs[i][1], order)]\n return out", "def __parse(self):\n lines = self.data.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n if line[0] == '#':\n continue\n tokens = line.split(\"\\t\")\n time_str = tokens[self.timecol]\n if time_str.find('start:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n elif time_str.find('end:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n break\n else:\n duration = float(tokens[6])\n fms = int(tokens[2])\n hfms = int(tokens[3])\n svs = int(tokens[4])\n self.calls.append((fms, hfms, svs))\n self.durations.append(duration)\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.length = (self.times[len(self.times) - 1] -\\\n self.times[0]).seconds", "def __init__(self, numQueues, rate, start_hour, end_hour, appt_low, appt_high):\n\n self.rate = rate\n self.numQueues = numQueues\n self.start = datetime.datetime.combine(datetime.date.today(), datetime.time(start_hour,0,0))\n self.end = datetime.datetime.combine(datetime.date.today(), datetime.time(end_hour,0,0))\n self.appt_low = appt_low\n 
self.appt_high = appt_high\n minutes_for_new_items = (end_hour-start_hour)*60 #new patients seen between 9AM and 4PM\n time_between_items = rate #exponential dist. time parameter\n self.expected_count = int(np.ceil(stats.poisson.ppf(.9999, minutes_for_new_items/time_between_items)))\n self.ques = [datetime.datetime.combine(datetime.datetime.today(), datetime.time(start_hour,0,0)) for i in range(0, self.numQueues)]\n cols = ['simulation', 'num_items', 'wait_count', 'avg_wait_time', 'close_time']\n self.results = pd.DataFrame(columns = cols)\n return", "def assemble(self, dt_range=None):\n if dt_range is not None:\n self.dt_list = trace_source.time_list(dt_range[0],\n dt_range[1],\n self.config['time']['step'])\n\n # only for the testcase\n traj_dir = self.config['partposit_dir']\n days_avail = os.listdir(traj_dir)\n # filter only for the trajectory files with tdump extension\n days_avail = [f for f in days_avail if len(f) == 11]\n print(days_avail)\n folders = [f for f in days_avail if datetime.datetime.strptime(f, \"%Y%m%d_%H\") in self.dt_list]\n\n assert len(folders) > 0, 'no folders with flexpart partposit data'\n\n # the defaultdict is used here to sort the files by datetime within a dictionary\n # filtered_files = defaultdict(list)\n # for f in files:\n # # regex the yyyymmdd-hh timestamp in the filename\n # dt = datetime.datetime.strptime(re.search('([0-9]{8})-([0-9]){2}', f).group(0), '%Y%m%d-%H')\n # height = float(re.search('([0-9]{3,6})(?=_0[0-9-]{1,4}.tdump)', f).group(0))\n # #print(f, dt, height)\n # if dt >= self.dt_list[0] and dt <= self.dt_list[-1]:\n # filtered_files[dt].append((f,height))\n\n # here an empty dict is generated with a zero containing array\n self.stat2d_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list))))\n\n self.statls_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list), 7)))\n\n self.raw_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list),\n abs(self.config['time']['tr_duration'])+1)))\n\n # TODO make more than 7 geo names possible\n ng = trace_source.land_sfc.named_geography(self.config['geonames'])\n self.geo_names = ng.geo_names\n no_geo_names = len(list(self.geo_names.keys()))\n self.statgn_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n no_geo_names)))\n\n\n self.lat_names = {0: '<-60', 1: '-60..-30', 2:'-30..0', 3: '0..30', 4: '30..60', 5: '>60'}\n self.statlat_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n len(list(self.lat_names.keys())))))\n\n\n ls = trace_source.land_sfc.land_sfc()\n self.ls_categories = ls.categories\n\n\n for it, dt in enumerate(self.dt_list[:]):\n print('trajectories eding at ', dt)\n files_for_time = os.listdir(traj_dir + dt.strftime(\"%Y%m%d_%H\"))\n files_for_time = sorted([f for f in files_for_time if \"partposit_\" in f])\n folder = traj_dir + dt.strftime(\"%Y%m%d_%H\") + \"/\"\n print('files_for_time ', files_for_time)\n\n print('heights ', len(self.height_list), self.height_list)\n\n flex_stat = [flex_statistics(self.config, ls=ls, ng=ng) for h in self.height_list]\n traj_meta = read_flexpart_traj_meta(folder + \"trajectories.txt\")\n\n self.no_part.append(traj_meta['releases_meta'][1]['no_particles'])\n self.time_res.append(10*24/len(files_for_time))\n\n # different structure than hysplit\n # 1. loop through the ending times of the current day\n # 2. load partposit for a specified time\n # 3. 
loop through heights\n\n for f in files_for_time:\n print('files_for_time ', f)\n part_pos = read_partpositions(folder + f, 1, ctable=True)\n part_pos = np.array(part_pos)\n\n for ih, h in enumerate(self.height_list):\n #print(\"at \", ih, h)\n this_population = np.where(part_pos[:,0] == ih+1)[0]\n #release_sel = np.array([list(p) for p in part_pos if p[0]==ih+1])\n release_sel = part_pos[this_population, :]\n #assert np.all(release_sel == other_release)\n meta = traj_meta['releases_meta'][ih+1]\n #print(meta)\n assert np.mean(meta['heights']) == h, f\"{meta['heights']} {h} do not fit\"\n flex_stat[ih].add_partposits_gn(release_sel)\n\n flex_stat[ih].add_partposits_ls(release_sel)\n flex_stat[ih].add_partposits_thres(release_sel)\n\n # now assemble the statistics for all heights\n for ih, h in enumerate(self.height_list): \n flex_stat[ih].calc_gn_stat()\n for k in list(flex_stat[ih].stat_gn.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_gn[k].no_below\n print('stat gn ', h, k, flex_stat[ih].stat_gn[k])\n self.statgn_dict[k][it, ih] = list(flex_stat[ih].stat_gn[k].counter.values())\n\n flex_stat[ih].calc_ls_stat()\n for k in list(flex_stat[ih].stat_ls.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_ls[k].no_below\n print('stat ls ', h, k, flex_stat[ih].stat_ls[k])\n self.statls_dict[k][it, ih] = list(flex_stat[ih].stat_ls[k].counter.values())\n\n flex_stat[ih].calc_thres_stat()\n for k in list(flex_stat[ih].stat_lat.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_lat[k].no_below\n print('stat_lat ', h, k, flex_stat[ih].stat_lat[k])\n self.statlat_dict[k][it, ih] = list(flex_stat[ih].stat_lat[k].counter.values())\n\n\n # #assert len(f_list) > 1\n # for ih, f in enumerate(f_list):\n # print(it, ih, f[1], dt)\n # traj = trajectory(self.config)\n # traj.load_file(traj_dir+f[0], silent=True)\n # savepath = '{}/{}'.format(self.config['plot_dir'], dt.strftime('%Y%m%d'))\n\n\n # if \"timeinterval\" in self.config['plotmap']:\n # timeinterval = self.config['plotmap']['timeinterval']\n # else:\n # timeinterval = 12\n # if \"heights\" in self.config['plotmap']:\n # heightlist = self.config['plotmap']['heights']\n # else:\n # heightlist = [1500.0, 3000.0, 4500.0]\n # #if f[1] == 3000.0 and dt.hour % 12 == 0:\n # if f[1] in heightlist and dt.hour % timeinterval == 0:\n # print(\"plotting \", f[1], dt.hour)\n # plot_trajectories_ens(traj, savepath, ls=ls, config=self.config)\n # #continue\n\n # traj.evaluate(silent=True)\n # traj.add_land_sfc(ls, silent=True)\n # traj.add_ensemble_land_sfc(ls)\n # traj.add_ensemble_geo_names(ng)\n # #traj.add_area_land_sfc('md', ls, silent=True)\n # #traj.add_area_land_sfc(2000, ls, silent=True)\n\n # #print(\"at step\", it, dt, ih, f)\n # #print('keys ', traj.statistics.keys())\n # # now the empty dict is filled with the keys (and values) of the statistics dict from traj\n # for k in list(traj.statistics.keys()):\n # self.stat2d_dict[k][it, ih] = traj.statistics[k]\n # # subset of trajectory data to collect\n # param_collect = ['latitude', 'longitude', 'height', \"PRESSURE\", \"AIR_TEMP\",\n # \"RAINFALL\", \"RELHUMID\", \"TERR_MSL\", 'age']\n # if 'land_sfc_category' in list(traj.data.keys()):\n # param_collect.append('land_sfc_category')\n # for k in param_collect:\n # #self.raw_dict[k][it, ih, :traj.data[1][k].shape[0]] = traj.data[1][k]\n # self.raw_dict[k][it, ih, :] = traj.data[1][k]\n # #self.raw_dict[k][it, ih, traj.data[1][k].shape[0]:] = -999.\n\n # for k in list(traj.stat_ls.keys()):\n # 
self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_ls[k].no_below\n # print('stat ls ', k, traj.stat_ls[k])\n # self.statls_dict[k][it, ih] = list(traj.stat_ls[k].counter.values())\n\n # for k in list(traj.stat_gn.keys()):\n # self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_gn[k].no_below\n # print('stat gn ', k, traj.stat_gn[k])\n # self.statgn_dict[k][it, ih] = list(traj.stat_gn[k].counter.values())\n\n # trying to free memory\n del ls\n del ng", "def __init__(self, extra_fields=None):\n if extra_fields:\n self.fields.extend(extra_fields)\n self.data = {k: [] for k in self.fields}\n self.last_r = 0.0", "def __init__(self, n):\n\n self.name = n\n self.timelines = {}", "def recorded_timestamps(self):\n return sorted(self.reception_records.keys())", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. 
which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')", "def createTimeBlocks(availableTimesDict):\n\n # get all keys of the dict\n availableTimesList = list(availableTimesDict.keys())\n # store first key\n firstSlot = availableTimesList[0]\n # store first key as start of first time block\n timeSlots = [[firstSlot]]\n\n # loop through all keys\n for i in range(len(availableTimesList) - 1):\n key = availableTimesList[i]\n # if a number(minute) has no successor which is 1 greater example: 719, 950\n # then the current number is the end of a time block and the following number\n # is the new start of a block\n # ignore all other numbers\n if (key + 1) not in availableTimesList:\n timeSlots[-1].append(key)\n timeSlots.append([availableTimesList[i + 1]])\n del availableTimesDict[key]\n\n # the last number left in the dict is the end of the last time block\n timeSlots[-1].append(list(availableTimesDict.keys())[0])\n return timeSlots", "def _init(self) -> None:\n now = time()\n\n # Timestamps for tracking compute durations by task group.\n # Start with length 2 so that we always can compute a valid dt later.\n self.time = [now] * 2\n # The amount of compute since the last timestamp\n self.compute = {}\n # The number of threads at the time\n self.nthreads = [self.scheduler.total_nthreads] * 2", "def _addinittimes(self, init_times: list):\n if not all([isinstance(ii, datetime.datetime) for ii in init_times]):\n raise ValueError('List object not all datetime.datetime objects, as expected')\n\n if self._forcing_dirs != [] and len(init_times) > 1:\n if len(self._forcing_dirs) != len(init_times):\n raise ValueError(\"Length of init_times does not match that of self._forcing_dirs.\")\n\n self._init_times = copy.deepcopy(init_times)", "def fill_testing_dates(self):\r\n \r\n now = datetime.now()\r\n month = now.strftime('%m')\r\n year = now.year \r\n most_recent_date = '{}-{}-01'.format(year, month)\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}\r\n self.testing_dates[7] = {'cv_start': '2010-10-01', \r\n 'cv_end': '2021-07-01', \r\n 'pred_start': 
'2021-08-01',\r\n 'pred_end': most_recent_date}", "def __init__(self, v, p):\n self.val = v\n self.priority = p\n self.timestamp = TimeSpecifiedItem.ctr[0]\n TimeSpecifiedItem.ctr[0] += 1", "def __initializeData():\n\tdata = OrderedDict()\n\tdata['Saved_LIVE'] = False\n\tdata['Saved_POST'] = False\n\tdata['Time_Written_POST'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\tdata['Time_Written_LIVE'] = datetime.datetime.now().strftime('%m-%d-%Y, %H:%M')\n\treturn data", "def _fill_day_dicts(self):\n today = datetime.date.today()\n for i, record in enumerate(self._dataset):\n if (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=30)).timetuple()):\n self._add_record(self._all30_dict, record, key=i)\n\n elif (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=60)).timetuple()):\n self._add_record(self._all60_dict, record, key=i)\n\n else:\n self._add_record(self._all90_dict, record, key=i)", "def construct_all_positions(self):\n # Creates a dictionary for each symbol, sets a value of 0 for each, adds a datetime key, adds it to a list.\n d = dict((k,v) for k,v in [(s,0) for s in self.symbol_list]) # self.current_positions\n d['datetime'] = self.start_date\n \n return [d]", "def construct_all_positions(self):\n # Creates a dictionary for each symbol, sets a value of 0 for each, adds a datetime key, adds it to a list.\n d = dict((k,v) for k,v in [(s,0) for s in self.symbol_list]) # self.current_positions\n d['datetime'] = self.start_date\n \n return [d]", "def available_timing(filename,day,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #finding occupied hours\n brlist = []\n #for all lines in file\n for k in range(len(incsv)):\n #if venue in line matches desired venue and day in line matches desired day\n if incsv[k][0][7] == venue and int(incsv[k][0][3]) == day:\n #add time range of line into brlist\n brlist.append([int(incsv[k][0][5]),int(incsv[k][0][6])])\n #pruning\n #tlist stands for timelist. stores remaining hours for synthesis\n tlist = []\n #list of hours\n tlist = [600,700,800,900,1000,1100,1200,1300,1400,1500,1600,1700,1800,1900,2000,2100,2200,2300,2400]\n #for line in brlist\n for l in range(len(brlist)):\n #for the range of hours of the line\n for m in range(int((brlist[l][1]-brlist[l][0])/100)):\n #if hours in range still in tlist\n if (brlist[l][0] + m*100) in tlist:\n #remove from tlist\n tlist.remove(brlist[l][0] + m*100)\n #plist for partition list. range of available timings appended here\n plist = []\n #check is for the start time of each available time ranges\n check = 0\n #formation of time ranges\n #for hours in tlist\n for n in range(len(tlist)):\n #if code is checking element 2. 
Could have used range(1,len(tlist)) but nevermind\n if n >= 1:\n #if 2 adjacent hours are not consecutive\n if tlist[n] != (tlist[n-1]+100):\n #add time range to plist\n plist.append((tlist[check],tlist[n-1]+100))\n #set check to next minimum available start time\n check = n\n #adding range with last hour\n #if last hour in tlist is 2400 and precedent hour in tlist is 2300\n if tlist[n] == 2400 and tlist[n-1] == 2300:\n #add time range\n plist.append((tlist[check],2400))\n return plist", "def add_sort2(self, dt, eid, grid, angle, sc, sd, se, sf, omax, omin, mst, msc):\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n itime = self.itotal\n itotal = self.itime\n self._times[itime] = dt\n #print(f'itime={itime} itotal={itotal}; data.shape={self.data.shape}')\n self.data[itime, itotal, :] = [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.element_node[itotal] = [eid, grid]\n #self.ielement += 1\n self.itotal += 1", "def _process_schedule(self, day):\n schedule = []\n\n # We need to sort it\n day.sort()\n\n for lesson in day:\n while len(schedule) + 1 < lesson.order:\n schedule.append(None)\n if lesson.part is None:\n # Full group\n schedule.append(lesson)\n else:\n # Parted group\n if not len(schedule) == lesson.order:\n schedule.append([None, None])\n schedule[-1][lesson.part-1] = lesson\n return schedule", "def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)", "def get_pump_times(self, start):\n pumps_dict = {}\n for pump in self.pumps:\n dataframe_ = pd.DataFrame()\n time = []\n command = []\n for i in range(len(pump.start_intervals)):\n t_on = pump.start_intervals[i].epanet_on_time\n t_off = pump.start_intervals[i].epanet_off_time\n time += [start + t_on * pd.Timedelta(\"1S\"),\n start + t_off * pd.Timedelta(\"1S\")]\n command += [1, 0]\n dataframe_['Time'] = time\n dataframe_[pump.link_id] = command\n pumps_dict[pump.link_id] = dataframe_\n return pumps_dict", "def ablation_times(self):\n ats = {}\n for n in np.arange(self.n) + 1:\n t = self.Time[self.ns == n]\n ats[n - 1] = t.max() - t.min()\n return ats", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def __init__(self, canal_names, akw_time):\n self.lock = threading.RLock()\n self.akw_time = akw_time\n self.dataDict = {}\n self.canal_names = canal_names\n self.nr_of_averages = 1\n self.vis_data_pointer = 0 #how much data from beginin not to visualise\n\n for name in canal_names:\n self.dataDict[name] = [] # create empty list for each canal", "def readSrc_byTime(self):\n for msg in self.srcFile:\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n self.srcHeader.append(msg)\n else:\n msg = msg.split()\n time = float(msg[0])\n meas = msg[1]\n sens = msg[2]\n valu = msg[3]\n if time not in self.srcData: # none from this time yet\n self.srcData[time] = {}\n if sens not in self.srcData[time]: # none at this time from this gSensor\n self.srcData[time][sens] = {}\n self.srcData[time][sens][meas] = valu # assume only one message per meas from sens at a time", "def prepare_timeslots_for_display(timeslots, rooms):\n\n # Populate room_data. 
This collects the timeslots for each room binned by\n # day, plus data needed for sorting the rooms for display.\n room_data = dict()\n all_days = set()\n # timeslots_qs is already sorted by location, name, and time\n for t in timeslots:\n if t.location not in rooms:\n continue\n\n t.layout_width = timedelta_to_css_ems(t.duration)\n if t.location_id not in room_data:\n room_data[t.location_id] = dict(\n timeslots_by_day=dict(),\n timeslot_count=0,\n start_and_duration=[],\n first_timeslot = t,\n )\n rd = room_data[t.location_id]\n rd['timeslot_count'] += 1\n rd['start_and_duration'].append((t.time, t.duration))\n ttd = t.time.date()\n all_days.add(ttd)\n if ttd not in rd['timeslots_by_day']:\n rd['timeslots_by_day'][ttd] = []\n rd['timeslots_by_day'][ttd].append(t)\n\n all_days = sorted(all_days) # changes set to a list\n # Note the maximum timeslot count for any room\n if len(room_data) > 0:\n max_timeslots = max(rd['timeslot_count'] for rd in room_data.values())\n else:\n max_timeslots = 0\n\n # Partition rooms into groups with identical timeslot arrangements.\n # Start by discarding any roos that have no timeslots.\n rooms_with_timeslots = [r for r in rooms if r.pk in room_data]\n # Then sort the remaining rooms.\n sorted_rooms = sorted(\n rooms_with_timeslots,\n key=lambda room: (\n # First, sort regular session rooms ahead of others - these will usually\n # have more timeslots than other room types.\n 0 if room_data[room.pk]['timeslot_count'] == max_timeslots else 1,\n # Sort rooms with earlier timeslots ahead of later\n room_data[room.pk]['first_timeslot'].time,\n # Sort rooms with more sessions ahead of rooms with fewer\n 0 - room_data[room.pk]['timeslot_count'],\n # Sort by list of starting time and duration so that groups with identical\n # timeslot structure will be neighbors. The grouping algorithm relies on this!\n room_data[room.pk]['start_and_duration'],\n # Within each group, sort higher capacity rooms first.\n room.capacity,\n # Finally, sort alphabetically by name\n room.name\n )\n )\n\n # Rooms are now ordered so rooms with identical timeslot arrangements are neighbors.\n # Walk the list, splitting these into groups.\n room_groups = []\n last_start_and_duration = None # Used to watch for changes in start_and_duration\n for room in sorted_rooms:\n if last_start_and_duration != room_data[room.pk]['start_and_duration']:\n room_groups.append([]) # start a new room_group\n last_start_and_duration = room_data[room.pk]['start_and_duration']\n room_groups[-1].append(room)\n\n # Next, build the structure that will hold the data for the view. This makes it\n # easier to arrange that every room has an entry for every day, even if there is\n # no timeslot for that day. This makes the HTML template much easier to write.\n # Use OrderedDicts instead of lists so that we can easily put timeslot data in the\n # right place.\n days = OrderedDict(\n (\n day, # key in the Ordered Dict\n [\n # each value is an OrderedDict of room group data\n OrderedDict(\n (room.pk, dict(room=room, timeslots=[]))\n for room in rg\n ) for rg in room_groups\n ]\n ) for day in all_days\n )\n\n # With the structure's skeleton built, now fill in the data. 
The loops must\n # preserve the order of room groups and rooms within each group.\n for rg_num, rgroup in enumerate(room_groups):\n for room in rgroup:\n for day, ts_for_day in room_data[room.pk]['timeslots_by_day'].items():\n days[day][rg_num][room.pk]['timeslots'] = ts_for_day\n\n # Now convert the OrderedDict entries into lists since we don't need to\n # do lookup by pk any more.\n for day in days.keys():\n days[day] = [list(rg.values()) for rg in days[day]]\n\n return days", "def sort_column(self, column):\n if column == 1: # type\n self.sorted_keys = sorted(self.data_dict.keys(),\n key=lambda x: (self.data_dict[x]['type']),\n reverse=self.sorted_type_top)\n # Invert sorting method\n self.sorted_type_top = not self.sorted_type_top\n\n elif column == 2: # Score\n self.sorted_keys = sorted(self.data_dict.keys(),\n key=lambda x: (float(self.data_dict[x]['score'])),\n reverse=self.sorted_score_top)\n # Invert sorting method\n self.sorted_score_top = not self.sorted_score_top\n\n elif column == 4: # Duration\n d = dict()\n for k in self.sorted_keys:\n duration_string = self.data_dict[k]['duration']\n\n # Get amount of episodes\n if 'episode' in duration_string:\n if 'Some' in duration_string:\n episodes = 0\n else:\n episodes = int(duration_string.split(' episodes')[0])\n else:\n episodes = 1\n\n # Get the duration in minutes\n minutes = 0\n if 'min' in duration_string:\n minutes = int(re.findall('([0-9]+)min', duration_string)[0])\n if 'h' in duration_string:\n minutes += int(re.findall('([0-9]+)h', duration_string)[0]) * 60\n\n # Get total duration of the whole show\n minutes *= episodes\n\n # Store it for sorting\n d[k] = minutes\n\n # Sort titles based on duration\n self.sorted_keys = sorted(d.keys(),\n key=lambda x: d[x],\n reverse=self.sorted_duration_top)\n # Invert sorting method\n self.sorted_duration_top = not self.sorted_duration_top\n\n elif column == 5: # release year\n self.sorted_keys = sorted(self.data_dict.keys(),\n key=lambda x: (float(self.data_dict[x]['released'])),\n reverse=self.sorted_year_top)\n # Invert sorting method\n self.sorted_year_top = not self.sorted_year_top\n\n if column != 2:\n # Make sure next time we click to sort by score,\n # the highest score is on top\n self.sorted_score_top = True\n\n # Redraw the table\n self.setup_tableview()", "def __init__(self):\n self.users = {}\n self.tweetTime = {}\n self.recentMax = 0\n self.time = 0", "def _sort_timeframes(cls, e_df=None, r_df=None, o_df=None):\n timeline = list()\n cls._append_timeline(timeline, e_df, 'E')\n cls._append_timeline(timeline, r_df, 'R')\n cls._append_timeline(timeline, o_df, 'O')\n return sorted(timeline, key=lambda x: x.start)", "def get_timepoints(self):\n return list(sorted(self.mdvtc.keys()))", "def rapl_timeline():\n\n return [{ \"timestamp\": \"2021-10-05T09:14:58.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5558763520.0, \"time_enabled\": 1000770053.0, \"time_running\": 1000770053.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:14:59.226\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 4777050112.0, \"time_enabled\": 2001065535.0, \"time_running\": 2001065535.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:00.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 6847987712.0, \"time_enabled\": 3001449088.0, \"time_running\": 3001449088.0 } } } } },\n { \"timestamp\": 
\"2021-10-05T09:15:01.227\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5054922752.0, \"time_enabled\": 4001882359.0, \"time_running\": 4001882359.0 } } } } },\n { \"timestamp\": \"2021-10-05T09:15:02.228\", \"sensor\": \"toto\", \"target\": \"all\", \"groups\": { \"rapl\": { \"0\": { \"7\": { \"RAPL_ENERGY_PKG\": 5434507264.0, \"time_enabled\": 5002352709.0, \"time_running\": 5002352709.0 } } } } }\n ]", "def __init__(self, simulations):\n self.waiting = defaultdict(float) #resource -> avg. waiting time\n self.numberWaiting = defaultdict(float) #resource -> avg. number waiting\n self.utilization = defaultdict(float) #resource -> avg. utilization\n self.seized = defaultdict(float) #resource -> avg. number seized\n self.avgTotalNumberOut = 0 #total resources processed\n self.avgTotalNumberIn = 0 #total number in\n self.avgTotalWaitingTime = 0 #total avg. waiting time\n self.totalResources = 0 #total number of resources\n self.capacities = 0 #resources per queue\n \n self.capacities = simulations[0].get_capacities()\n self.totalResources = simulations[0].get_resource_count()\n self.n = len(simulations)\n self.maxTime = simulations[0].maxTime\n n = self.n\n \n numberOut = [] #total number out per simulation\n numberIn = [] #total number in per simulation\n waitingTime = [] #total waiting time per simulaiton\n waitingTimeValues = defaultdict(list) #total waiting time per simulation per station\n \n numberSeizedValues = defaultdict(list)\n numberWaitingValues = defaultdict(list)\n utilizationValues = defaultdict(list)\n for simul in simulations:\n \n self.avgTotalNumberOut += simul.get_number_out() / float(n)\n self.avgTotalNumberIn += simul.get_number_in() / float(n)\n self.avgTotalWaitingTime += simul.get_avg_waiting_times() / float(n)\n \n numberOut.append(simul.get_number_out())\n numberIn.append(simul.get_number_in())\n waitingTime.append(simul.get_avg_waiting_times())\n \n \n for key in simul.monitors.keys():\n #waiting time\n try:\n self.waiting[key] += simul.monitors[key].mean() / float(n)\n waitingTimeValues[key].append(simul.monitors[key].mean())\n except ZeroDivisionError:\n self.waiting[key] += 0\n \n for key in simul.resources.keys():\n #number waiting\n self.numberWaiting[key] += simul.get_number_waiting(key) / float(n)#simul.resources[key].waitMon.mean() / float(n)\n numberWaitingValues[key].append(simul.get_number_waiting(key))\n #utilization\n self.utilization[key] += simul.get_utilization(key) /float(n)\n utilizationValues[key].append(simul.get_utilization(key))\n #number seized\n self.seized[key] += simul.monitors[key].count() / float(n)\n numberSeizedValues[key].append(simul.monitors[key].count())\n \n self.halfWidthTotalNumberOut = get_half_width(numberOut)\n self.halfWidthTotalNumberIn = get_half_width(numberIn)\n self.halfWidthTotalWaitingTime = get_half_width(waitingTime) #per simulation\n self.halfWidthWaitingTimes = {}\n self.halfWidthNumberSeized = {}\n self.halfWidthUtilization = {}\n self.halfWidthNumberWaiting = {}\n \n for key in simul.monitors.keys():\n self.halfWidthWaitingTimes[key] = get_half_width(waitingTimeValues[key])\n self.halfWidthNumberSeized[key] = get_half_width(numberSeizedValues[key])\n self.halfWidthNumberWaiting[key] = get_half_width(numberWaitingValues[key])\n self.halfWidthUtilization[key] = get_half_width(utilizationValues[key])", "def constructTimeLineItem(self):\n\t\treturn", "def _set_date_times(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, 
ReportTypes.SEARCH_BODY_REPORT):\n self._report_data['searchDateTime'] = Report._to_report_datetime(self._report_data['searchDateTime'])\n if self._report_data['totalResultsSize'] > 0:\n for detail in self._report_data['details']:\n detail['createDateTime'] = Report._to_report_datetime(detail['createDateTime'])\n if detail.get('declaredDateTime'):\n detail['declaredDateTime'] = Report._to_report_datetime(detail['declaredDateTime'], False)\n declared_value = str(detail['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n detail['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n detail['declaredValue'] = ''\n if detail.get('description') and detail['description'].get('engineerDate'):\n if detail['description']['engineerDate'] == '0001-01-01':\n detail['description']['engineerDate'] = ''\n else:\n detail['description']['engineerDate'] = \\\n Report._to_report_datetime(detail['description']['engineerDate'], False)\n else:\n detail['description']['engineerDate'] = ''\n if detail.get('location') and detail['location'].get('taxExpiryDate'):\n detail['location']['taxExpiryDate'] = \\\n Report._to_report_datetime(detail['location']['taxExpiryDate'], False)\n elif self._report_key == ReportTypes.MHR_REGISTRATION:\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('description') and reg['description'].get('engineerDate'):\n if reg['description']['engineerDate'] == '0001-01-01':\n reg['description']['engineerDate'] = ''\n else:\n reg['description']['engineerDate'] = \\\n Report._to_report_datetime(reg['description']['engineerDate'], False)\n else:\n reg['description']['engineerDate'] = ''\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'], False)\n elif self._report_key in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION,\n ReportTypes.MHR_TRANSPORT_PERMIT, ReportTypes.MHR_NOTE,\n ReportTypes.MHR_ADMIN_REGISTRATION):\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('declaredValue'):\n declared_value = str(reg['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n reg['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n reg['declaredValue'] = ''\n if reg.get('transferDate'):\n reg['transferDate'] = Report._to_report_datetime(reg['transferDate'], False)\n if self._report_key == ReportTypes.MHR_TRANSPORT_PERMIT and reg.get('newLocation'):\n reg['location'] = reg.get('newLocation')\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'],\n False)", "def test_aggregate_times(self):\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 123456\n f2.read_time_start = 456789\n f1.read_time_end = 444\n f2.read_time_end = 555\n f1.write_time_start = 222\n f2.write_time_start = 111\n f1.write_time_end = 666\n f2.write_time_end = 777\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 777)\n\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 456789\n f2.read_time_start = 123456\n f1.read_time_end = 555\n f2.read_time_end = 444\n 
f1.write_time_start = 111\n f2.write_time_start = 222\n f1.write_time_end = 777\n f2.write_time_end = 666\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 777)\n\n # One equals None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = 123456\n f2.read_time_start = None\n f1.read_time_end = 555\n f2.read_time_end = None\n f1.write_time_start = None\n f2.write_time_start = 111\n f1.write_time_end = None\n f2.write_time_end = 666\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 123456)\n self.assertEqual(f1.read_time_end, 555)\n self.assertEqual(f1.write_time_start, 111)\n self.assertEqual(f1.write_time_end, 666)\n\n # The other equals None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = None\n f2.read_time_start = 456789\n f1.read_time_end = None\n f2.read_time_end = 444\n f1.write_time_start = 222\n f2.write_time_start = None\n f1.write_time_end = 777\n f2.write_time_end = None\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, 456789)\n self.assertEqual(f1.read_time_end, 444)\n self.assertEqual(f1.write_time_start, 222)\n self.assertEqual(f1.write_time_end, 777)\n\n # Both equal None\n f1 = DarshanIngestedJobFile(\"job\")\n f2 = DarshanIngestedJobFile(\"job\")\n f1.read_time_start = None\n f2.read_time_start = None\n f1.read_time_end = None\n f2.read_time_end = None\n f1.write_time_start = None\n f2.write_time_start = None\n f1.write_time_end = None\n f2.write_time_end = None\n f1.aggregate(f2)\n self.assertEqual(f1.read_time_start, None)\n self.assertEqual(f1.read_time_end, None)\n self.assertEqual(f1.write_time_start, None)\n self.assertEqual(f1.write_time_end, None)", "def _construct_all_holdings(self):\n d = dict((s, 0.0) for s in self.symbol_list)\n d['datetime'] = self.backtest_date\n d['cash'] = self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n d['buy_times'] = 0\n d['sell_times'] = 0\n d['total_times'] = 0\n d['hold'] = 0\n return [d]" ]
[ "0.6283316", "0.6228001", "0.6228001", "0.620398", "0.57670397", "0.57652324", "0.57647645", "0.5753199", "0.57444894", "0.5722699", "0.57000864", "0.5646933", "0.55846196", "0.55585563", "0.5551203", "0.5549234", "0.55354136", "0.5506358", "0.54945", "0.54808986", "0.5460458", "0.54581845", "0.54562086", "0.54480094", "0.5440352", "0.54382914", "0.5409593", "0.5405079", "0.53965575", "0.53959", "0.53439313", "0.5343825", "0.53426635", "0.5336252", "0.532757", "0.53130263", "0.5311266", "0.5302565", "0.5297672", "0.5295022", "0.5285525", "0.5282526", "0.52776235", "0.52515036", "0.5226255", "0.52254313", "0.5224102", "0.5224102", "0.5224102", "0.521879", "0.52151924", "0.5206774", "0.5206602", "0.5202386", "0.5195692", "0.5176208", "0.5166047", "0.51629055", "0.51626045", "0.5155703", "0.5153826", "0.5149558", "0.51364833", "0.51290965", "0.51254946", "0.51134694", "0.51106536", "0.5095676", "0.5091068", "0.5086536", "0.5085876", "0.5083075", "0.5082798", "0.50820124", "0.50741726", "0.50723624", "0.50494057", "0.50474066", "0.5045846", "0.5045846", "0.50370157", "0.5032226", "0.5020084", "0.50183463", "0.5014519", "0.50110966", "0.50091493", "0.50062376", "0.50059146", "0.50020957", "0.5001867", "0.4997794", "0.4994993", "0.49813822", "0.4980418", "0.4980298", "0.49722752", "0.49721006", "0.4971989", "0.49719414" ]
0.5688327
11
Assign names to each epi file based on information in the template.
def AssignEpiNames(self):
    # Sort each run in the series by its acquisition time.
    epi_sort = self.epi_times.keys()
    epi_sort.sort()
    # Rewrite pfiles as an ordered list of p-files to be reconstructed.
    for idx in xrange(len(epi_sort)):
        entry = self.epi_times[epi_sort[idx]]
        info = self.info[entry]
        if info['data_filetype'] == 'ge_data':
            self.pfiles_recon.append(entry)
        info['run'] = '%0d' % (self.n_epi)
        self.n_epi = self.n_epi + 1
        plane = info['plane']
        if not self.epinames.has_key(plane):
            plane = 'any'
        n_epi = self.epinames[plane]['n_epi']
        if n_epi > len(self.epinames[plane]['names'])-1:
            if self.epinames.has_key('any') and \
               n_epi < len(self.epinames['any']):
                plane = 'any'
                n_epi = self.epinames[plane]['n_epi']
            else:
                self.DumpInfo()
                errstr = 'Not enough EPI names in template file'
                raise RuntimeError(errstr)
        # epiname = self.epinames[plane]['names'][n_epi]
        filebase = os.path.basename(self.epinames[plane]['names'][n_epi])
        epi_mf_outdir = os.path.dirname(\
                        self.epinames[plane]['names'][n_epi])
        epi_base = self.epinames[plane]['subdir'][n_epi]
        tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)
        # Get output directory for raw epis.
        if self.no_motcorr:
            epi_r_outdir = epi_mf_outdir
        elif self.keep_epi_raw:
            epi_r_outdir = self.epi_scratch_space
        else:
            epi_r_outdir = tmp_outdir
        # Get output directory for motion-corrected epis.
        if self.keep_epi_mot:
            epi_m_outdir = self.epi_scratch_space
        else:
            epi_m_outdir = tmp_outdir
        info['outdir'] = epi_mf_outdir
        if n_epi < len(self.epinames[plane]['names']):
            epiname = self.epinames[plane]['names'][n_epi]
            info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)
        else:
            info['imgfile'] = '%s/s%0d_epi_run%0d' % \
                              (epi_r_outdir, n_epi, idx+1)
        self.epinames[plane]['n_epi'] += 1
        info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)
        info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)
        info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)
        if self.no_motcorr:
            info['imgfile_m'] = None
            info['imgfile_mf'] = None
            info['imgfile_final'] = info['imgfile']
        else:
            info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)
            if self.no_fmapcorr or info['fmap_entry'] is None:
                info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)
                info['imgfile_mf'] = None
                info['imgfile_final'] = info['imgfile_m']
            else:
                info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)
                info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)
                info['imgfile_final'] = info['imgfile_mf']
        info['skip'] = self.skip
        info['motion_ref_frame'] = self.tmplt['motion_ref_frame']
        info['motion_interp'] = self.tmplt['epi_motion_interp']
        if not info['motion_interp'].startswith('-'):
            info['motion_interp'] = '-%s' % info['motion_interp']
        info['filetype'] = self.tmplt['epi_file_format']
        info['valid'] = True
        self.info[entry] = info
        if not self.no_motcorr:
            epi_base = os.path.basename(info['imgfile_m'])
            info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)
            info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def generate_filename(self):\n file_pattern = os.path.join(self.path, \"TCGA-*\")\n for f in glob(file_pattern):\n organ = get_organ(f)\n for raw_f in glob(os.path.join(f, \"*.tif\")):\n gt_f = raw_f.replace(\".tif\", \".xml\")\n yield raw_f, gt_f, organ", "def make_output_names(self):\n yaml_names = []\n fits_names = []\n\n if self.use_nonstsci_names:\n for i in range(len(self.info['Module'])):\n act = str(self.info['act_id'][i]).zfill(2)\n if self.info['Instrument'][i].lower() == 'niriss':\n det = 'NIS'\n elif self.info['Instrument'][i].lower() == 'fgs':\n det = 'FGS'\n else:\n det = self.info['detector'][i]\n mode = self.info['Mode'][i]\n dither = str(self.info['dither'][i]).zfill(2)\n\n yaml_names.append(os.path.abspath(os.path.join(self.output_dir, 'Act{}_{}_{}_Dither{}.yaml'\n .format(act, det, mode, dither))))\n fits_names.append('Act{}_{}_{}_Dither{}_uncal.fits'.format(act, det, mode, dither))\n\n else:\n for i in range(len(self.info['Module'])):\n if self.info['Instrument'][i].upper() == 'NIRCAM':\n fulldetector = 'nrc{}'.format(self.info['detector'][i].lower())\n else:\n fulldetector = self.info['detector'][i].lower()\n outfilebase = self.create_output_name(self.info, index=i)\n outfile = \"{}{}{}\".format(outfilebase, fulldetector, '_uncal.fits')\n yamlout = \"{}{}{}\".format(outfilebase, fulldetector, '.yaml')\n\n yaml_names.append(yamlout)\n fits_names.append(outfile)\n\n self.info['yamlfile'] = yaml_names\n self.info['outputfits'] = fits_names\n # Table([self.info['yamlfile']]).pprint()", "def _initNames(self):\n self.outselect = os.path.join(self.workpath, 'FT1_selected'+self.suffix+'.fits')\n self.outmktime = os.path.join(self.workpath, 'FT1_filtered'+self.suffix+'.fits')\n self.outltcube = os.path.join(self.workpath, 'LtCube'+self.suffix+'.fits')\n self.outbincub = os.path.join(self.workpath, 'BinCube'+self.suffix+'.fits')\n self.outbinmap = os.path.join(self.workpath, 'CMAP'+self.suffix+'.fits')\n self.outbinexp = os.path.join(self.workpath, 'BinExpMap'+self.suffix+'.fits')\n self.outexpmap = os.path.join(self.workpath, 'ExpMap'+self.suffix+'.fits')\n self.outsrcmap = os.path.join(self.workpath, 'SrcMaps'+self.suffix+'.fits')\n self.outgtlike = os.path.join(self.workpath, 'Results'+self.suffix+'.dat')\n self.outmodel = os.path.join(self.workpath, 'OutModel'+self.suffix+'.xml')\n self.outapert = os.path.join(self.workpath, 'LC_ApPhoto'+self.suffix+'.fits')\n self.outgtmod = os.path.join(self.workpath, 'GtModel'+self.suffix+'.fits')\n self.outresid = os.path.join(self.workpath, 'Resid'+self.suffix+'.fits')\n self.outresig = os.path.join(self.workpath, 'ResSigma'+self.suffix+'.fits')\n self.outtsmap = os.path.join(self.workpath, 'TSMmap'+self.suffix+'.fits')\n return\n # self.outfind = self.dir + self.src + 
'_FindSrc'+self.suffix+'.txt'", "def create_base_templates(outdir, templateEnv):\n for file in ME_TEMPLATES:\n filename = os.path.join(outdir, ME_FILENAME.format(file))\n template = templateEnv.get_template(file + '.go.jinja')\n\n with open(filename, 'w') as f:\n output = template.render(copyright=COPYRIGHT,\n generator_warning=GENERATOR_WARNING,\n package_name=PACKAGE_NAME)\n f.write(output)\n pass", "def test_pnictogen():\n for template in templates:\n template_prefix, extension = os.path.splitext(template)\n for xyz_file in example_xyz_files:\n input_prefix, xyz_file_extension = os.path.splitext(xyz_file)\n\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(xyz_file, xyz_file_extension[1:])\n )\n written_files = pnictogen(mol, input_prefix, template, extension[1:])\n\n assert_equals(type(written_files), list)\n for written_file in written_files:\n assert_equals(type(written_file), str)\n\n written_files2 = pnictogen(mol, input_prefix, template)\n assert_equals(written_files, written_files2)\n\n # Allow use of template in the parent directory\n with cd(\"pnictogen/repo\"):\n mol = Atoms(\n cclib.bridge.cclib2openbabel.readfile(\"../../data/water-dimer.xyz\", \"xyz\")\n )\n written_files = pnictogen(mol, \"../../data/water-dimer\", \"ADF.in\", \"in\")\n\n assert_equals(written_files, [\"../../data/water-dimer.in\"])\n\n main([\"-g\", \"/tmp/hello.world.ORCA.inp\"])\n mol = Atoms(cclib.bridge.cclib2openbabel.readfile(\"data/co.xyz\", \"xyz\"))\n written_files = pnictogen(mol, \"data/co\", \"/tmp/hello.world.ORCA.inp\", foo=\"bar\")\n\n assert_equals(written_files, [\"data/co.inp\"])", "def get_template_names(self): \n product = self.get_object()\n names = ['%s/detail-for-upc-%s.html' % (self.template_folder, product.upc), \n '%s/detail-for-class-%s.html' % (self.template_folder, product.item_class.name.lower()),\n '%s/detail.html' % (self.template_folder)]\n return names", "def file_creator(title_list):\n for file_name in title_list: #title names are retrieved out of genID.txt\n with open (\"nuc_variant_calls/\"+file_name.strip()+\".var\",'w') as x:\n x.write(\"Feature type\\tAlignment length\\tIdentical nucleotides\\tIndel count\\n\") #Table headers.", "def __rename_images(self):\n for idx, image in enumerate(self._values):\n image.partname = '/ppt/media/image%d%s' % (idx+1, image.ext)", "def createExtnNodes(self):\n for parent, dirs, files in os.walk(self.destndir):\n for fname in files:\n filename = os.path.join(parent, fname)\n if os.path.isfile(filename):\n direntry=parent\n #direntry=parent.replace(self.destndir,'',len(self.destndir))\n #direntry = os.path.basename(os.path.abspath(parent))\n self.appendSrcType(direntry, fname)", "def filename(i):\n rand_name = os.path.join(os.getcwd(), \"input-%d.txt\" % i)\n ref_name = os.path.join(os.getcwd(), \"input-%d.ref\" % i)\n return rand_name, ref_name", "def create_files_from_templates(self, model_attributes):\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/%s_%s.py\" % (model_attributes['app_label'], folder_name,\n model_attributes['model_name_slug'], folder_name)\n template_path = \"django_baker/%s\" % (folder_name)\n self.create_file_from_template(file_path, template_path, model_attributes)\n for file_name in [\"base\", \"list\", \"detail\", \"create\", \"update\", \"delete\"]:\n file_path = \"%s/templates/%s/%s_%s.html\" % (model_attributes['app_label'], model_attributes['app_label'],\n model_attributes['model_name_slug'], file_name)\n template_path = \"django_baker/%s.html\" % (file_name)\n 
self.create_file_from_template(file_path, template_path, model_attributes)", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def task_process_department_files():\n for dept in Department.list():\n for file_name, file in dept.files.items():\n yield {\n 'name': f'{dept}:{file_name}',\n 'file_dep': file.dependencies +\n [file.raw_path, util.path.CONFIG_PATH],\n 'targets': [file.processed_path],\n 'actions': [file.process],\n 'clean': True,\n }", "def _prepare_file(self, item_name, page_instructions):\n if item_name not in self.prepared_instructions:\n self.prepared_instructions[item_name] = []\n\n for instruction in getattr(page_instructions, item_name):\n item = copy.copy(instruction)\n\n if 'url' in instruction:\n 
item['location'] = instruction['url']\n\n else:\n template_name = context = process_func = None\n\n if 'process' in instruction:\n process_func = self._get_processing_function(\n instruction.get('process'))\n\n template_name = instruction.get('static', False) or \\\n instruction.get('inline', False)\n\n assert template_name, (\n 'You must provide either \"static\" or \"inline\" properties '\n 'that point to a file, provided object was %r'\n % instruction)\n\n if 'inline' in instruction:\n context = self.context\n else:\n context = None\n\n source, is_cached = self._get_media_source(\n template_name, process_func, context)\n\n if 'css' in item_name and self.make_css_urls_absolute \\\n and not is_cached:\n source = self._fix_css_urls(instruction, source)\n\n if 'static' in instruction:\n location, filename = self._copy_to_media(\n template_name, source)\n item['location'] = location\n elif 'inline' in instruction:\n item['source'] = source\n\n if 'include' in instruction and not \\\n instruction['include']:\n if 'inline' in instruction:\n raise AttributeError('You have specified inline and '\n 'include: false, these really don\\'t make sense '\n 'together')\n continue\n\n self.prepared_instructions[item_name].append(item)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' 
% tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def process_tempita(fromfile):\n if not fromfile.endswith('.in'):\n raise ValueError(\"Unexpected extension: %s\" % fromfile)\n\n from_filename = tempita.Template.from_filename\n template = from_filename(fromfile,\n encoding=sys.getdefaultencoding()) \n\n content = template.substitute()\n\n outfile = os.path.splitext(fromfile)[0]\n with open(outfile, 'w') as f:\n f.write(content)", "def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities", "def prepare_anno(infolder, outfolder, mode=\"split\"):\n\tprint(\"Starting...\")\n\t\n\tinpath = os.path.join(infolder, \"*.xml\")\n\tfilecounter = 0\n\t\n\t# check output folders\n\tif not os.path.exists(outfolder):\n\t\tos.makedirs(outfolder)\n\t\t\n\tout_tei = os.path.join(outfolder, \"temp\")\n\tout_txt = os.path.join(outfolder, \"txt\")\n\t\n\tif not os.path.exists(out_tei):\n\t\tos.makedirs(out_tei)\n\tif not os.path.exists(out_txt):\n\t\tos.makedirs(out_txt)\n\t\t\n\t\n\tfor filepath in glob.glob(inpath):\n\t\tfilecounter+= 1\n\t\tfn = os.path.basename(filepath)[:-4]\n\t\toutfile_x = fn + \".xml\"\n\t\t\n\t\tdoc = etree.parse(filepath)\n\t\t\n\t\tif mode == \"split-1\":\n\t\t\ttransform = etree.XSLT(xslt_TEIwrapper_1)\n\t\telse:\n\t\t\ttransform = etree.XSLT(xslt_TEIwrapper)\n\n\t\tresult_tree = transform(doc)\n\t\tresult = str(result_tree)\n\t\t\n\t\t# create TEI wrapper for future annotation results\n\t\twith 
open(os.path.join(outfolder, \"temp\", outfile_x), \"w\") as output:\n\t\t\toutput.write(result)\n\t\t\t\n\t\t# create one full text file per chapter (or for the whole text)\n\t\ttei = {'tei':'http://www.tei-c.org/ns/1.0'}\n\t\tcligs_id = doc.xpath(\"//tei:idno[@type='cligs']/text()\", namespaces=tei)\n\t\tif mode == \"split-1\":\n\t\t\tresults = doc.xpath(\"//tei:text/tei:body\", namespaces=tei)\n\t\telse:\n\t\t\tresults = doc.xpath(\"//tei:div[ancestor::tei:body][not(descendant::tei:div[not(ancestor::tei:floatingText)])][not(ancestor::tei:floatingText)]\", namespaces=tei)\n\t\t\n\t\tif isinstance(cligs_id, list):\n\t\t\tcligs_id = cligs_id[0]\n\t\telif isinstance(cligs_id, str) == False:\n\t\t\traise ValueError(\"This type (\" + str(type(cligs_id)) + \") is not supported for cligs_id. Must be list or string.\")\n\t\t\n\t\tfor i,r in enumerate(results):\n\t\t\ttransform = etree.XSLT(xslt_extractDIVs)\n\t\t\tresult_tree = transform(r)\n\t\t\tresult = str(result_tree)\n\t\t\t\n\t\t\toutfile = cligs_id + \"_d\" + str(i + 1) + \".txt\"\n\t\t\t\n\t\t\twith open(os.path.join(outfolder, \"txt\", outfile), \"w\") as output:\n\t\t\t\toutput.write(result)\n\t\n\tprint(\"Done. \" + str(filecounter) + \" files treated.\")", "def _generate_src():\n for ext in extensions:\n yield self.src_format[ext](f=\"{}{}\".format(name, ext))", "def make_template(filenames):\n result = {}\n for fn in filenames:\n with open(fn) as f:\n conf = yaml.load(f)\n expand_horizons(result, conf)\n return result", "def _SetAnatNames(self, anat_tgt):\n# Define links to structural image in each output directory.\n for entry in self.entry_map['epi'] + self.entry_map['fmap'] + \\\n self.entry_map['dti'] + self.entry_map['asl']:\n self.info[entry]['anat_link'] = anat_tgt\n\n# Name the normalization source image T1High. Number the rest.\n anat_entries = self.entry_map['anat'][:]\n anat_entries.remove(anat_tgt)\n n_t1high = 1\n for entry in anat_entries:\n if self.info[entry]['type'] == 'T1High':\n# High res T1-weighted, not normalization target. 
Rename it.\n fname = 'T1High_%d' % n_t1high\n fullname = '%s/%s' % (self.info[entry]['outdir'], fname)\n self.info[entry]['imgfile'] = fullname\n self.info[entry]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[entry]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = False\n n_t1high += 1\n fname = 'T1High'\n fullname = '%s/%s' % (self.info[anat_tgt]['outdir'], fname)\n self.info[anat_tgt]['imgfile'] = fullname\n self.info[anat_tgt]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[anat_tgt]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = True\n\n self.anatomical = '%s%s' % (self.info[anat_tgt]['imgfile'], \\\n self.info[anat_tgt]['suffix'])\n# The target for motin correction is the source for spatial normalization.\n self.norm_src = anat_tgt", "def jinja_files(self, val: Pattern):\n self[\"jinja_files\"] = str(val)", "def _read_output_files(self):\n self.manage = {} # Empty the dictionary matching phrases\n self.manage['spin'] = (re.compile(' *net spin of'), self._read_spin)\n self.manage['nelect'] = (re.compile(' *number of electrons'), self._read_nelect)\n self.manage['cellcontents'] = (re.compile(' *Unit Cell'), self._read_cellcontents)\n self.manage['pspots'] = (re.compile(' *Files used for pseudopotentials:'), self._read_pspot)\n self.manage['masses'] = (re.compile(' *Mass of species in AMU'), self._read_masses)\n self.manage['kpoints'] = (re.compile(' *Number of kpoints used'), self._read_kpoints)\n self.manage['kpoint_grid'] = (re.compile(' *MP grid size for SCF'), self._read_kpoint_grid)\n self.manage['finalenergy'] = (re.compile(' *Final energy, E'), self._read_energies)\n self.manage['finalenergy2'] = (re.compile('Final energy ='), self._read_energies2)\n self.manage['finalenergy3'] = (re.compile('Dispersion corrected final energy'), self._read_energies3)\n self.manage['energy_cutoff'] = (re.compile(' *plane wave basis set cut'), self._read_energy_cutoff)\n self.manage['nbands'] = (re.compile(' *number of bands'), self._read_nbands)\n self.manage['pressure'] = (re.compile(' *\\* *Pressure: '), self._read_external_pressure)\n self.manage['opticalDielectric'] = (re.compile(' *Optical Permittivity'), self._read_dielectric)\n self.manage['bornCharges'] = (re.compile(' *Born Effective Charges'), self._read_born_charges)\n # For the .phonon file\n self.manage['frequency'] = (re.compile(' q-pt= 1 0.000000 0.000000 0.000000 1.0000000000 *$'), self._read_frequencies)\n self.manage['nbranches'] = (re.compile(' Number of branches'), self._read_nbranches)\n for f in self._outputfiles:\n self._read_output_file(f)\n return", "def setESFiles(self, eSourceDir = None, verbose = False):\n\n print('\\n***Setting electronic structure files')\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc':\n # Check and set electronic structure file for packaging.\n if '***Missing' in self.nbDetails[key]['jobInfo'][2]:\n self.nbDetails[key]['elecStructure'] = None\n else:\n if eSourceDir is not None:\n # Copy electronic structure files to package using supplied path\n fileName = Path(self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\"))\n self.nbDetails[key]['elecStructure'] = Path(eSourceDir, fileName.name).as_posix()\n\n else:\n # Copy electronic structure files to package, based on full path from original job\n self.nbDetails[key]['elecStructure'] = self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\")\n\n checkList = self.checkFiles(self.nbDetails[key]['elecStructure'])\n\n # If file is 
missing, set to \"missing\"\n if not checkList[0]:\n self.nbDetails[key]['elecStructure'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n\n # If file is present, check also for corresponding files\n else:\n # Assuming above is molden file, check also for corresponding Gamess file\n gFile = Path(self.nbDetails[key]['elecStructure']).with_suffix('.log')\n checkList = self.checkFiles(gFile)\n if checkList[0]:\n # self.nbDetails[key]['elecStructure'].append(gFile.as_posix()) # Set here to append... hopefully works OK with arch update code...\n self.nbDetails[key]['elecStructureGamess'] = gFile.as_posix() # Set here as separate item\n else:\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {gFile.as_posix()}\"\n #\n\n if verbose:\n print(f\"Job {key}: {self.nbDetails[key]['title']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructure']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructureGamess']}\")", "def postpare_anno(infolder, outfolder, mode=\"fl\"):\n\tprint(\"Starting...\")\n\t\n\tif not os.path.exists(infolder):\n\t\traise ValueError(\"The input folder could not be found.\")\n\t\t\n\tin_temp = os.path.join(infolder, \"temp\")\n\tin_anno = os.path.join(infolder, \"annotated_temp\")\n\t\n\tif not os.path.exists(in_temp):\n\t\traise ValueError(\"The folder 'temp' could not be found inside the input folder.\")\n\tif not os.path.exists(in_anno):\n\t\traise ValueError(\"The folder 'annotated_temp' could not be found inside the input folder.\")\n\tif not os.path.exists(outfolder):\n\t\tos.makedirs(outfolder)\n\t\t\n\tfilecounter = 0\t\n\n\t# fetch annotated snippets for each TEI template file\n\tfor filepath in glob.glob(os.path.join(in_temp, \"*.xml\")):\n\t\tprint(\"doing file \" + filepath)\n\t\tfilecounter+= 1\n\t\tfn = os.path.basename(filepath)\n\t\tannofolder = os.path.join(Path(os.path.join(infolder, \"annotated_temp\")).as_uri(), \"\")\n\t\t# which annotation mode are we in?\n\t\tannomode = mode\n\t\t\n\t\tparser = etree.XMLParser(encoding=\"UTF-8\")\n\t\tparser.resolvers.add(FileResolver())\n\t\t\n\t\tdoc = etree.parse(filepath, parser)\n\t\txslt_root = etree.parse(io.StringIO(xslt_joinDIVs), parser)\n\t\t\n\t\ttransform = etree.XSLT(xslt_root)\n\t\t\n\t\tresult_tree = transform(doc, annofolder= \"'\" + annofolder + \"'\", mode= \"'\" + annomode + \"'\")\n\t\tresult = str(result_tree)\n\t\t\n\t\t# save the results\n\t\twith open(os.path.join(outfolder, fn), \"w\") as output:\n\t\t\toutput.write(result)\n\t\n\tprint(\"Done. 
\" + str(filecounter) + \" files treated.\")", "def _extract_template_events(self):\n\t\ttry:\n\t\t\ttable = self.hdf5file[fastq_paths[self.version]['template'] % self.group]\n\t\t\tself.template_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.template_events = []", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. 
adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n 
finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n 
file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def _filename_pre_data(self) -> dict:\n key = []\n remainder = \"\"\n prework = {}\n for i in self.draft_file:\n if i == \"{\":\n remainder = \"\"\n elif i == \"}\":\n key.append(remainder)\n else:\n remainder += i\n list_filename = self.filename.split(\"_\")\n for key, value in zip(key, list_filename):\n prework[key] = value\n self.pre_data = prework", "def populate_file_dict(epObject, uc, fileDict):\r\n fileDict = get_pages(epObject, fileDict)\r\n for url in fileDict['pageUrls']:\r\n soup = make_soup(url)\r\n fileDict = get_embedded_object(soup, fileDict, uc)\r\n fileDict = get_css(soup, fileDict)\r\n fileDict = get_img(soup, fileDict, uc)\r\n return fileDict", "def _custom_template_names(self, template):\n splitted = template.rsplit('/', 1)\n name = 'custom_' + splitted[-1]\n ret = [name]\n if len(splitted) == 2:\n ret.append(splitted[0] + '/' + name)\n return ret", "def _assignUIDs(self):\n for messagePath in self.maildir:\n\n messageFile = os.path.basename(messagePath)\n\n if not messageFile in self.metadata['uids']:\n\n self.metadata['uids'][messageFile] = self.metadata['uidnext']\n\n self.metadata['uidnext'] += 1\n\n self.saveMetadata()", "def handleFileNames(self):\n \n # expand the wild cards - but do not create the full directory path\n # as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)", "def processed_file_names(self):\n if self.force_reprocess == True:\n self.force_reprocess = False\n return 'reprocess.pt'\n \n ''' HR 01/06/22 Workaround to avoid FileNotFoundError '''\n print('self.processed_dir:', self.processed_dir)\n # folder,file = os.path.split(self.processed_dir)\n folder = self.processed_dir\n if not os.path.isdir(folder):\n print(' Making folder', folder)\n os.makedirs(folder)\n \n processedfiles = [f for f in os.listdir(self.processed_dir) if os.path.isfile(\n os.path.join(self.processed_dir, f))]\n if 'pre_filter.pt' in processedfiles:\n processedfiles.remove('pre_filter.pt')\n if 'pre_transform.pt' in processedfiles:\n processedfiles.remove('pre_transform.pt')\n # 'not_implimented.pt' #[f'data_{i}.pt' for i in list(self.data.index)]\n return processedfiles", "def interpretor(file_list):\n for i in range(len(file_list)):\n l_seq = 0\n l_var = 0\n l_ind = 0\n inds = 0 #This variable is used to specify wheter there are more than 1 \"-\" in a row.\n with open(\"alignments/\"+file_list[i],'r') as f:\n regel = f.read().split() #Viewing 
each file as a whole.\n for item in regel[5:]: #Only from the 5th element there is relevant information present.\n if item.startswith(\"*\"):\n l_var += (len(item))\n elif item[0].isupper() or item[0] == \"-\": #only lines that starts with capital letters or - are sequence lines.\n for char in item: #Viewing individual character in list item.\n if char == \"-\" or char.isupper(): \n l_seq += 1\n if char == \"-\":\n inds+=1\n elif char.isupper() and inds != 0: # if inds > 1. This means there are more than 1 \"-\" in a row.\n l_ind+=1 # This is important because the program needs to reconginze this as 1 indel.\n inds = 0 # Reset the indel count.\n\n fill_var_calls(file_list[i],l_seq,l_var,l_ind) #After each iteration the the file_var_calls method is executed.", "def run(self):\n for filepage in self.generator:\n print (filepage)\n filepage.touch()", "def _generate_objects_file(self):\n xmls = glob(f'{ROOT}/Annotations/**/*.xml', recursive=True)", "def get_page_name(self,en_code):\n files_and_names = {}\n for files_named in self.find_enc(en_code):\n search_in_file = open(self.file_location+\"/\"+files_named)\n for line in search_in_file:\n if '# LINKNAME:' in line:\n #print(line)\n new_line = line.split('# LINKNAME:')\n for nl in new_line:\n fnl = nl.strip()\n if fnl is not None:\n files_and_names[files_named] = fnl\n search_in_file.close()\n return files_and_names", "def populate(infile):\n main(infile)", "def testTitleTemplateFindNames(self):\n\n\t\ttests = {\n\t\t\t'${abc.def.1}-$abc-${123}': {\n\t\t\t\t'abc.def.1': ['abc', 'def', 1],\n\t\t\t\t'123': [123]\n\t\t\t},\n\t\t\t'${abc..def} $$ ${qwe}': {'qwe': ['qwe']}\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.getFieldNames(), tests[test])", "def get_template_files(fs, template_type):\n # no template fitting for null runs\n if fs[\"null_run\"]:\n template_type = None\n\n if \"template_type\" in fs:\n if template_type == fs[\"template_type\"]:\n return\n\n fs[\"template_type\"] = template_type\n\n # find all corresponding foreground templates\n if template_type is None:\n fs[\"template_root\"] = None\n fs[\"template_root2\"] = None\n fs[\"template_files\"] = None\n fs[\"template_files2\"] = None\n fs[\"template_noise_root\"] = None\n fs[\"template_noise_root2\"] = None\n fs[\"template_noise_files\"] = None\n fs[\"template_noise_files2\"] = None\n fs[\"num_template\"] = 0\n fs[\"num_template_noise\"] = 0\n else:\n num_template_noise = None\n for hm in [\"1\", \"2\"]:\n suff = \"\" if hm == \"1\" else \"2\"\n troot = os.path.join(\n fs[\"data_root\"],\n \"templates_{}\".format(template_type),\n \"halfmission-{}\".format(hm),\n )\n ### this block is so sims with template type like\n # 353_100_gauss_003 can use ensemble in 353_100_gauss\n tp = template_type.split(\"_\")\n ttype = template_type\n if tp[-1].isdigit():\n if ttype[-7:] not in [\"353_100\", \"217_100\"]:\n ttype = \"_\".join(tp[:-1])\n\n tnroot = os.path.join(\n fs[\"data_root\"],\n \"templates_noise_{}\".format(ttype),\n \"halfmission-{}\".format(hm),\n )\n\n tfiles = []\n tnfiles = []\n for f in fs[\"map_files\"]:\n nfile = f.replace(fs[\"map_root\"], troot)\n if not os.path.exists(nfile):\n raise OSError(\"Missing hm-{} template for {}\".format(hm, f))\n tfiles.append(nfile)\n nfiles = sorted(\n glob.glob(\n f.replace(fs[\"map_root\"], tnroot).replace(\n \".fits\", \"_*.fits\"\n )\n )\n )\n if not len(nfiles):\n raise OSError(\n \"Missing hm-{} template noise for {}\".format(hm, f)\n )\n tnfiles.append(nfiles)\n if num_template_noise is 
not None:\n if len(nfiles) != num_template_noise:\n raise OSError(\n \"Wrong number of template noise sims. \"\n \"Found {} files, expected {}.\".format(\n len(nfiles), num_template_noise\n )\n )\n\n num_template_noise = len(nfiles)\n\n tfiles = np.asarray(tfiles)\n tnfiles = np.asarray(tnfiles)\n fs[\"template_root{}\".format(suff)] = troot\n fs[\"template_files{}\".format(suff)] = tfiles\n fs[\"template_noise_root{}\".format(suff)] = tnroot\n fs[\"template_noise_files{}\".format(suff)] = tnfiles\n\n fs[\"num_template\"] = len(fs[\"template_files\"])\n fs[\"num_template_noise\"] = num_template_noise\n self.log(\n \"Found {} templates in {}\".format(\n fs[\"num_template\"], fs[\"template_root\"]\n ),\n \"info\",\n )\n self.log(\n \"Found {} template noise files in {}\".format(\n fs[\"num_template_noise\"], fs[\"template_noise_root\"]\n ),\n \"info\",\n )\n self.log(\"Template files: {}\".format(fs[\"template_files\"]), \"debug\")\n\n fields = [\n \"template_type\",\n \"template_root\",\n \"template_root2\",\n \"template_files\",\n \"template_files2\",\n \"template_noise_root\",\n \"template_noise_root2\",\n \"template_noise_files\",\n \"template_noise_files2\",\n \"num_template\",\n \"num_template_noise\",\n ]\n for k in fields:\n setattr(self, k, fs[k])", "def tag_file_process(self, multiple_files):\n # the path is now becoming a string since it goes through the UI\n # text entry box, not a list or tuple any more, so we turn it to a\n # list of paths\n file_list = multiple_files.split(' ')\n # the main dictionary to store all tags\n tag_dict = dict()\n rows = []\n # now for all the tag file under the folder(root directory), we load\n # the data into the dictionary\n if len(file_list) == 0:\n tk.messagebox.showwarning('warning', 'no files chosen')\n else:\n for file_path in file_list:\n if os.path.isfile(file_path):\n with open(file_path, 'r', encoding='utf-8') as \\\n current_tag_file:\n # initialize the dictionary and the inner dictionary\n reader = csv.reader(current_tag_file)\n for row in reader:\n # the encode, decode is use to resolve the \"\\ueffa\"\n # BOM-utf8 problem\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n tag_dict[row[0]] = dict()\n rows.append(row)\n # store the tag into the dictionary\n for row in rows:\n # the 1st column is the main key(mob fact col name)\n # the 2nd column is the tag id\n # the 3rd column is the tag with real meaning\n tag_dict[row[0]][row[1]] = row[2]\n\n else:\n tk.messagebox.showinfo('warning', 'can not obtain: ' +\n file_path)\n return tag_dict", "def files(self, ending='.sif'):\n for f in sorted(os.listdir(self.path)):\n if f.endswith(ending):\n self.file_name = f\n yield f", "def test_main():\n for template in templates:\n main([\"-g\", template])\n\n # One at a time\n for xyz_file in example_xyz_files:\n main([template, xyz_file])\n\n # All at once\n main([template] + list(example_xyz_files))\n\n # Allow use of template in the parent directory\n with cd(\"data\"):\n main([\"../pnictogen/repo/ADF.in\", \"water-dimer.xyz\"])", "def get_template_names(self):\n name = self.__class__.__name__.replace(\"DatatableView\", \"\")\n name = re.sub(r'([a-z]|[A-Z]+)(?=[A-Z])', r'\\1_', name)\n return [\"demos/\" + name.lower() + \".html\", \"example_base.html\"]", "def _set_templates(spm_dir=SPM_DIR):\n global EPI_TEMPLATE, T1_TEMPLATE, GM_TEMPLATE, WM_TEMPLATE, CSF_TEMPLATE\n\n spm_version = _get_version_spm(SPM_DIR)\n\n # Set the tpm and template paths according to SPM version\n if spm_version == 'spm12':\n template_path = 'toolbox/OldNorm'\n 
tpm_path = 'toolbox/OldSeg'\n else:\n template_path = 'templates'\n tpm_path = 'tpm'\n\n # configure template images\n EPI_TEMPLATE = os.path.join(SPM_DIR, template_path, 'EPI.nii')\n SPM_T1_TEMPLATE = os.path.join(SPM_DIR, template_path, 'T1.nii')\n T1_TEMPLATE = \"/usr/share/data/fsl-mni152-templates/avg152T1.nii\"\n if not os.path.isfile(T1_TEMPLATE):\n T1_TEMPLATE += '.gz'\n if not os.path.exists(T1_TEMPLATE):\n T1_TEMPLATE = SPM_T1_TEMPLATE\n GM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'grey.nii')\n WM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'white.nii')\n CSF_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'csf.nii')", "def setup(self):\n # Call the baseclass setup to resolve any selections\n super().setup()\n\n self.outcont = None\n\n # If we are returning the same file for every iteration,\n # then load that file now.\n if self.only_prefix:\n filename = self.prefix\n\n split_ext = os.path.splitext(filename)\n if split_ext[1] not in [\".h5\", \".hdf5\"]:\n filename = split_ext[0] + \".h5\"\n\n # Load file into outcont attribute\n self.outcont = self._load_file(filename)\n\n else:\n self.prefix = os.path.splitext(self.prefix)[0]", "def _generateExtensionConfigFilePointers(self, eggFileName):\n # Always use forward slashes in eggs\n sep = \"/\"\n eggFile = pylabsZipFile(eggFileName)\n for internalFileName in eggFile.namelist():\n parts = internalFileName.split(sep)\n if parts and parts[-1] == self.extensionConfigName:\n # construct egg path i.e.\n # /opt/qbase2/lib/pylabs/extensions/my_extension.egg/my_first_extension/\n # This format is supported by the eggfile module\n path = sep.join([eggFileName] + parts[:-1])\n yield eggFile.open(internalFileName), path", "def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory", "def set_institutes(self):\n\n if develope_mode:\n print(help(self.set_institutes))\n\n for (dirpath, dirnames, filenames) in os.walk(\"../Finanzexplorer-Git-data/Institute\"):\n filenames = [x for x in filenames\n if \"Haushaltsb\" in x\n and \".xlsx\" in x\n and \"Template\" not in x\n and \"Haushaltsbücher_MPG_gesamt.xlsx\" not in x\n and \"_All\" not in x]\n for f in filenames:\n if f.split(\"_\")[1] == \"MPI\":\n name = f[21:len(f)-5].lower()\n else:\n name = f[17:len(f) - 5].lower()\n name = name.capitalize()\n\n self.institute[name][\"path\"] = os.path.join(dirpath, f)\n self.institute[name][\"file\"] = f", "def _pname_and_metadata(in_file):\n\n\n if in_file.endswith(\".csv\"):\n raise ValueError(\"Did not find input metadata file: %s\" % in_file)\n base, md, global_vars = in_file, {}, {}\n md_file = None\n return base, md, global_vars, md_file", "def extract_file_name(self, input_file):\n self.file_name_with_ext, 
self.file_name = extract_file_name(input_file)", "def get_file_inter_name(self):\n\t\td = DIImportExternal.get_file_inter_name(self)\n\t\td,_ = os.path.split(d)\n\t\tf,_ = os.path.splitext(self.file)\n\t\treturn \tos.path.join(d,f+'.odt')", "def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)", "def _items():\n for writer in writers:\n name = pm.get_manifest(writer.command).display_name\n title = (\n f\"{name} {writer.display_name}\"\n if writer.display_name\n else name\n )\n yield title, writer.filename_extensions", "def generate_metadata_files(self):\n\n data_folder = self.get_data_folder(mode='absolute')\n\n parents = (data_folder / '_').parents\n\n for mfile in self.mdata:\n for regex, level in METADATA_LEVEL_BY_NAME.items():\n if re.compile(regex).match(mfile.name):\n create_file(mfile, parents[(3-level)] / mfile.name,\n mode='copy')", "def elastixTemplates():\n\t\ttransformations = []\n\t\tfileNames = os.listdir(AppVars.transformationsPath())\n\t\tfor fileName in fileNames:\n\t\t\tfullFileName = os.path.join(AppVars.transformationsPath(), fileName)\n\t\t\ttransformation = ParameterList()\n\t\t\tif transformation.loadFromFile(fullFileName):\n\t\t\t\ttransformations.append(transformation)\n\t\treturn transformations", "def test_template_name():\n for t in templates:\n assert len(t.name) > 0", "def populate_titles(self,owner):\n if not owner in self.titles:\n try:\n a=self._get_plans_generator(owner)\n self.titles[owner]=[]\n for group in a:\n self.titles[owner].append(group[\"title\"])\n except:\n logging.warning(f\"could not get existing plans from groupId: {owner}\")\n self.titles[owner]=[]", "def fname_tsk1(tup): #Task 1 & 2\n fname1 = f\"file_{str(tup[0]).zfill(3):}: {tup[1]:.2f}, {tup[2]:.2e}, {tup[3]:.2e}\"\n return(fname1)", "def parse_names(lines, oti_file_name):\n print \" * Parsing names\"\n # Read the real texture file names form the file.\n real_names = []\n if os.path.isfile(oti_file_name):\n with open(oti_file_name, \"rU\") as oti_fd:\n real_names = oti_fd.read().splitlines()\n\n names = {}\n for i, line in enumerate(lines):\n name = \".\"\n if i < len(real_names):\n name = real_names[i]\n names[\"%s\" % i] = {\"alias\": line, \"name\": name}\n return names", "def bulk_rename(current_path,casetype):\n\tclick.echo(current_path)\n\tfilenames = os.listdir(current_path) \n\n\tfor filename in filenames:\n\t\tif filename != 'file_organizer0.03.py':\n\t\t\tif casetype == 'lower':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\t\t\telif casetype == 'upper':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.upper())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").upper())\n\t\t\t\t\n\t\t\telif casetype == 'title':\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.title)\n\t\t\t\tos.rename(filename,filename.replace(\" 
\",\"-\").title())\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tclick.secho('Renaming ::> {} to same name in {} case'.format(filename,casetype),fg='green')\n\t\t\t\tclick.echo(filename.lower())\n\t\t\t\tos.rename(filename,filename.replace(\" \",\"-\").lower())\n\n\tclick.secho('Finished Renaming to {} case!!'.format(casetype),bg='blue',fg='white')", "def rewrite(self):\n for f in self.files:\n metadata = dict()\n metadata[\"description\"] = f.metadata.get(\"desc\", \"Unknown\")\n metadata[\"script\"] = os.path.basename(f.filename)\n metadata[\"requires\"] = []\n for package, component in f.requires:\n if package == self.key:\n metadata[\"requires\"].append(\"/\" + component)\n else:\n metadata[\"requires\"].append(package + \"/\" + component)\n metadata[\"provides\"] = [ p[1] for p in f.provides ]\n # Resolve symlinks\n real_filename = os.path.realpath(f.filename)\n LOG.info(\"Editing: \" + real_filename)\n new_filename = f.filename + \".new\"\n new = file(new_filename, \"w\")\n new.write(\"/*\\n---\\n\")\n new.write(yaml.dump(metadata))\n new.write(\"\\n...\\n*/\\n\")\n new.write(file(f.filename).read())\n new.close()\n os.rename(new_filename, real_filename)\n\n package_data = dict()\n package_data[\"name\"] = self.key\n package_data[\"sources\"] = []\n package_data[\"version\"] = \"Unknown\"\n package_data[\"copyright\"] = \"Unknown\"\n package_data[\"description\"] = \"Unknown\"\n target_dir = os.path.dirname(self.scripts_json_filename)\n # package.yml is typically in the parent of the scripts.json dir\n if os.path.basename(target_dir) == \"Source\":\n target_dir = os.path.dirname(target_dir)\n target_filename = os.path.join(target_dir, \"package.yml\")\n for f in self.files:\n common = os.path.commonprefix([target_filename, f.filename])\n source_file = f.filename[len(common):]\n package_data[\"sources\"].append(source_file)\n LOG.info(\"Writing: \" + target_filename)\n out = file(target_filename, \"w\")\n out.write(yaml.dump(package_data))\n out.close()", "def create_init_files(self, app_label, model_names, models):\n model_name_slugs = [\"%s_views\" % (self.camel_to_slug(model_name)) for model_name in model_names]\n model_names_dict = {self.camel_to_slug(model.__name__): self.camel_to_slug(self.model_name_plural(model)) for\n model in models}\n for folder_name in [\"views\", \"urls\"]:\n file_path = \"%s/%s/__init__.py\" % (app_label, folder_name)\n template_path = \"django_baker/__init__%s\" % folder_name\n self.create_file_from_template(file_path, template_path, {\"app_label\": app_label,\n \"model_name_slugs\": model_name_slugs,\n \"model_names_dict\": model_names_dict\n })", "def build(self) -> None:\n def do_process(fname) -> bool:\n for sfx in skip_suffixes:\n if fname.endswith(sfx):\n return False\n return True\n\n for dirpath, _, fnames in os.walk(self.template_dir):\n for fname in fnames:\n if do_process(fname):\n self.process(dirpath, fname)", "def main():\n parser = ArgumentParser(description=\"pre-process nexus templates\")\n parser.add_argument(\n \"nexus_templates\",\n nargs=\"+\",\n help=\"Nexus template files to process\",\n )\n args = parser.parse_args()\n\n for template_file in args.nexus_templates:\n preprocess_template(template_file)", "def _resolve_list_names(self, dirname='',\n dirname_extinfo=None,\n grisim='',\n grism_extinfo=None,\n in_sex=None,\n out_sex=None):\n # compose the default name\n if in_sex is None:\n # compose the filename from the direct image name\n in_sex = dirname.replace(\".fits\", \"_{0:d}.cat\"\n .format(dirname_extinfo['axe_ext']))\n\n # check whether 
the explicitly given filename exists\n if not os.path.isfile(in_sex):\n err_msg = (\"The Input Object List: {0:s} does not exist!\"\n .format(in_sex))\n raise aXeError(err_msg)\n\n if out_sex is None:\n # compose the name for the output GOL\n out_sex = os.path.basename(grisim).replace(\".fits\", \"_{0:d}.cat\".format(grism_extinfo['axe_ext']))\n\n # return the IOL and the GOL names\n return in_sex, out_sex", "def _create_namelist_ncep_post(case, confdir, config, infile, nmlgen, nmlgen_model_configure, namelist_user):\n####################################################################################\n #----------------------------------------------------\n # Clear out old data.\n #----------------------------------------------------\n data_list_path = os.path.join(case.get_case_root(), \"Buildconf\", \"ufsatm.input_data_list\")\n\n #----------------------------------------------------\n # Initialize namelist defaults\n #----------------------------------------------------\n nmlgen.init_defaults(infile, config)\n\n #----------------------------------------------------\n # Write out namelist groups\n #----------------------------------------------------\n groups=['nampgb']\n\n # Make input format for post-processing consistent with model output\n output_file = nmlgen_model_configure.get_value('output_file')\n if 'netcdf' in output_file:\n nmlgen.set_value('ioform', 'netcdf')\n elif 'nemsio' in output_file:\n nmlgen.set_value('ioform', 'binarynemsiompiio')\n\n # Query start date and time\n run_start_date = case.get_value('RUN_STARTDATE').split('-')\n yy = run_start_date[0]\n mm = run_start_date[1]\n dd = run_start_date[2]\n run_start_tod = int(case.get_value('START_TOD'))\n hh = run_start_tod//3600\n mi = (run_start_tod-hh*3600)//60\n ss = run_start_tod-hh*3600-mi*60\n\n # Overwrite user_nl_ufsatm changes\n nmlgen = nmlOverwrite(namelist_user, nmlgen)\n\n # Create namelist file for first time step / template script will update it for specific date\n namelist_file = os.path.join(confdir, \"itag.tmp\")\n nmlgen.write_output_file(namelist_file, data_list_path, groups=groups, sorted_groups=False)\n\n # Add header section to namelist\n with open(namelist_file, 'r+') as f:\n content = f.read()\n f.seek(0,0)\n f.write(nmlgen.get_value('filename')+\"\\n\")\n f.write(nmlgen.get_value('ioform')+\"\\n\")\n f.write(nmlgen.get_value('outform')+\"\\n\")\n f.write(\"{}-{}-{}\".format(yy,mm,dd)+\"_\"+\"{hh:02d}:{mm:02d}:{ss:02d}\".format(hh=hh,mm=mi,ss=ss)+\"\\n\")\n f.write(nmlgen.get_value('modelname')+\"\\n\")\n f.write(nmlgen.get_value('filenameflux')+\"\\n\")\n f.write(content)\n\n # Check/correct task count used for post-processing\n atm_grid = case.get_value(\"ATM_GRID\").replace('r', '')\n mach = case.get_value(\"MACH\")\n\n # Specific fix for Stampede2\n tasks_per_node = int(case.get_value(\"MAX_TASKS_PER_NODE\"))\n if (\"C384\" in atm_grid or \"C768\" in atm_grid) and \"stampede2\" in mach:\n tasks_per_node = 24\n case.set_value(\"tasks_per_node\", str(tasks_per_node), subgroup=\"case.gfs_post\")\n case.flush()\n logger.info(\"NCEP Post tasks per node is changed to {}!\".format(tasks_per_node))\n\n task_count = {\"C96\": tasks_per_node, \"C192\": tasks_per_node, \"C384\": tasks_per_node*2, \"C768\": tasks_per_node*4}\n if atm_grid in task_count.keys():\n case.set_value(\"task_count\", str(task_count[atm_grid]), subgroup=\"case.gfs_post\")\n case.flush()\n logger.info(\"NCEP Post task count is changed to {}!\".format(task_count[atm_grid]))", "def do_files(self, args):\n file_names = 
self.regexprutils.get_file_names()\n print 'File names:'\n for name in file_names:\n print ' %s' % (name, )", "def on_get(self, req, resp):\n resp.set_header('Content-Type', 'text/json')\n diaries_paths = encode.get_files_in_directory(DIARIES_TO_CREATE_DIR, \".pdf\")\n\n def extract_file_name(path): return os.path.basename(path)\n resp.body = json.dumps({\"templates_file_names\": list(map(extract_file_name, diaries_paths)),\n \"templates_paths\": diaries_paths})", "def main():\n with open(\"page_data.yaml\", 'r') as inputstr:\n config_data = yaml.safe_load(inputstr)\n ointf = OutputInterface('template.txt')\n table_data = get_song_artist_matches()\n ofilen = config_data['directory'] + os.sep + 'common_songs.html'\n title = 'Song Titles and Band Name Overlap'\n header = ['No.', 'Artist', 'Peak', 'Date', 'Song/Artist', 'Peak',\n 'Date', 'Song']\n ointf.build_page(ofilen, title, header, fmt_table(table_data))\n ointf.inject(XTRAEDIT)\n ointf.output()", "def inject_files():\n for filename, arcname in INJECT_FILES.items():\n filename = os.path.join('bee2', 'inject', filename)\n if os.path.exists(filename):\n yield filename, arcname\n\n # Additionally add files set in the config.\n for prop in CONF.find_children('InjectFiles'):\n filename = os.path.join('bee2', 'inject', prop.real_name)\n if os.path.exists(filename):\n yield filename, prop.value", "def filenames(self):\n pass", "def defineProcessTemplates(histos):\n\n templates=[]\n\n #nominal\n templates.append( histos[0] )\n nomStats=templates[-1].Integral()\n\n #systematic variations\n #if Up/Down already in the name store directly updating the name\n #if not, mirror the variation given \n for i in xrange(1,len(histos)): \n templates.append( histos[i] )\n key=templates[-1].GetName()\n if not 'Up' in key and not 'Down' in key :\n templates[-1].SetName(key+'Up')\n templates.append( histos[i].Clone(key+'Down') )\n for xbin in range(templates[0].GetNbinsX()):\n templates[-1].SetBinContent(xbin+1,2*templates[0].GetBinContent(xbin+1)-templates[-2].GetBinContent(xbin+1))\n \n #don't leave bins with 0's\n for h in templates:\n h.SetDirectory(0)\n iStats=h.Integral()\n if iStats>0: h.Scale(nomStats/iStats)\n for xbin in range(h.GetNbinsX()):\n if h.GetBinContent(xbin+1)>0: continue\n h.SetBinContent(xbin+1,1e-6)\n \n return templates", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def gen_metadata(args):\n with open(args.bibfile) as bibfile:\n bib_db = BibTexParser(common_strings=True).parse_file(bibfile)\n entries = sorted(list(bib_db.entries),\n key=lambda x: x['year'], reverse=True)\n list([update_file(entry) for entry in entries])\n annotations = [entry_to_annotation(entry, args.PI) for entry in entries]\n stream = open(args.metadata, 'w')\n yaml.dump(annotations, stream, width=192, default_flow_style=False)\n stream.close()", "def content(tmp_loc, ref_names_dict, order):\n \n fl = '[Content_Types].xml'\n inp_path = '/'.join([tmp_loc, fl])\n out_path = '/'.join([output_path, fl])\n \n cnt_lst = []\n asset_lst = []\n def_att = []\n d = dict()\n \n root1,tree1 = gen_tree(inp_path)\n root2,tree2 = gen_tree(out_path)\n \n # get all the extensions belongs to \"Default\" tag\n for relation in root2:\n if 'Default' in relation.tag:\n def_att.append(relation.attrib['Extension'])\n else:\n break\n \n for relation in root1:\n if 'Override' in relation.tag:\n attrib = relation.attrib['PartName'][1:]\n try:\n cnt = attrib.split('ppt/')[-1]\n ini = '/ppt/'\n 
except:\n cnt = attrib\n ini = '/'\n if cnt in ref_names_dict.keys():\n relation.attrib['PartName'] = f'{ini}{ref_names_dict[cnt]}'\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['PartName'])\n else:\n cnt_lst.append(relation)\n if relation.attrib['PartName'] not in asset_lst:\n asset_lst.append(relation.attrib['PartName'])\n else:\n attrib = relation.attrib['Extension']\n if attrib not in def_att:\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['Extension'])\n # deal with the assest_lst\n # print(\"AA: \", asset_lst)\n cnt_lst = natsort.natsorted(cnt_lst)\n for ele in cnt_lst:\n prev = tree2.find(ele.tag)\n prev.addnext(ele)\n \n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n unq_attr = []\n for relation in root2:\n if 'Override' in relation.tag:\n if relation.attrib['PartName'] not in unq_attr:\n unq_attr.append(relation.attrib['PartName'])\n else:\n root2.remove(relation)\n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)", "def FileNameToFile(files):\n files = files.replace('%20%28ja%29', '.ja')\n if files in up_list:\n if files == 'UserManual':\n return \"index.html\"\n elif files == 'UserManual.ja':\n return \"index.ja.html\"\n else:\n return files.lower() + \".html\"\n else: # modules\n sol = files.replace('.py', '').replace('%2F', '_')\n return 'modules/' + sol + '.html'", "def __rename_slides(self):\n for idx, slide in enumerate(self._values):\n slide.partname = '/ppt/slides/slide%d.xml' % (idx+1)", "def _substitute_template_parts(template_code):\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_parts = [\n 'v2/style',\n 'v2/styleMail',\n 'v2/stylePage',\n 'v2/stylePageCover',\n 'v2/stylePageDraft',\n 'v2/stylePageMail',\n 'v2/stylePageRegistration',\n 'v2/stylePageRegistrationDraft',\n 'stylePageMail',\n 'logo',\n 'macros',\n 'registrarSignature',\n 'registration/details',\n 'registration/givingNoticeParty',\n 'registration/location',\n 'registration/notes',\n 'registration/owners',\n 'registration/sections',\n 'registration/submittingParty',\n 'search-result/details',\n 'search-result/location',\n 'search-result/notes',\n 'search-result/owners',\n 'search-result/pprRegistrations',\n 'v2/search-result/selected',\n 'search-result/sections',\n 'v2/search-result/registration',\n 'search-result-ppr/financingStatement',\n 'search-result-ppr/amendmentStatement',\n 'search-result-ppr/changeStatement',\n 'search-result-ppr/renewalStatement',\n 'search-result-ppr/dischargeStatement',\n 'search-result-ppr/securedParties',\n 'search-result-ppr/courtOrderInformation',\n 'search-result-ppr/debtors',\n 'search-result-ppr/registeringParty',\n 'search-result-ppr/vehicleCollateral',\n 'search-result-ppr/generalCollateral'\n ]\n\n # substitute template parts - marked up by [[filename]]\n for template_part in template_parts:\n if template_code.find('[[{}.html]]'.format(template_part)) >= 0:\n template_part_code = Path(f'{template_path}/template-parts/{template_part}.html').read_text()\n for template_part_nested in template_parts:\n template_reference = '[[{}.html]]'.format(template_part_nested)\n if template_part_code.find(template_reference) >= 0:\n path = Path(f'{template_path}/template-parts/{template_part_nested}.html')\n template_nested_code = path.read_text()\n template_part_code = template_part_code.replace(template_reference, template_nested_code)\n template_code = template_code.replace('[[{}.html]]'.format(template_part), 
template_part_code)\n\n return template_code", "def _load_personas(self, names, is_custom=False):\n names = names or [path.stem for path in\n self.persona_dir[is_custom].iterdir()\n if path.is_dir()]\n for name in names:\n try:\n self.update_persona_dicts(self.process_name(name),\n is_custom=is_custom)\n except:\n warnings.warn(f'Could not load files for {name}.')", "def append_subfiles(self, di, lines, lang):\n if lang==\"en\":lang_index=1\n elif lang==\"it\":lang_index=2\n try:\n #if di[0] in self.name_stripped:\n for key in TREE.keys():\n if di[0]==key and di[0] in self.name_stripped:\n deco=[\"&nbsp;&nbsp;|-&nbsp;\" for i in range(len(TREE[key])-1)]\n deco.append(\"&nbsp;&nbsp;`-&nbsp;\")\n for d, fi in zip(deco, TREE[key]):\n bough=\"%s%s\" % (d,fi[lang_index])\n path=\"%s%s/%s/%s\" % (self.backstring, lang, di[0], fi[0])\n lines.append(\"%s %s\" % (path, bough))\n except IndexError:\n pass\n return lines", "def process_file(src_file, dest_file):\n # read data\n with open(src_file) as fil:\n new_data = fil.read()\n # generate a chain of templates\n parent_template = None\n current_template = dest_file\n cursor = 1\n if EXTEND_FLAG in new_data:\n new_data = new_data.replace(EXTEND_FLAG, \"\")\n while exists(current_template):\n parent_template = current_template\n current_template = \"%s%s%d\" % (dest_file, CHILD_TPL_FLAG, cursor)\n cursor += 1\n # write data\n with open(current_template, \"w\") as fil:\n if parent_template:\n # in the chain of templates each has to extend one another\n new_data = \"\\n\".join([\n \"{%% extends \\\"%s\\\" %%}\" % parent_template,\n new_data\n ])\n fil.write(new_data)", "def detail_pages(f, e):\n template = e.get_template(TEMPLATES['detail'])\n for file in f:\n write_file(file['url'], template.render(entry=file, entries=f))", "def fill_ui_with_filename_template(self, filename_template):\n if False:\n from stalker import FilenameTemplate\n\n assert isinstance(filename_template, FilenameTemplate)\n\n self.name_line_edit.setText(filename_template.name)\n self.path_line_edit.setText(filename_template.path)\n self.filename_line_edit.setText(filename_template.filename)", "def populate_proper_names():\n in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'proper_names')\n in_file = os.path.join(in_dir, 'all.txt')\n names = []\n counter = 0\n with open(in_file) as filehandle:\n for line in filehandle:\n data = line.strip().split('\\t')\n if len(data) == 3:\n counter += 1\n sortable, name, common = data\n if common.lower() == 'true':\n common = True\n else:\n common = False\n\n names.append(ProperName(lemma=name,\n sort=sortable,\n common=common))\n if counter % 1000 == 0:\n ProperName.objects.bulk_create(names)\n names = []\n\n ProperName.objects.bulk_create(names)", "def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename", "def _prepare_assets(self, page_instructions, assets=None):\n assert type(assets) == tuple or type(assets) == list\n\n for yaml in page_instructions.yaml:\n # yaml = app/page/page.yaml\n template, origin = loader.find_template(yaml)\n filepath = template.origin.name\n\n # /Users/me/Development/app/templates/app/page/page.yaml\n yaml_basedir = os.path.dirname(yaml)\n # app/page\n template_basedir = filepath[:filepath.find(yaml)]\n # /Users/me/Development/app/templates\n\n for asset in assets:\n # directory = 
/media/js/templates\n if not yaml_basedir in asset:\n # The user might be specifying the directory relative to\n # the yaml file itself, so we'll add it for them if they\n # gave us something like 'media/js/templates'\n directory = os.path.join(yaml_basedir, asset)\n else:\n directory = asset\n\n sourcedirectory = os.path.join(template_basedir, directory)\n\n if not os.path.isdir(sourcedirectory):\n # We're going to try and find it somewhere else, it may not\n # be relative to the YAML file\n #\n # This is quite possible if the yaml file is processing a\n # \"chirp:\" attribute.\n try:\n sourcedirectory = find_directory_from_loader(\n page_instructions, asset)\n # We need to reset this, it has the yaml_basedir on it\n # at this point\n directory = asset\n except TemplateDoesNotExist:\n continue\n\n if not os.path.isdir(sourcedirectory):\n continue\n\n cachedirectory = os.path.join(self.cache_root, directory)\n\n if os.path.isdir(cachedirectory):\n if self._assets_are_stale(sourcedirectory, cachedirectory):\n shutil.rmtree(cachedirectory)\n else:\n continue\n\n shutil.copytree(sourcedirectory, cachedirectory)\n\n if settings.FILE_UPLOAD_PERMISSIONS is not None:\n os.chmod(cachedirectory, 02750)\n\n for root, dirs, files in os.walk(cachedirectory):\n for momo in files:\n os.chmod(os.path.join(root, momo),\n settings.FILE_UPLOAD_PERMISSIONS)\n for momo in dirs:\n os.chmod(os.path.join(root, momo), 02750)", "def get_output_filename(item: str, root: str, i: int) -> str:\n element_split = item.split(\"/\")\n item, ext = element_split[-1].split(\".\")\n if i < 0:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}.{ext}\"\n else:\n return f\"{root}/{'/'.join(element_split[:-1])}/{item}_aug{i}.{ext}\"", "def putfilenameontop(idf, lines):\n openfile = '<%s>%s</%s>' % ('h4', idf.idfname, 'h4')\n lines = [openfile, '<hr>'] + lines\n return lines", "def get_template_names(self):\n tpl = super(Teacher_professionalView, self).get_template_names()[0]\n app = self.model._meta.app_label\n mdl = 'teacher_professional'\n #self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))\n self.template_name = tpl[:8]+'teacher_professional/'+tpl[8:]\n return [self.template_name]", "def get_autographs(pathtofile):\n\n autos = {}\n\n path = Path(pathtofile)\n assert path.is_dir()\n file_list = []\n for x in path.iterdir():\n if x.is_dir():\n file_list.append(x)\n print(f\"Found files {len(file_list)} -- {file_list}\")\n\n for f in file_list:\n name = str(f)[len(pathtofile) + 1 :]\n autos[name] = {}\n for x in f.iterdir():\n if str(x) == f\"{pathtofile}/{name}/{name}.txt\":\n info_file = x\n f = open(info_file, \"r\").readlines()\n info_name = f[0]\n info_quote = f[1]\n elif (\n str(x) == f\"{pathtofile}/{name}/{name}.jpg\"\n or str(x) == f\"{pathtofile}/{name}/{name}.png\"\n ):\n info_img = x\n else:\n l = len(pathtofile) + len(name) + 12\n f = open(x, \"r\").read().replace(\"\\n\", \" \").split()\n s = []\n for i in range(0, len(f), 20):\n s.append(\" \".join(f[i : i + 20]))\n output = \"\\n\".join(s)\n autos[name][str(x)[l:-4]] = output\n\n return autos", "def _extract(self):\r\n self._data = []\r\n for fname in self.files:\r\n meta = dict(filename=fname)\r\n\r\n # Perform the actual metadata extraction\r\n fname = os.path.splitext(self.filter_filename(fname))[0]\r\n values = fname.split(self.sep)\r\n\r\n # Handle the case where number of fields is less than the length\r\n # of the extracted values, ie cases where we only want to extract\r\n # a subset of available fields.\r\n if self.index:\r\n values = 
[val for i, val in enumerate(values) if i in self.index]\r\n\r\n meta.update(dict(zip(self.fields, values)))\r\n if self.split_by in self.fields:\r\n meta[self.split_by] = self._get_split_field_values(meta['filename'])\r\n self._data.append(meta)", "def process(self, terms):\n for entry in self.files:\n try:\n logger.info('file - {0}'.format(entry.path))\n\n # notional output file path\n path_sentences = self.path.joinpath('{0}.csv'.format(entry.path.stem))\n path_summary = self.path.joinpath('{0}-summary.csv'.format(entry.path.stem))\n logger.info('will save to - {0}'.format(path_sentences.resolve()))\n\n reports = self.inspect_doc(entry, terms)\n\n # receiving a list of dicts\n # therefore pandas can package into a useful outcome\n if len(reports) > 0:\n frame_sentences = pd.DataFrame(reports)\n\n frame_sentences = frame_sentences[['page', 'term', 'sentence']]\n logger.info('saving sentence file to - {0}'.format(path_sentences.resolve()))\n frame_sentences.to_csv(str(path_sentences.resolve()))\n \n frame_summary = frame_sentences.pivot_table(\n index='page',\n columns='term',\n aggfunc='size',\n fill_value=0\n )\n logger.info('saving summary file to - {0}'.format(path_sentences.resolve()))\n frame_summary.to_csv(str(path_summary.resolve()))\n\n\n except Exception as e:\n logger.error(e)", "def __init__(self, pageName):\n self.pageName = pageName\n self.updateFileData()\n self.template = pystache.parse(unicode(self.fileData, 'utf-8'))", "def substitute(var_list, data, template_location, save_location):\r\n with open(template_location) as template_file:\r\n template_text = template_file.read()\r\n\r\n row_number = 1\r\n for row in data:\r\n new_text = template_text\r\n for var, sub in zip(var_list, row):\r\n new_text = new_text.replace(\"<\" + var + \">\", sub)\r\n with open(os.path.join(save_location, f\"{row_number}.txt\"), \"w\") as out_file:\r\n out_file.write(new_text)\r\n row_number += 1" ]
[ "0.5765641", "0.5753263", "0.5717375", "0.5547444", "0.55394816", "0.54884017", "0.54623926", "0.53843206", "0.5353627", "0.531927", "0.52712166", "0.5261715", "0.523186", "0.52143687", "0.51853824", "0.51815045", "0.51764005", "0.5170443", "0.5149673", "0.51465887", "0.5144239", "0.5137167", "0.51357424", "0.5131627", "0.512647", "0.51041424", "0.5102202", "0.5100827", "0.50993335", "0.5083337", "0.50786877", "0.5047434", "0.5045514", "0.5042268", "0.5041269", "0.50407475", "0.50318694", "0.50183606", "0.50172204", "0.50113904", "0.49955836", "0.49908957", "0.49797058", "0.49778685", "0.4976934", "0.49710506", "0.4967801", "0.49639708", "0.49636522", "0.49600825", "0.4946685", "0.49461687", "0.4935502", "0.49337074", "0.49311677", "0.4927296", "0.49252805", "0.49242866", "0.49172577", "0.49129736", "0.4910424", "0.4907582", "0.4907086", "0.49015284", "0.48896667", "0.48883718", "0.48827744", "0.4878924", "0.4877782", "0.48769736", "0.48694557", "0.48654813", "0.4858111", "0.4847689", "0.48260126", "0.48243487", "0.4820509", "0.48203772", "0.48176375", "0.4816021", "0.48147684", "0.48048672", "0.48014772", "0.480055", "0.47997656", "0.47984686", "0.47786355", "0.47775844", "0.47756627", "0.4764841", "0.47630972", "0.4755036", "0.47544792", "0.47522312", "0.47521886", "0.47506785", "0.47477442", "0.47474727", "0.47459707", "0.4745343" ]
0.72918445
0
Dump the info object to a yaml file.
def DumpInfo(self): if self.logdir is None: return self.dumpfile = '%s/preprocess_info.yaml' % (self.logdir) try: f = open(self.dumpfile,'w') f.write(yaml.dump(self.info,default_flow_style=False, indent=4)) f.close() except IOError: self.errors = True errstr = 'Error accessing %s' % self.dumpfile raise IOError(errstr) self.LogErrors(errstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def __exit__(self, *_):\n with self._info_yaml_file_path.open(\"w\") as info:\n self._yml.dump(self._info, info)", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))", "def write(self):\n print yaml.dump(self._config, default_flow_style=False),", "def dump(self, yaml_file):\n\n with open(yaml_file, 'w') as fp:\n yaml.dump(self.__dict__, fp)", "def save(self, filename):\n with open(filename, 'w') as f:\n yaml.dump(self.to_dict(), f, sort_keys=False)", "def UnDumpInfo(self):\n filename = '%s/preprocess_info.yaml' % self.logdir\n f = open(filename,'r')\n self.info = yaml.load(f.read())\n f.close()", "def to_yaml(obj: ConfiguredBaseModel, file: str):\n\n fh = open(file, \"w\") if file else sys.stdout\n\n if isinstance(obj, Entity):\n yaml.dump(obj.dict(), fh, indent=4)\n elif isinstance(obj, Results) or isinstance(obj, HistoPheno) or isinstance(obj, AssociationCountList):\n yaml.dump([item.dict() for item in obj.items], fh, indent=4)\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n if file:\n console.print(f\"\\nOutput written to {file}\\n\")\n fh.close()\n\n return", "def dump(self, config_file = 'config.yaml'):\n\n with open(config_file, 'w') as fp:\n yaml.dump(self.__dict__, fp)", "def to_yaml(cls,dumper,self):\n #self.__modelData['ids'] = self.__mapObj.ids\n self.__modelData['ids'] = ','.join(map(str,self.__mapObj.ids))\n\n ##GENERATE Overview\n old_size = self.__size\n self.__mapObj.size = PREVIEW_SIZE\n typ,dat,width,height = processOverview(self.__mapObj.png)\n self.__modelData['overview_typ'] = typ\n self.__modelData['overview_dat'] = dat\n self.__modelData['overview_width'] = width\n self.__modelData['overview_height'] = height\n self.__mapObj.size = old_size\n #END Overview\n\n node = dumper.represent_mapping(cls.yaml_tag,self.__modelData)\n self.SetModified(False)\n return node", "def save(self, filename=None):\n name = filename or self.filename\n with open(name, \"w\") as stream:\n yaml.dump(self.data, stream, default_flow_style=False)", "def dump_yaml(self, data, output):\n yaml.indent(mapping=MAPPING, sequence=SEQUENCE, offset=OFFSET)\n yaml.dump(data, output)", "def save_info(self):\n json.dump(self.info, open(os.path.join(self.dstore_dir, \"info.json\"), \"w\"),\n sort_keys=True, indent=4, ensure_ascii=False)", "def dump(self) -> None:\n ...", "def test_to_yaml(self) -> None:\n entry = Entry(\"Cao_2019\", self.EXAMPLE_ENTRY_DICT)\n yaml_str = YAMLParser().dump(entry)\n with open(self.EXAMPLE_YAML_FILE, \"r\", encoding=\"utf-8\") as file:\n assert yaml_str == file.read()", "def save_to_yaml(self, path=None):\n\n if not path:\n path = \".\".join([self.name.value, \"yaml\"])\n\n planet_dict = {}\n for a in sorted(self.attributes):\n exo_param = getattr(self, a)\n param_dict = exo_param.__dict__\n param_dict = {k: str(v)\n for k, v in param_dict.items()\n if v and len(str(v)) > 0}\n planet_dict[a] = param_dict\n\n with open(path, 'w') as yamlfile:\n yaml.dump(planet_dict, yamlfile, default_flow_style=False)", "def DumpYaml(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n text = yaml.safe_dump(data)\n \n return text", "def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)", "def __str__(self):\n if self.data is None:\n return \"\"\n\n return 
yaml.dump(self.data, default_flow_style=False, indent=2)", "def yaml_inventory(self):\n inventory_file = 'inventory_file'\n with open(inventory_file, 'w') as invfile:\n yaml.dump(self.inventory_dict, invfile, default_flow_style=False, sort_keys=False)", "def save_to_yml_file(self):\n yml_filename = self.get_yml_filename()\n\n if os.path.exists(yml_filename) and not self.force:\n logger.warning(\n f\"[red]File {yml_filename} already exists, not writing. To override add --force.[/red]\"\n )\n else:\n if self.force:\n logger.info(\n f\"[yellow]Force flag is used. Overriding {yml_filename} if it exists.[/yellow]\"\n )\n if self.metadata:\n self.metadata.save_dict_as_yaml_integration_file(yml_filename)", "def toYAML(cls, obj):\n if isinstance(obj, dict):\n return yaml.dump(obj, default_flow_style=False)\n else:\n return yaml.dump_all(obj, default_flow_style=False)", "def to_yaml(cls, dumper, data):\n\t\tdict_rep = {'location':data._location, 'startFrame':data._startFrame,\n\t\t\t\t\t'endFrame':data._endFrame, 'camera':data._camera}\n\n\t\tprint(dict_rep)\n\n\t\tnode = dumper.represent_mapping(cls.yaml_tag, dict_rep)\n\t\treturn node", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "async def dump(self, data: dict, file: IO):", "def dump(filename: Path) -> None:\n import yaml\n\n dumped_str = yaml.dump_all(\n [data_dict],\n Dumper=RegressionYamlDumper,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n encoding=\"utf-8\",\n )\n with filename.open(\"wb\") as f:\n f.write(dumped_str)", "def save_yaml_to_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n d = i['dict']\n\n try:\n # If using just dump and keys are in unicode,\n # pyyaml adds warning and makes produced yaml unparsable\n s = yaml.safe_dump(d)\n except Exception as e:\n return {'return': 1, 'error': 'problem converting dict to YAML ('+format(e)+')'}\n\n return save_text_file({'text_file': fn, 'string': s})", "def yaml(self):\n raise NotImplementedError", "def save_dict_as_yaml_integration_file(self, output_file: str):\n logger.debug(f\"Writing collected metadata to {output_file}.\")\n\n write_yml(output_file, self.metadata_dict)\n logger.info(\"[green]Finished successfully.[/green]\")", "def dump(self, config):\n raise NotImplementedError", "def save(dikt):\n with open(SAVE_FILE_NAME, 'w') as save_file:\n yaml.safe_dump(dikt, save_file)", "def saveToFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n stream = open(path,\"w\")\n yaml.dump(self.parameters(),stream)", "def to_yaml_string(self):\n Params._check_yaml_import()\n import yaml\n\n return yaml.safe_dump(dict(self))", "def test_02_Dump(self):\n self.m_location.Street = '_test street'\n l_ret = self.m_config.save_yaml_config()\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-A - Location', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'C2-02-B - House', 190))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House.Location, 'C2-02-C - Location', 190))\n # print(PrettyFormatAny.form(l_ret, 'C2-02-D - Location', 190))\n # print('Config: {}'.format(l_ret))\n self.assertEqual(l_ret['Location']['City'], 'Washington')", "def dump(self):\n return", "def help_dump(self):\n print(DUMP)", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission 
instance write succeeded.\")", "def user_create_yaml(self):\n pass", "def write(self, filename, mode=\"w\"):\n d = self._result_dict\n val = yaml.safe_dump(d, default_flow_style=False)\n\n with open(str(filename), mode) as outfile:\n outfile.write(val)", "def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)", "def dump(self):\n with open(self._config_filename, 'w', encoding='utf-8') as file:\n self._parser.write(file)", "def to_yaml(self, data, options=None):\r\n options = options or {}\r\n\r\n if yaml is None:\r\n raise UnsupportedSerializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.dump(self.to_simple(data, options))", "def dump(self, outfile):\n with open(outfile, 'wb') as picklefile:\n pickle.dump(\n {str(key): value for key, value in self.data.items()},\n picklefile)", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def save_file(filename,d):\n f = open(filename, 'w')\n yaml.dump(d, f)\n f.close()", "def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()", "def write(self, fname=None):\n fname = fname or self.path\n with open(fname, \"w\") as fl:\n yaml.dump(self._as_dict(), fl)\n self.path = Path(fname)", "def flush(self):\n with open(self.fname, \"w\") as f:\n yaml.dump(list(self), f)", "def to_file(self, path: str) -> None:\n from squirrel.catalog.yaml import catalog2yamlcatalog, prep_yaml\n\n yaml = prep_yaml()\n with fsspec.open(path, mode=\"w\") as fh:\n ser = catalog2yamlcatalog(self)\n yaml.dump(ser, fh)", "def yaml(self):\n return str(self.data)", "def pretty(self):\n return yaml.dump(self.get_data(), encoding='utf-8',\n default_flow_style=False).rstrip()", "def dump_yaml(file_path, data):\n\n with open(os.path.abspath(os.path.expanduser(file_path)), \"w\") as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n\n return file_path", "def to_yaml_file(self, file_path, **kwargs):\n Params._check_yaml_import()\n import yaml\n\n try:\n with Params._open_file(file_path, \"w\") as fp:\n yaml.safe_dump(dict(self), stream=fp, **kwargs)\n return file_path\n except Exception as err:\n print(\"Failed to write {} instance to: {}\".format(self.__class__.__name__, file_path), err)\n return None", "def repr_fx(self):\n return yaml.dump(self)", "def save_info_file(self, info_dict: Dict[str, Dict[str, int]]) -> NoReturn:\n with open(os.path.join(self.out_folder, INFO_FILE_NAME), \"w\") as file_handle:\n for split in info_dict:\n file_handle.write(split + \":\\n\")\n for class_name in info_dict[split]:\n file_handle.write(\n class_name + \": \" + str(info_dict[split][class_name]) + \"\\n\"\n )\n file_handle.write(\"\\n\")", "def to_content(cls, data: Mapping) -> str:\n cls._check_yaml()\n s = yaml.safe_dump(data, default_flow_style=False)\n s = '---\\n' + s\n return s", "def 
_info(self):\n text = ''.join(self._lines)\n rendered_text = jinja2.Template(text).render()\n return yaml.load(rendered_text)", "def setup_dump(self):\n dumpdir = self._dump_dirname\n if not os.path.isdir(dumpdir):\n os.makedirs(dumpdir)\n\n dump = False\n yaml_filename = self._yaml_filename\n\n if not os.path.isfile(yaml_filename):\n dump = True\n else:\n with open(yaml_filename) as f:\n if f.read() != yaml.dump(self):\n logging.warning('Existing step.yaml does not match hash, regenerating')\n dump = True\n\n if dump:\n with open(yaml_filename, 'w') as f:\n yaml.dump(self, f)", "def dump(self):\n # dump self.data\n pickle.dump(self.data, open(self.data_dir + DATA_PATH, 'wb+'))\n # dump self.code2desc\n pickle.dump(self.code2desc, open(self.data_dir + CODE2DESC_PATH, 'wb+'))\n # dump self.family2tf\n pickle.dump(self.family2tf, open(self.data_dir + FAMILY2TF_PATH, 'wb+'))\n # dump self.word2tf\n pickle.dump(self.word2tf, open(self.data_dir + WORD2TF_PATH, 'wb+'))\n # dump self.word2df\n pickle.dump(self.word2df, open(self.data_dir + WORD2DF_PATH, 'wb+'))\n return None", "def dump_params(filename, param, verbose=False):\n tree = get_param(param)\n if verbose:\n print_params(tree, param)\n if not filename:\n f = sys.stdout\n yaml.dump(tree, f)\n else:\n f = open(filename, 'w')\n try:\n yaml.dump(tree, f)\n finally:\n f.close()", "def to_yaml(self, **kwargs):\n if not self._is_graph_network:\n raise NotImplementedError\n\n if yaml is None:\n raise ImportError('Requires yaml module installed.')\n return yaml.dump(self._updated_config(), **kwargs)", "def _yaml_dump(data):\n return yaml.dump(data, Dumper=_OrderedDumper, allow_unicode=True)", "def save_info(base):\r\n _info = open(base.info_name,'wb')\r\n fields = []\r\n for k in base.field_names:\r\n if isinstance(base.fields[k],base.__class__):\r\n fields.append((k,'<base>'+urllib.quote(base.fields[k].name)))\r\n else:\r\n fields.append((k,base.fields[k].__name__))\r\n _info.write(' '.join(['%s:%s' %(k,v) for (k,v) in fields]))\r\n _info.close()\r\n out = open(os.path.join(base.name,\"__defaults__\"),\"wb\")\r\n for field_name,default_value in base.defaults.iteritems():\r\n if field_name in [\"__id__\",\"__version__\"]:\r\n continue\r\n value = base._file[field_name].to_block(default_value)\r\n out.write(\"%s %s\" %(field_name,value))\r\n out.close()", "def save( self ):\n ini = codecs.open(self.filename,\"w\",\"utf-8\",errors=\"replace\",buffering=0)\n for (name,value) in self.conf.items():\n print >>ini, name, \"=\", value\n ini.close()", "def to_yaml(self):\n feature_set_dict = self.to_dict()\n return yaml.dump(feature_set_dict, allow_unicode=True, sort_keys=False)", "def print_dict(init_dict, file_name=\"test\"):\n ordered_dict = collections.OrderedDict()\n order = [\"SIMULATION\", \"PARAMS\", \"DIST\"]\n for key_ in order:\n ordered_dict[key_] = init_dict[key_]\n\n with open(\"{}.boupy.yml\".format(file_name), \"w\") as outfile:\n yaml.dump(ordered_dict, outfile, explicit_start=True, indent=4)", "def to_yaml(self, stream=None, **kwargs):\n return yaml.dump(self, stream, AttrDumper, **kwargs)", "def dump(object, filename):\n import pickle\n\n filename = filename if filename.endswith('.pic') else (filename + '.pic')\n\n with open(filename, 'wb') as f:\n pickle.dump(object, f, protocol=pickle.HIGHEST_PROTOCOL)", "def dump(self, output_file: str) -> None:\n with io.open(output_file, 'w', encoding='utf-8') as f:\n json.dump(self.get_meta_data(), f, ensure_ascii=False, indent=2)", "def save(self, filename: str):\n dump(self, filename)", "def 
dump(path):\n yaml.add_representer(MetaDict,\n lambda dumper, data: dumper.represent_mapping(\n 'tag:yaml.org,2002:map', data.items()))\n yaml.add_representer(MetaList,\n lambda dumper, data: dumper.represent_sequence(\n 'tag:yaml.org,2002:seq', data))\n click.echo(yaml.dump(load(path), default_flow_style=False))\n return 0", "def dump(self):\n self.hasher.update_time_dicts() # Makes the time measurements available\n\n print(\" Creating a results folder in {} and storing all results there.\".format(self.config.output_dir))\n if not os.path.isdir(self.config.output_dir):\n os.mkdir(self.config.output_dir)\n\n print(\" Dumping profile ...\")\n profile_file_name = \"{}_{}_profile\".format(self.name, self.config.mode)\n with open(os.path.join(self.config.output_dir, profile_file_name), \"a\") as file:\n profile = {\"config\": self.config.dump(),\n \"hash\": self.hasher.hash_time_dict,\n \"find\": self.hasher.find_time_dict}\n\n json.dump(profile, file)\n\n print(\" Dumping matches ...\")\n for i, match in enumerate(self.__matched_offsets):\n if int(match[0] > match[1]):\n offset_a = match[1]\n offset_b = match[0]\n else:\n offset_a = match[0]\n offset_b = match[1]\n\n match_file_name = \"{}_{}_{}_{}\".format(self.name, self.config.mode, offset_a, offset_b)\n with open(os.path.join(self.config.output_dir, match_file_name), \"w\") as file:\n infos = \"Config:\\n: {}\".format(self.config)\n text_a = \"\"\n text_b = \"\"\n if self.config.dump_text:\n text_a = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_a))\n text_b = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_b))\n\n file.write(\"{}\\n\\n{}\\n\\n{}\\n\\n{}\".format(infos, text_a, \"#\"*25, text_b))\n\n if self.config.dump_graph:\n print(\" Creating graphs ...\")\n x1, x2 = list(), list()\n y1, y2 = list(), list()\n t_all = 0\n for element, t in self.hasher.hash_time_dict.items():\n t_all += t\n x1.append(element)\n y1.append(t_all)\n\n t_all = 0\n for element, t in self.hasher.find_time_dict.items():\n t_all += t\n x2.append(element)\n y2.append(t_all)\n\n self.__plot(os.path.join(self.config.output_dir, \"hash_time\"), x1, y1)\n self.__plot(os.path.join(self.config.output_dir, \"find_time\"), x2, y2)\n\n print(\"\\n\\n\")\n\n return", "def __str__(self):\n s = yaml.dump(self.kwargs)\n return s", "def pdump(data):\n return yaml.dump(yaml.load(json.dumps(data)))", "def showconfig():\n print(yaml.dump(CONFIG))", "def dump(self, filename):\n suffix = filename.split(\".\")[-1]\n if not suffix == \"dflx\":\n filename = filename + \".dflx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, \"wb\")\n pickle.dump(self.meta, f)\n pickle.dump(self.__dict__, f)\n f.close()\n logging.info(\"Results dumped to %s.\", filename)", "def _serialize(self, dest_dir: Path):\n dest_file = self._create_target_dump_dir(dest_dir) / self.normalize_file_name()\n with open(dest_file, \"w\") as file:\n yaml.dump(self._as_dict, file)\n return [dest_file]", "def dump(self):\n str_dict = convert_to_string_only_dict(self.as_dict())\n\n file_name = os.path.join(self.log_dir, self.config_file_name)\n with open(file_name, 'w') as fp:\n json.dump(str_dict, fp, sort_keys=True, indent=4, separators=(',', ': '))\n print('Created log file: {}'.format(file_name))", "def save(self, name, description, template, values):\n # Before attempting to write, ensure the directory exists\n self.directory.mkdir(parents = True, exist_ok = True)\n dest = self.directory / \"{}.yaml\".format(name)\n with dest.open('w') as f:\n yaml.dump(\n dict(\n 
description = description or '',\n template = template.name,\n values = values\n ),\n f\n )", "def dump(self, fp: Any):\n # redo jinja2 changes\n self.meta = _remunge_jinja2_vars(self.meta, self._jinja2_sentinel)\n\n try:\n # first dump to yaml\n s = io.StringIO()\n self._parser.dump(self.meta, s)\n s.seek(0)\n\n # now unmunge\n lines = _unmunge_split_key_value_pairs_with_selectors(\n [line for line in s.readlines()],\n )\n for i in range(len(lines)):\n lines[i] = _unmunge_line(lines[i])\n\n # add multiline jinja2 statements\n lines = _unmunge_multiline_jinja2(lines)\n\n # put in new jinja2 vars\n lines = _replace_jinja2_vars(lines, self.jinja2_vars)\n\n # now write to final loc\n for line in lines:\n fp.write(line)\n finally:\n # always put things back!\n self.meta = _demunge_jinja2_vars(self.meta, self._jinja2_sentinel)", "def toyaml(self,ymlfile,Kname=\"rgb_intrinsics\",dname=\"rgb_distortion\",sname=(\"image_width\",\"image_height\")):\n q = dict()\n q[Kname] = self.K\n q[dname] = self.dist\n if type(sname) is tuple:\n q[sname[0]] = self.size[0]\n q[sname[1]] = self.size[1]\n else:\n q[sname] = self.size\n\n if ymlfile == \"\" or ymlfile is None:\n return yaml.dumps(q)\n elif type(ymlfile) is str:\n ymlfile = open(ymlfile,\"wb\")\n yaml.dump(q,ymlfile)", "def dump(self, remove_tags=(\"x-commons\",)):\n openapi_tags = (\"openapi\", \"info\", \"servers\", \"tags\", \"paths\", \"components\")\n\n # Dump long lines as \"|\".\n yaml.representer.SafeRepresenter.represent_scalar = my_represent_scalar\n\n openapi = deepcopy(self.openapi)\n\n # If it's not a dict, just dump the standard yaml\n if not isinstance(openapi, dict):\n return yaml.dump(\n openapi,\n default_flow_style=False,\n allow_unicode=True,\n Dumper=NoAnchorDumper,\n )\n\n # Eventually remove some tags, eg. 
containing references and aliases.\n for tag in remove_tags:\n if tag in openapi:\n del openapi[tag]\n\n # Add resolved schemas.\n # XXX: check if the schema hash is the same in case\n # of multiple entries.\n components = openapi.setdefault(\"components\", {})\n for k, items in self.yaml_components.items():\n if k not in components:\n components[k] = {}\n\n components[k].update(items)\n\n # Order yaml keys for a nice\n # dumping.\n yaml_keys = set(openapi.keys())\n first_keys = [x for x in openapi_tags if x in yaml_keys]\n remaining_keys = list(yaml_keys - set(first_keys))\n sorted_keys = first_keys + remaining_keys\n\n content = \"\"\n for k in sorted_keys:\n content += yaml.dump(\n {k: openapi[k]},\n default_flow_style=False,\n allow_unicode=True,\n Dumper=NoAnchorDumper,\n )\n\n return content", "def to_yaml(cls, dumper, data):\n m = {k: getattr(data, k) for k in cls._yaml_keys}\n return dumper.represent_mapping(cls.yaml_tag, m)", "def _writeToFile(out_model_dict, model_directory, parent):\n\n fname = compat.getsavefilename(parent=parent,\n caption='Save to file',\n basedir=model_directory)[0]\n\n if len(fname) > 0:\n # enforce correct suffix.\n if not fname.endswith(\".yaml\"):\n fname += \".yaml\"\n\n f = open(fname, \"w\")\n yaml.dump(out_model_dict, f,default_flow_style=False)\n f.close()", "def write(self, filename: str):\n obj = self.to_dict(self)\n config.write(obj, filename)", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)", "def to_yaml(self):\n return yaml.dump(self.raw, Dumper=yaml.SafeDumper)", "def test_dump_config(self):\n config = easydms.config.Config()\n print(config)", "def to_yaml(self):\r\n entity = {self.__class__.entity_name.lower() : self.current_instance_state()}\r\n return yaml.dump(entity, default_flow_style = False)", "def create_dump(self) -> Dict[str, str]:\n return self.http.post(self.config.paths.dumps)", "def __str__(self):\n _dict = self._def.default.copy()\n _dict.update(self.__dict__)\n #return '%s(%r)' % (self.__class__, _dict)\n # If the yaml is too slow revert to the line above\n name = '%s <%s>' % (self.__class__.__name__, id(self))\n return yaml.dump({name: _dict}, default_flow_style=False)", "def to_yaml(self, skip_nulls=True):\n return yaml.safe_dump(self.to_dict(skip_nulls=skip_nulls),\n default_flow_style=False)", "def dump(self, filename):\n\n utils.save(filename, {'model': self}, zipped=True)", "def save(self):\n defn_dir = path.dirname(self.definition_filename)\n\n if not path.isdir(defn_dir):\n os.makedirs(defn_dir)\n\n # Force check of stopsignal\n self.stopsignal\n\n with open(self.definition_filename, 'w') as df:\n yaml.safe_dump(self.raw_data, df, default_flow_style=False)", "def dump(self, obj):\r\n return self.localpath.dump(obj)", "def write_yaml(yaml_config: Dict[str, Any], filename: str) -> None:\n\n with open(filename, 'w') as outfile:\n yaml.dump(yaml_config, outfile, default_flow_style=False,\n sort_keys=False)", "def dump(data):\n stream = StringIO()\n yaml.dump(data, stream, Dumper=yaml.RoundTripDumper)\n return stream.getvalue().rstrip()", "def create_user_file(self):\n d = self.user2dict()\n with open(User.get_path(self.username), 'w', encoding='utf8') as file:\n yaml.dump(d, file, default_flow_style=False)", "def yaml_dump(\n data, Dumper=None, allow_unicode: bool = True, **kwargs\n): # pylint: disable=invalid-name\n 
if Dumper is None:\n Dumper = OrderedDumper\n return yaml.dump(\n data, Dumper=Dumper, allow_unicode=allow_unicode, **kwargs\n )" ]
[ "0.7140269", "0.6745914", "0.67231625", "0.6591301", "0.6522555", "0.6498975", "0.64982736", "0.6444522", "0.63701254", "0.62643826", "0.62501603", "0.61706054", "0.60966444", "0.60667735", "0.6060812", "0.6049239", "0.6019757", "0.60015565", "0.60012335", "0.59733576", "0.5957535", "0.5930331", "0.5920716", "0.59146774", "0.58603007", "0.58540636", "0.5849694", "0.58436817", "0.5843065", "0.58338845", "0.5830134", "0.5829749", "0.58291394", "0.5820975", "0.5779402", "0.57725567", "0.57701683", "0.5765962", "0.5759896", "0.57568914", "0.5741405", "0.5738917", "0.5733543", "0.57303995", "0.572894", "0.5721614", "0.5720375", "0.57177955", "0.5700488", "0.56934583", "0.56907046", "0.5686742", "0.5676355", "0.5672274", "0.56713265", "0.5671297", "0.56661385", "0.561821", "0.5580748", "0.5579325", "0.55753577", "0.556974", "0.55532473", "0.55519366", "0.5542139", "0.55344707", "0.5526982", "0.55176246", "0.5513664", "0.55119157", "0.55085695", "0.55043054", "0.55014294", "0.5488772", "0.5486125", "0.5485025", "0.54841083", "0.5482527", "0.54697275", "0.54685587", "0.5468241", "0.5466349", "0.54659426", "0.54625034", "0.54598266", "0.54576105", "0.5451081", "0.5450776", "0.5450671", "0.5448301", "0.54431564", "0.54371727", "0.54358095", "0.54297006", "0.54285574", "0.5418553", "0.54156053", "0.54146135", "0.54142475", "0.5410908" ]
0.7883224
0
Load the info dictionary from a yaml file.
def UnDumpInfo(self): filename = '%s/preprocess_info.yaml' % self.logdir f = open(filename,'r') self.info = yaml.load(f.read()) f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def load_yaml_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n d = yaml.load(s, Loader=yaml.FullLoader)\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def load_yaml(fname: str) -> dict:\n try:\n with open(fname, 'r') as f:\n dataMap = yaml.safe_load(f)\n except IOError as e:\n print(f\"Cannot open YAML file {fname}\")\n print(f\"IOError: {e}\")\n \n return dataMap", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def load(self, file):\n self.__log(f'Starting to load settings from {file}', 'warning')\n contents = load_yaml(file)\n for item in contents:\n if item == 'options':\n self.__log(f'Found options in {file}, loading them', 'warning')\n for i in contents[item]:\n self.__log(f'Setting {i.lower()} to {contents[item][i]}')\n self.set(i.lower(), contents[item][i])\n elif item == 'config':\n self.__log(f'Found configuration variables in {file}, loading them', 'warning')\n for i in contents[item]:\n self.__log(f'Setting {i.upper()} to {contents[item][i]}')\n self.set(i.upper(), contents[item][i])\n else:\n raise UnknownYamlContentError", "def load_yaml(file_path: str) -> dict:\n assert file_path.endswith(\".yaml\")\n with open(file_path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "def _parse_from_yaml(self) -> Dict:\n config_path = path.join(path.dirname(path.abspath(__file__)), self.config_file)\n try:\n with open(config_path, \"r\") as f:\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\n return config_dict\n except FileNotFoundError as fnfe:\n raise FileNotFoundError('configuration file not found.')\n except Exception as exc:\n raise Exception('Error while loading config file.')", "def get_cfg_from_yaml(self):\n try:\n with open(self.parsed_cfg_path, 'r') as cfg_yaml:\n self.from_yaml_cfg_dict = yaml.load(cfg_yaml)\n except Exception as exc:\n print(exc)\n traceback.print_exc()\n self.from_yaml_cfg_dict = {}", "def load_yaml(file: Text):\n with open(file) as fp:\n return yaml.load(fp, yaml.FullLoader)", "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def load_yaml(cls, file=None):\n if file is None: file = f'{rcp.base_path}cfg.yml'\n try:\n with open(file, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n cfg.__dict__ = config\n return cfg\n except FileNotFoundError:\n print(\"Config file doesn't exist.\")", "def load_yaml(file):\n with open(file, 'r') as file:\n data = yaml.safe_load(file)\n return 
data", "def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())", "def load_yaml(yml_file):\n with open(yml_file) as src:\n cfg = yaml.load(src, Loader=yaml.Loader)\n return cfg", "def read_yaml(yaml_file: str) -> dict:\n with open(yaml_file, 'r', encoding=\"utf8\") as _file:\n _dict = yaml.safe_load(_file)\n logging.info(f\"Yaml file {yaml_file} parsed!\")\n\n return _dict", "def load_yaml(cls, file=None):\n if file is None:\n file = f'{cls.base_path}rcp_{rcp.stage}.yml'\n try:\n with open(file, 'r') as f:\n recipe = yaml.load(f, Loader=yaml.FullLoader)\n rcp.__dict__ = recipe\n return rcp\n except FileNotFoundError:\n print(\"Recipe file doesn't exist.\")\n raise", "def load_yaml(path: str) -> Dict[str, Any]:\n with open(path, \"r\", encoding=\"utf8\") as fp:\n data = yaml.safe_load(fp)\n return data", "def load_yaml_file(self, yaml_file_path):\n try:\n yaml_file = open(yaml_file_path, encoding=\"UTF-8\").read()\n except FileNotFoundError:\n raise CouldNotFindYAMLFileError(yaml_file_path)\n\n try:\n yaml_contents = yaml.safe_load(yaml_file)\n except yaml.YAMLError:\n raise InvalidYAMLFileError(yaml_file_path)\n\n if yaml_contents is None:\n raise EmptyYAMLFileError(yaml_file_path)\n\n if isinstance(yaml_contents, dict) is False:\n raise InvalidYAMLFileError(yaml_file_path)\n\n return yaml_contents", "def read_yaml_file(filepath: str) -> Dict:\n return yaml.safe_load(read_file(filepath))", "def _load_yaml_file(yaml_file):\n with io.open(yaml_file, 'r', encoding='utf-8') as stream:\n yaml_content = yaml.load(stream)\n FileUtils._check_format(yaml_file, yaml_content)", "def read_exercise_yaml(path_yaml):\n exer_dict = {}\n with open(path_yaml, 'r') as stream:\n try:\n exer_dict = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return exer_dict", "def load(self, yaml_file):\n try:\n with open(yaml_file, 'r') as fp:\n data = yaml.load(fp)\n \n for key in data:\n if hasattr(self, key):\n setattr(self, key, data[key])\n return True # Return true if we succeeded\n \n except IOError: \n return False # Return false if we didn't succeed", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def load(cls, file_name):\n with open(file_name) as fl:\n config = yaml.load(fl, Loader=yaml.FullLoader)\n return cls(file_name, _loaded_from_file=True, **config)", "def load_from_yaml_file(f: Union[str, TextIO]) -> Dict:\n\n # support environment variables in config\n # https://stackoverflow.com/a/55301129\n\n # For maximum compatibility with PyGeoApi config files, this function is\n # inspired by the yaml_load() function in pygeoapi/util.py here:\n # https://github.com/geopython/pygeoapi/blob/2c567d25f70daa3ed0a047ae548a3dfcd97c7cc2/pygeoapi/util.py#L100\n path_matcher = re.compile(r'.*\\$\\{([^}^{]+)\\}.*')\n\n def path_constructor(loader, node):\n env_var = path_matcher.match(node.value).group(1)\n if env_var not in os.environ:\n raise EnvironmentError(\"Undefined environment variable in config\")\n return str_to_python(path.expandvars(node.value))\n\n class EnvVarLoader(yaml.SafeLoader):\n pass\n\n EnvVarLoader.add_implicit_resolver('!path', path_matcher, None)\n EnvVarLoader.add_constructor('!path', path_constructor)\n do_close = False\n 
if isinstance(f, str):\n f = open(f, \"r\")\n resp = yaml.load(f, Loader=EnvVarLoader)\n if do_close:\n f.close()\n return resp", "def load_yaml(path: str) -> dict:\n with open(path, 'r') as f:\n yaml_file = yaml.load(f, Loader=yaml.FullLoader)\n return yaml_file", "def load(yml_files, debug = False):\n\n dc = {}\n\n if type(yml_files) == dict:\n dc = yml_files\n elif type(yml_files) == str:\n with open(yml_files, \"r\") as f:\n dc = yaml.load(f)\n elif type(yml_files) == list or type(yml_files) == tuple:\n for yml_file in yml_files:\n with open(yml_file, \"r\") as f:\n dc_cur = yaml.load(f)\n # check that now key is overwritten\n for k in dc_cur.keys():\n if k in dc:\n raise Exception (\"Key %s is defined in at least to yml files (e.g. in %s)\" % (k, yml_file) )\n dc.update(dc_cur)\n\n return build_plasm_from_dictionary(dc, debug)", "def from_yaml(cls, yaml_file):\n return cls(OrderedDict(yaml.load(open(yaml_file, \"r\"), \n Loader=yaml.FullLoader)))", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def load():\n with open(SAVE_FILE_NAME, 'r') as save_file:\n dikt = yaml.safe_load(save_file)\n if dikt is None:\n dikt = {}\n return dikt", "def load_yaml(self, file) -> dict:\n with open(file, 'r', encoding=\"utf8\") as stream:\n mapping = yaml.safe_load(stream)\n if mapping is None:\n mapping = dict()\n includes = mapping.get('includes', [])\n if not isinstance(includes, list):\n raise AttributeError(\n f'Includes must be a list, {type(includes)} provided'\n )\n\n dirname = os.path.dirname(file)\n include_mapping = dict()\n for include_config_file in includes:\n tmp_config_file = os.path.join(dirname, include_config_file)\n cur_include_mapping = self.load_yaml(tmp_config_file)\n include_mapping = self.dict_merge(\n include_mapping, cur_include_mapping\n )\n mapping.pop('includes', None)\n\n mapping = self.dict_merge(include_mapping, mapping)\n return mapping", "def _load_config_info(file):\n try:\n with open(file, 'r') as f:\n config_info_dict = json.load(f)\n except IOError as e:\n _error_exit(\"Error loading the scionlab config info file '%s': %s\", file, e)\n try:\n return ConfigInfo(config_info_dict['host_id'],\n config_info_dict['host_secret'],\n config_info_dict.get('url') or DEFAULT_COORDINATOR_URL,\n config_info_dict.get('version'))\n except KeyError as e:\n _error_exit(\"Invalid scionlab config info file '%s': %s\", file, e)", "def load_yml(yml_file):\n with open(yml_file) as src:\n cfg = yaml.load(src, Loader=yaml.Loader)\n return cfg", "def read_label_definitions(filename: str) -> dict:\n with open(filename, 'r') as f:\n translate = yaml.load(f, Loader=yaml.SafeLoader)\n return translate", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n return {}", "def loadfrom_yaml(key, path):\n\twith open(path, 'r') as f:\n\t\td = yaml.load(f)\n\t\tnew_namespace(key)\n\t\t\n\t\t# ns = get_namespace(key)\n\n\t\t# for key, value in d.items():\n\t\t# \t_recurse(0, key, value, ns)", "def read_yaml(path: PathLike) -> Dict:\n with open(path, \"r\") as read_file:\n return yaml.load(read_file, Loader=yaml.UnsafeLoader)", "def LoadYaml(path):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n fp = None\n try:\n fp = open(path)\n \n data = yaml.load(fp)\n \n finally:\n if fp:\n fp.close()\n \n return data", "def load_yaml_file(self, path):\n with path.open('r') as handle:\n data = 
load_yaml(handle)\n\n self.set_all(**self.SCHEMA.load(data).data)", "def load(filePath):\n\n stream = open(filePath, 'r')\n yamlDict = yaml.safe_load(stream)\n\n return yamlDict", "def load_yaml(filename):\n try:\n f = file(filename, 'r')\n data = yaml.load(f)\n return data\n except (IOError, OSError) as e:\n err = e[0]\n reason = e[1]\n error = 'load_yaml: Failed to open {filename}: {reason} {err}'.format(filename=filename, reason=reason, err=err)\n raise IOError(error)", "def read_yaml_file(yaml_file):\n with open(yaml_file, 'r') as yfile:\n loaded_file = yaml.safe_load(yfile)\n return loaded_file", "def load_yaml(fname):\n with open(fname) as f:\n val = yaml.safe_load(os.path.expandvars(f.read()))\n return val", "def load_yaml(filepath):\n with open(filepath, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n # This should maybe throw an exception or something\n return {}", "def read_yaml(preset_file: Text) -> Dict:\n with open(preset_file, \"r\") as preset_file:\n return yaml.safe_load(preset_file)", "def read_from_yaml(file_path, Loader=None):\n import yaml\n if Loader is None:\n Loader = yaml.FullLoader\n if os.path.isfile(file_path):\n with open(file_path, 'r') as stream:\n data = yaml.load(stream, Loader=Loader)\n return data\n else:\n raise Exception('File: {} does not exist.'.format(file_path))", "async def load(self, file: IO) -> dict:", "def load_file(self, filepath):\n filepath = self._yaml_extension(filepath)\n data = self._load_data_yaml(filepath)\n return data", "def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, Loader=yaml.FullLoader)\n \n return traj_data", "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "def load_yaml(yaml_name):\n print('training network configuration file is {0}'.format(yaml_name))\n util.check_file_exist(yaml_name)\n config = util.load_yaml_file(yaml_name)\n return config", "def load_yaml(file_path):\n with open(file_path) as fin:\n content = yaml.load(fin, Loader=yaml.FullLoader)\n return content", "def load(file):\n _config.load(file)", "def get_config_from_yaml(yaml_file):\n\n with open(yaml_file) as fp:\n config_dict = yaml.load(fp, Loader=yaml.FullLoader)\n #config_dict = yaml.load(fp)\n\n # convert the dictionary to a namespace using bunch lib\n config = Dict(config_dict)\n return config, config_dict", "def read_config() -> dict:\n with Path(\"config.yaml\").open(\"r\") as file_pointer:\n try:\n return yaml.safe_load(file_pointer)\n except yaml.YAMLError as e:\n logging.error(e)", "def _cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n cfg = yaml.load(f)\n return cfg", "def _populate_pil_info(self, file):\n with open(str(Path(file))) as pp_fd:\n test_data = yaml.safe_load_all(pp_fd)\n return next(test_data)", "def __init__(self, yaml_file_path: Path) -> None:\n with yaml_file_path.open(\"r\") as yaml_file:\n self._yaml = YAML().load(yaml_file.read())", "def from_yaml_file(cls, file_path: str) -> Config:\n return cls(**read_yaml_file(file_path))", "def _load_yaml(source_dir, file_name):\n return yaml.dump(utils.load_yaml_dict(os.path.join(source_dir, file_name)))", "def cfg_from_file(file_name):\n import yaml\n with open(file_name, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_two_config(yaml_cfg, __C)", "def 
load_config(configfile: str = 'config.yml') -> TypedDict:\n with open(configfile, 'r') as stream:\n try:\n config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return config", "def load_yaml(self):\n env = self.state.document.settings.env\n relpath, abspath = env.relfn2path(directives.path(self.arguments[0]))\n\n env.note_dependency(relpath)\n\n encoding = self.options.get('encoding', env.config.source_encoding)\n with io.open(abspath, 'rt', encoding=encoding) as stream:\n spec = yaml.load(stream, _YamlOrderedLoader) # nosec\n self.spec = spec\n self.paths = spec[self.path_path]\n self.definitions = spec[self.models_path]\n self.openapi_version = spec.get('swagger', None) or spec['openapi']\n self.options.setdefault('uri', 'file://%s' % abspath)", "def test_load_variables_correct_yaml(self):\n var_dict = load_variables(self.correct_yaml)\n self.assertIsNotNone(var_dict)\n self.assertIsInstance(var_dict, dict)", "def read_yaml(file):\n with open(file, mode='r') as stream:\n out = yaml.load(stream)\n\n return out", "def load_params_file(filename):\n with open(filename, 'r') as f:\n params = yaml.safe_load(f)\n return params", "def load_config(config_file): \n with open(config_file) as in_handle: \n config = yaml.load(in_handle) \n # for field, setting in config.items(): \n # if isinstance(config[field], dict): \n # for sub_field, sub_setting in config[field].items(): \n # config[field][sub_field] = expand_path(sub_setting) \n # else: \n # config[field] = expand_path(setting) \n return config", "def config_from_yaml(self, filename):\n with open(filename, 'r') as f:\n config = yaml.load(f)\n config = self._process_config_imports(config)\n self._config.update(config)", "def parse(self):\n try:\n with open(self.path, 'r') as ymlfile:\n self.__cfg = yaml.load(ymlfile)\n except IOError:\n self.log(\"File {0} not found -- aborting\".format(self.path))\n raise ConfigFileException", "def load_yaml(filename):\n with open(filename) as file:\n yaml = YAML()\n data = yaml.load(file)\n return data, yaml", "def read_file_obj(self, file_obj):\n data = yaml.safe_load(file_obj)\n if data:\n for key in data:\n if key in self._parameters:\n self._config[key] = data[key]\n return self._validate()", "def load_config(config_file):\n with open(config_file) as f:\n return yaml.load(f)", "def get_yaml_cfg(yaml_filepath):\n with open(str(yaml_filepath), 'r') as fileobj:\n cfg = AttrDict(yaml.safe_load(fileobj))\n return cfg", "def load_metadata(self, name) -> Dict[str, str]:\n return load_metadata(self._casedir / Path(\"{name}/metadata_{name}.yaml\".format(name=name)))", "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n _merge_a_into_b(yaml_cfg, __C)", "def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:\n with open(filename, \"r\") as f:\n config = yaml.load(f)\n\n return cast(DictLike, config)", "def _load_from_file(self):\n try:\n self.logger.debug('Load metafile %s.', self.meta_file_path)\n with codecs.open(self.meta_file_path, 'r', 'utf-8') as meta_file:\n self._meta_dict = json.load(meta_file)\n # TODO Validate Meta Dict\n except OSError as ex:\n raise MetadataError('Unable to open the metadata file \"{}\". {}'\n .format(self.meta_file_path, ex.strerror)) from ex\n except ValueError as ex:\n raise MetadataError(\n 'Unable to load the metadata file \"{}\". 
AttributeError: {}'\n .format(self.meta_file_path, ex)) from ex", "def load_config(path=\"configs/default.yaml\") -> dict:\n with open(path, \"r\", encoding=\"utf-8\") as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n return cfg", "def read_config(self,confile):\n\n\n print(\"reading:\",confile)\n with open(confile) as parf:\n data=yaml.load(parf)\n\n\n return data", "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = EasyDict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, cfg)", "def from_yaml(self, content):\r\n if yaml is None:\r\n raise UnsupportedDeserializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.load(content, Loader=DeliciousCakeLoader)", "def load_yaml_params(self, params_file):\n self._update_params(params_file)", "def _load_yaml_config(config_file):\n if type(config_file) is file:\n Config.CONFIG.update(yaml.load(config_file) or {})\n return Config.CONFIG\n else:\n try:\n with open(config_file, 'r') as f:\n return yaml.load(f)\n except IOError as e:\n e.message = \"Could not open configuration file \\\"{}\\\".\".format(config_file)\n raise e", "def read_catalog_info_yaml(self, splitkey):\n catalog_info_yaml = self._name_factory.catalog_split_yaml(sourcekey=splitkey,\n fullpath=True)\n yaml_dict = yaml.safe_load(open(catalog_info_yaml))\n # resolve env vars\n yaml_dict['catalog_file'] = os.path.expandvars(yaml_dict['catalog_file'])\n yaml_dict['catalog_extdir'] = os.path.expandvars(yaml_dict['catalog_extdir'])\n return yaml_dict", "def from_path(cls, path: str) -> Any:\n cls._check_yaml()\n with open(path) as f:\n return yaml.safe_load(f)", "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)", "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)", "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)", "def load_yaml_file(yaml_file):\n try:\n # Get the configuration parameters which contain the region, vpc name, template filename, VPC CIDR blocks\n s = open(yaml_file).read()\n config = list(yaml.load_all(s))[0]\n\n except Exception as e:\n # We're expecting the user parameters to be encoded as YAML\n # so we can pass multiple values. 
If the YAML can't be decoded\n # then return failure with a helpful message.\n print(e)\n raise Exception('Input configuration parameters could not be decoded as YAML')\n\n return config", "def load_yaml(path):\n fsock = open(path)\n \n try:\n yaml_string = fsock.read()\n yaml_obj = yaml.load(yaml_string)\n \n finally:\n fsock.close()\n\n return yaml_obj", "def load_config_file(path):\n with open(path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "def read_yaml_file(path: Union[str, pathlib.Path]) -> dict:\n\n if isinstance(path, (str, pathlib.Path)):\n with open(path, 'r') as fp:\n config = yaml.safe_load(fp)\n else:\n # Assume it's an stream\n config = yaml.safe_load(path)\n\n return config", "def load():\n print(\"Loading Configuration file..\")\n\n def load_defaults():\n global _conf\n _conf = get_defaults()\n save()\n\n if not os.path.exists(__config_file):\n load_defaults()\n return\n\n global _conf\n with open(__config_file, 'r', encoding='utf-8') as stream:\n _conf = yaml.round_trip_load(stream)\n \n if _conf is None:\n load_defaults()\n return\n \n version = _conf.get('_conf', -1)\n if version != VERSION:\n migrate(version)\n _conf['_conf'] = VERSION\n save()\n\n def mergeDict(old: dict, new: dict, layer=1) -> dict:\n \"\"\"\n Merge a dictionary into another while prefering the old values over the new\n\n :param old: original dictionary\n :param new: new dictionary to merge\n \"\"\"\n \n from collections import Mapping\n changed = False\n for key, val in new.items():\n # print(\"{} ({})\".format(key, type(old.get(key))))\n if not key in old:\n print(\"{}Adding new value {}\".format(' ' * layer, key))\n changed = True\n old[key] = val\n elif issubclass(type(old[key]), Mapping) and issubclass(type(val), Mapping):\n print(\"{}Merging dict {}\".format(' ' * layer, key))\n changed = changed or mergeDict(old[key], val, layer + 1)\n\n return changed\n \n defaults = get_defaults()\n if mergeDict(_conf, defaults):\n save()", "def read_yaml(file_location):\n with open(file_location, 'r') as stream:\n try:\n yaml_dict = yaml.load(stream, Loader=yaml.SafeLoader)\n except yaml.YAMLError as e:\n logger.error(e)\n return yaml_dict", "def load(self, path):\n\n try:\n with open(path) as f:\n try:\n self.hooks = yaml.load(f.read())\n except ScannerError:\n self.warning('Error loading {0} hooks - Is it '\n 'correctly formatted?'.format(path))\n else:\n self.out('Loading hooks')\n except IOError:\n self.warning('{0} not found'.format(path))", "def load_yaml(config: str) -> dict:\n with open(config, 'r') as file:\n config = yaml.load(file, Loader=yaml.SafeLoader)\n\n return config", "def _deserialize(self):\n try:\n self._as_dict = yaml.load(self.path)\n except ScannerError as e:\n raise exc.ContentSerializeError(self, self.path, e.problem)", "def load_template(cls, template_name):\n\n template_path = path.join(dirs.user_data_dir, 'template', '%s.yaml' % template_name)\n\n if not path.isfile(template_path):\n return {}\n\n with open(template_path, 'r') as gf:\n return yaml.safe_load(gf)", "def load_cfg(filepath=\"./config.yaml\"):\n with open(filepath, \"r\") as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, cfg)" ]
[ "0.73581326", "0.70676595", "0.70332026", "0.6870221", "0.68692476", "0.6855705", "0.68462366", "0.680534", "0.6785473", "0.6768446", "0.6765261", "0.6741415", "0.6676443", "0.6671067", "0.66030633", "0.6585844", "0.6558528", "0.65580547", "0.6556143", "0.65504235", "0.65474254", "0.6523342", "0.6504018", "0.6502648", "0.6496006", "0.64790446", "0.6466386", "0.6463216", "0.6456318", "0.6442335", "0.6431106", "0.64282304", "0.6419036", "0.6418745", "0.6398966", "0.63793814", "0.63783145", "0.6376362", "0.6369108", "0.6342898", "0.6321824", "0.6271051", "0.6269683", "0.6263938", "0.62565696", "0.62513727", "0.6247349", "0.62469095", "0.62405133", "0.6219887", "0.62117565", "0.6207121", "0.620529", "0.6204877", "0.61875474", "0.6186429", "0.61820704", "0.61757237", "0.61689186", "0.6154075", "0.61391366", "0.61343664", "0.6097825", "0.6091774", "0.6081638", "0.60701966", "0.60624707", "0.60611343", "0.6055155", "0.6054196", "0.6049606", "0.60382044", "0.60356504", "0.6035395", "0.60316503", "0.6030738", "0.6028715", "0.6028432", "0.6017254", "0.60110897", "0.5997862", "0.5987936", "0.5984169", "0.5983958", "0.5980268", "0.5979087", "0.5975764", "0.5975764", "0.5975764", "0.59715074", "0.59632117", "0.59631747", "0.5933794", "0.5927926", "0.5921484", "0.5913337", "0.59100866", "0.59093076", "0.5908916", "0.590714", "0.59047645" ]
0.0
-1
Ensure all epi files are recomputed by verifying that all output prefixes either don't exist or are deleted.
def CleanEpi(self):
    for entry in self.info.keys():
        info = self.info[entry]
        if info['psdname'] == 'epi':
            for tag in ('imgfile', 'imgfile_m', 'imgfile_mf', 'imgfile_t'):
                if info.has_key(tag) and info[tag] is not None and \
                   os.path.exists(info[tag]):
                    print 'Deleting %s%s*' % (info[tag], info['suffix'])
                    cmd = '/bin/rm %s%s*' % (info[tag], info['suffix'])
                    self.ExecCmd(cmd)
                    if '.BRIK' in info['suffix']:
                        cmd = '/bin/rm %s%s*' % (info[tag], \
                              info['suffix'].replace('.BRIK', '.HEAD'))
                        self.ExecCmd(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanupAthenaMP(workdir, outputfiles=[]):\n\n for ampdir in glob('%s/athenaMP-workers-*' % (workdir)):\n for (p, d, f) in os.walk(ampdir):\n for filename in f:\n if 'core' in filename or 'tmp.' in filename:\n path = os.path.join(p, filename)\n path = os.path.abspath(path)\n remove(path)\n for outfile in outputfiles:\n if outfile in filename:\n path = os.path.join(p, filename)\n path = os.path.abspath(path)\n remove(path)\n\n return 0", "def check_all_files_and_dirs(self):\n err = 0\n err_m = ''\n warning = 0\n warning_m = ''\n # Check the pdb file for refinement\n if self.refine_pdb_in == None:\n err = 1\n err_m += '\\nPdb file should be supplied'\n else:\n if self.check_single_file(self.refine_pdb_in):\n self.refine_pdb_in = os.path.abspath(self.refine_pdb_in)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(self.refine_pdb_in)\n\n # Check the pdb file for distance analysis\n if self.check_single_file(self.X8_pdb_in):\n self.X8_pdb_in = os.path.abspath(self.X8_pdb_in)\n else:\n self.X8_pdb_in != None\n warning = 1\n warning_m += '\\nXtrapol8 pdb_in not found. No additional analysis will be applied'\n\n # Check additional files and append them to a string\n additional = \"\"\n for fle in self.additional:\n if len(fle)>0:\n if self.check_single_file(fle):\n new_add = os.path.abspath(fle)\n additional = additional + \"%s \" % (new_add)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(fle)\n self.additional = additional\n\n #Check the output directory\n if os.path.isdir(self.outdir):\n self.outdir = os.path.abspath(self.outdir)\n else:\n err = 1\n err_m += \"\\nXtrapol8 output directory cannot be found.\" \\\n \"Please run this from the same directory from which you ran Xtrapol8.\"\n\n #Check the phil file for reciprocal space refinement\n if self.check_single_file(self.reciprocal_space_phil):\n self.reciprocal_space_phil = os.path.abspath(self.reciprocal_space_phil)\n else:\n self.reciprocal_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for reciprocal space refinement not found. Refinement will use default parameters.'\n\n\n #Check the phil file for real space refinement\n if self.check_single_file(self.real_space_phil):\n self.real_space_phil = os.path.abspath(self.real_space_phil)\n else:\n self.real_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for real space refinement not found. Refinement will use default parameters.'\n\n #Check the residue list for distance analysis\n if self.check_single_file(self.residue_list):\n self.residue_list = os.path.abspath(self.residue_list)\n else:\n self.residue_list = None\n warning = 1\n warning_m += '\\nResidue list not found. 
Distance analysis (if required) will be performed without residue list.'\n\n return err, err_m, warning, warning_m", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def test_empty_units(self):\n command_line = [\"filesystem\", \"create\", \"pn\", \"fn\", '--size=\"312\"']\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def test_outpath_multi_unequal(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths][1:]\n\n with pytest.raises(SystemExit):\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def process_cleanup(self, output_file=None, output_list=None):\n if output_file:\n self.check_output_file( output_file )\n elif output_list:\n for output_file in output_list:\n self.check_output_file( output_file )\n log.info('All expected output files found - process successful!\\n')", "def checkAllFilesGenerated(self):\n root = get_exhale_root(self)\n containmentFolder = self.getAbsContainmentFolder()\n for node in root.all_nodes:\n if node.kind in [\"enumvalue\", \"group\"]:\n continue\n gen_file_path = os.path.join(containmentFolder, node.file_name)\n self.assertTrue(\n os.path.isfile(gen_file_path),\n \"File for {kind} node with refid=[{refid}] not generated to [{gen_file_path}]!\".format(\n kind=node.kind, refid=node.refid, gen_file_path=gen_file_path\n )\n )", "def process_delete_mp3_output_files(stand_alone_flag):\n\n if stand_alone_flag == 1:\n print(\"Deleting mp3 and output file. 
Value of stand_alone_flag : \", str(stand_alone_flag))\n mp3_files = glob.glob('*.mp3')\n output_files = glob.glob('*_Output.txt')\n for files in mp3_files:\n try:\n os.remove(files)\n except OSError:\n print(\"Cannot able delete the old mp3 files.\")\n\n for files in output_files:\n try:\n os.remove(files)\n except OSError:\n print(\"Cannot able delete the old output text files.\")", "def checkCopiedFiles(self):\n self.missingAiCopies = 0\n self.invalidAiCopies = 0\n self.invalidMapCopies = 0\n self.missingMapCopies = 0\n\n for iFile in self.inputFilesAll:\n if not (os.path.isfile(self.MAPCOPY + iFile + '.msb')):\n self.missingMapCopies += 1\n else:\n with open(self.MAPCOPY + iFile + '.msb', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidMapCopies += 1\n\n if not (iFile == \"m12_00_00_01\"):\n if (self.useDCX):\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd.dcx')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd.dcx', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n else:\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n\n if (self.missingAiCopies > 0 or self.invalidAiCopies > 0 or self.missingMapCopies > 0 or self.invalidMapCopies > 0 or self.missingSfxCopies > 0 or self.invalidSfxCopies > 0):\n return False\n else:\n return True", "def test_funny_units(self):\n command_line = [\"filesystem\", \"create\", \"pn\", \"fn\", '--size=\"312WiB\"']\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def removeRedundantFiles(workdir, outputfiles=[]):\n\n logger.info(\"Removing redundant files prior to log creation\")\n\n workdir = os.path.abspath(workdir)\n\n dir_list = [\"AtlasProduction*\",\n \"AtlasPoint1\",\n \"AtlasTier0\",\n \"buildJob*\",\n \"CDRelease*\",\n \"csc*.log\",\n \"DBRelease*\",\n \"EvgenJobOptions\",\n \"external\",\n \"fort.*\",\n \"geant4\",\n \"geomDB\",\n \"geomDB_sqlite\",\n \"home\",\n \"o..pacman..o\",\n \"pacman-*\",\n \"python\",\n \"runAthena*\",\n \"share\",\n \"sources.*\",\n \"sqlite*\",\n \"sw\",\n \"tcf_*\",\n \"triggerDB\",\n \"trusted.caches\",\n \"workdir\",\n \"*.data*\",\n \"*.events\",\n \"*.py\",\n \"*.pyc\",\n \"*.root*\",\n \"JEM\",\n \"tmp*\",\n \"*.tmp\",\n \"*.TMP\",\n \"MC11JobOptions\",\n \"scratch\",\n \"jobState-*-test.pickle\",\n \"*.writing\",\n \"pwg*\",\n \"pwhg*\",\n \"*PROC*\",\n \"madevent\",\n \"HPC\",\n \"objectstore*.json\",\n \"saga\",\n \"radical\",\n \"ckpt*\"]\n\n # remove core and pool.root files from AthenaMP sub directories\n try:\n cleanupAthenaMP(workdir, outputfiles)\n except Exception, e:\n print(\"Failed to execute cleanupAthenaMP(): %s\" % (e))\n\n # explicitly remove any soft linked archives (.a files) since they will be dereferenced by the tar command (--dereference option)\n matches = []\n import fnmatch\n for root, dirnames, filenames in os.walk(workdir):\n for filename in fnmatch.filter(filenames, '*.a'):\n matches.append(os.path.join(root, filename))\n for root, dirnames, filenames in os.walk(os.path.dirname(workdir)):\n for filename in fnmatch.filter(filenames, 'EventService_premerge_*.tar'):\n matches.append(os.path.join(root, filename))\n if matches != []:\n for f in matches:\n remove(f)\n # else:\n # print(\"Found no archive files\")\n\n # note: these should be partitial file/dir names, not 
containing any wildcards\n exceptions_list = [\"runargs\", \"runwrapper\", \"jobReport\", \"log.\"]\n\n to_delete = []\n for _dir in dir_list:\n files = glob(os.path.join(workdir, _dir))\n exclude = []\n\n if files:\n for exc in exceptions_list:\n for f in files:\n if exc in f:\n exclude.append(os.path.abspath(f))\n\n _files = []\n for f in files:\n if not f in exclude:\n _files.append(os.path.abspath(f))\n to_delete += _files\n\n exclude_files = []\n for of in outputfiles:\n exclude_files.append(os.path.join(workdir, of))\n for f in to_delete:\n if not f in exclude_files:\n remove(f)\n\n # run a second pass to clean up any broken links\n broken = []\n for root, dirs, files in os.walk(workdir):\n for filename in files:\n path = os.path.join(root, filename)\n if os.path.islink(path):\n target_path = os.readlink(path)\n # Resolve relative symlinks\n if not os.path.isabs(target_path):\n target_path = os.path.join(os.path.dirname(path), target_path)\n if not os.path.exists(target_path):\n broken.append(path)\n else:\n # If it's not a symlink we're not interested.\n continue\n\n if broken:\n for p in broken:\n remove(p)\n\n return 0", "def clean_up(self):\n try:\n data_dir = os.environ[\"DATA\"]\n plots_dir = os.environ[\"PLOTS\"]\n logs_dir = os.environ[\"LOGS\"]\n except KeyError as detail:\n print \"GenerateSpectrum.clean_up: error\", detail, \"not set\"\n print \" --> source analysis environment scripts before running!\"\n sys.exit(1)\n for root, dirs, files in os.walk(os.getcwd()):\n for file in files:\n is_data = re.search(r\".*\\.root$\", file)\n is_plot = re.search(r\".*\\.png$\", file)\n hostname = socket.gethostname()\n is_log = re.search(r\"^rat\\.\"+hostname+r\"\\.[0-9]+\\.log$\", file)\n if is_data:\n try:\n root_file = TFile(file)\n tree = root_file.Get(\"T\")\n tree.ls()\n except ReferenceError as detail:\n \"generate_spectrum.clean_up: error in TFile,\", detail\n sys.exit(1)\n file_manips.copy_file(os.path.join(root, file), data_dir)\n elif is_plot:\n file_manips.copy_file(os.path.join(root, file), plots_dir)\n elif is_log:\n file_manips.copy_file(os.path.join(root, file), logs_dir)", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def _test_align_file_existance(self):\n if len(self._pathcreator.get_read_files()) == 0:\n self._write_err_msg_and_quit(\"Error! No read libraries given!\\n\")\n if len(self._ref_seq_files) == 0:\n self._write_err_msg_and_quit(\n \"Error! 
No reference sequence files given!\\n\"\n )", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def check_prerequisites(self, env):\n super(PopLog, self).check_prerequisites(env)\n print(\" Checking prerequisites for : {0}\".format(self.__class__.__name__))\n \n for inFile in self._expectedInFiles:\n rc, err_msg = cesmEnvLib.checkFile('{0}/{1}'.format(env['WORKDIR'],inFile), 'read')\n if not rc:\n print('{0}... continuing with additional plots.'.format(err_msg))", "def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n # get finished clean command logs of last update\n prev_cleaned = [\n x.file_name + '.TSV'\n for x in self.log_record.called.filter(\n command='cleancalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} files already cleaned.\".format(len(prev_cleaned)))\n # remove these from tsv_list\n tsv_list = [x for x in tsv_list if x not in prev_cleaned]\n\n # Loop through all the files in the source directory\n if self.verbosity:\n tsv_list = progress.bar(tsv_list)\n for name in tsv_list:\n call_command(\n \"cleancalaccessrawfile\",\n name,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n )", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n 
list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. \n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def clean(args):\n log = 'removing tmp dir %s ' % (args.tmpdir)\n if args.tmpdir.endswith('STAR'):\n cmd = ['rm -rf %s' % (args.tmpdir)]\n run_subprocess(cmd,args,log)\n log = \"remove tmp files from output dir\"\n cmd = ['mv %s/crick_joinedLog.final.out %s/Crick_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_joinedLog.final.out %s/Watson_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/crick_mergedLog.final.out %s/Crick_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_mergedLog.final.out %s/Watson_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/crick_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/watson_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/joined* header.sam' % args.output_dir]\n run_subprocess(cmd, args, log)", "def test_multiple_output_files(self):\r\n convert_fastaqual(self.fasta_file_path,\r\n multiple_output_files=True,\r\n output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_id_s = [('PC.634', expected_fasta_634_default,\r\n expected_qual_634_default),\r\n ('PC.354', expected_fasta_354_default,\r\n expected_qual_354_default),\r\n ('PC.481', expected_fasta_481_default,\r\n expected_qual_481_default)]\r\n for sample_id, expected_fasta, expected_qual in sample_id_s:\r\n actual_output_fasta_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fna',\r\n self.output_dir)\r\n\r\n actual_output_qual_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.qual',\r\n self.output_dir)\r\n\r\n actual_output_fasta = open(actual_output_fasta_path)\r\n actual_output_qual = open(actual_output_qual_path)\r\n actual_fasta = actual_output_fasta.read()\r\n actual_output_fasta.close()\r\n actual_qual = actual_output_qual.read()\r\n actual_output_qual.close()\r\n 
self._files_to_remove.append(actual_output_fasta_path)\r\n self._files_to_remove.append(actual_output_qual_path)\r\n\r\n self.assertEquals(actual_fasta, expected_fasta)\r\n self.assertEquals(actual_qual, expected_qual)", "def cleanUpPackage(inProgressFilename, packageFilename, propFilename):\n try:\n for filename in (inProgressFilename, packageFilename, propFilename):\n if (filename is not None and os.path.exists(filename)):\n os.remove(filename)\n\n except OSError, osErr :\n LOG.error('Unable to cleanup Package (%s)' % osErr)", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True", "def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))", "def clean_outputs(remit, sourcelist):\n if not os.path.exists('output-'+remit):\n os.mkdir('output-'+remit)\n for source in sourcelist:\n os.chdir('output-'+remit)\n if os.path.exists(source):\n shutil.rmtree(source)\n print('* deleted old \"output-%s/%s\"' % (remit, source))\n os.mkdir(source)\n # os.chdir(source)\n # os.mkdir('debug')\n # os.chdir('..')\n os.chdir('..')", "def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def output_files_exist(self):\n return all([split.exists() for split in self.split_files])", "def cleanup_precluster_intermediate_files(batch_index):\n files = [\"seed{0}.S.fasta\".format(batch_index),\n \"seed{0}.orphans.fasta\".format(batch_index),\n \"batch{0}.fasta\".format(batch_index),\n \"batch{0}.remains.fasta\".format(batch_index),\n \"batch{0}.remains2.fasta\".format(batch_index)]\n\n files += glob.glob(\"batch{0}*.minimap\".format(batch_index))\n for file in files:\n try:\n os.remove(file)\n except:\n print >> sys.stderr, \"Failure to remove {0}. 
Ignore.\".format(file)", "def test_outpath_multi(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths]\n\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))", "def main(args=None):\n args = parse_args(args)\n\n with multiprocessing.Pool(8) as pool:\n printer = StatusPrinter()\n names = generate_paths(args.input, args.recursive)\n names = printer.set_input(names)\n written = itertools.chain.from_iterable(\n pool.imap_unordered(\n partial(process_file, args.output), names, 1000))\n written = printer.set_output(written)\n\n unique_count, dupe_count, invalid_count = 0, 0, 0\n invalids = []\n for item in written:\n if item == '__duplicate__':\n dupe_count += 1\n elif item.startswith(args.input):\n invalids.append(item)\n invalid_count += 1\n else:\n unique_count += 1\n print('{} unique, {} duplicates, {} invalid ({} total)'.format(\n unique_count, dupe_count, invalid_count,\n invalid_count + unique_count + dupe_count))\n\n print('invalid files: \\n{}'.format('\\n'.join(invalids)))", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def precheck(self):\n if (not dfs.exists(self.outputpath)):\n logger.debug(\"precheck(%s): outputpath %s does not exist, ready to run.\" \n % (self, self.outputpath))\n return 'ready'\n inTSs = [dfs.modtime(file) for file in self.inputpaths]\n outTS = dfs.modtime(self.outputpath)\n newer = reduce(lambda x,y: x or y, [(inTS>outTS) for inTS in inTSs])\n logger.debug(\"Input timestamps: %s\" % inTSs)\n logger.debug(\"Output timestamp: %s\" % outTS)\n if newer:\n logger.debug(\"At least one input file is newer than outputfile, ready to run.\")\n dfs.delete(self.outputpath)\n return 'ready'\n else:\n logger.debug(\"All input files are newer than outputfile, skipping.\")\n return 'skip'", "def remove_intermediate_files(self):\r\n\r\n # tmp files are written in the current dir,\r\n # app controller always jumps into dir specified via exec_dir\r\n # Note: blast intermediates are not removed\r\n exec_dir = str(self.Parameters['--exec_dir'].Value)\r\n inp_file_name = str(self.Parameters['--query_NAST'].Value)\r\n\r\n exec_dir = exec_dir.rstrip('\"')\r\n exec_dir = exec_dir.lstrip('\"')\r\n\r\n inp_file_name = inp_file_name.rstrip('\"')\r\n inp_file_name = inp_file_name.lstrip('\"')\r\n\r\n tmp_suffixes = [\".CPS\", \".CPS.CPC\", \".CPS_RENAST\", \".CPS_RENAST.cidx\",\r\n \".CPS.CPC.wTaxons\", \".cidx\"]\r\n cs_tmp_files = [\r\n exec_dir +\r\n '/' +\r\n inp_file_name +\r\n x for x in tmp_suffixes]\r\n remove_files(cs_tmp_files, error_on_missing=False)\r\n\r\n db_param = self.Parameters['--db_NAST']\r\n if db_param.isOn():\r\n nast_db_name = str(db_param.Value)\r\n nast_db_name = nast_db_name.rstrip('\"')\r\n nast_db_name = nast_db_name.lstrip('\"')\r\n\r\n # Better do not remove this file since other ChimeraSlayer\r\n # instances running on the same ref set might use this file\r\n # Should be rather deleted in the calling 
function\r\n# remove_files([nast_db_name + \".cidx\"],\r\n# error_on_missing=False)\r\n\r\n fasta_param = self.Parameters['--db_FASTA']\r\n if fasta_param.isOn():\r\n fasta_name = str(fasta_param.Value)\r\n fasta_name = fasta_name.rstrip('\"')\r\n fasta_name = fasta_name.lstrip('\"')\r\n\r\n blast_db_files = [\r\n fasta_name +\r\n x for x in [\r\n \".nsq\",\r\n \".nin\",\r\n \".nhr\",\r\n \".cidx\"]]\r\n remove_files(blast_db_files, error_on_missing=False)", "def clear_base_files(self):\r\n compilelock.get_lock()\r\n try:\r\n for base_dir in ('cuda_ndarray', 'cutils_ext', 'lazylinker_ext',\r\n 'scan_perform'):\r\n to_delete = os.path.join(self.dirname, base_dir + '.delete.me')\r\n if os.path.isdir(to_delete):\r\n try:\r\n shutil.rmtree(to_delete)\r\n _logger.debug('Deleted: %s', to_delete)\r\n except Exception:\r\n _logger.warning('Could not delete %s', to_delete)\r\n continue\r\n to_rename = os.path.join(self.dirname, base_dir)\r\n if os.path.isdir(to_rename):\r\n try:\r\n shutil.move(to_rename, to_delete)\r\n except Exception:\r\n _logger.warning('Could not move %s to %s',\r\n to_rename, to_delete)\r\n finally:\r\n compilelock.release_lock()", "def test_calculate_indicates_removal_of_unrelated_files(self, m_free):\n # files are unrelated to backup\n walk_paths = {'/dst': [('/dst', ['/a'], ['x0.txt']),\n ('/dst/a', [], ['x1.txt'])]}\n copied_indexes = []\n reconciler = keepfilesreconciler.KeepFilesReconciler(self.resolver, self.options)\n with filesystemhelpers.mock_walk(walk_paths):\n filepaths = reconciler.calculate(self.copyfiles, copied_indexes)\n assert filepaths == {'/dst/a/x1.txt', '/dst/x0.txt'}", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def check_analysis_pickle_files(self):\n # Make sure that there have been no more trials run since this\n # last processing. To do this, get the number of output files\n for basename in nsort(os.listdir(self.logdir)):\n m = self.labels.subdir_re.match(basename)\n if m is None or 'pckl' in basename:\n continue\n # Here is the output directory which contains the files\n subdir = os.path.join(self.logdir, basename)\n # Account for failed jobs. Get the set of file numbers that\n # exist for all h0 and h1 combinations\n self.get_set_file_nums(\n filedir=subdir\n )\n # Take one of the pickle files to see how many data\n # entries it has.\n data_sets = from_file(os.path.join(self.logdir,\n 'data_sets.pckl'))\n # Take the first data key and then the h0 fit to h0 fid\n # which should always exist. The length of this is then\n # the number of trials in the pickle files.\n if 'h0_fit_to_h0_fid' in data_sets[data_sets.keys()[0]].keys():\n pckl_trials = len(data_sets[data_sets.keys()[0]][\n 'h0_fit_to_h0_fid'].keys())\n # The number of pickle trials should match the number of\n # trials derived from the output directory.\n if self.num_trials == pckl_trials:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i trials. 
If '\n 'this seems incorrect please delete the files: '\n 'data_sets.pckl, all_params.pckl and labels.pckl '\n 'from the logdir you have provided.'%pckl_trials\n )\n pickle_there = True\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i trials. '\n 'However, based on the number of json files in the '\n 'output directory there should be %i trials in '\n 'these pickle files, so they will be regenerated.'%(\n pckl_trials, self.num_trials)\n )\n pickle_there = False\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script which do not seem to '\n 'contain any trials, so they will be regenerated.'\n )\n pickle_there = False\n \n return pickle_there", "def _verify_prefix(prefix, files):\n for f in files:\n f = os.path.join(prefix, f)\n if not os.path.exists(f):\n return False\n else:\n return True", "def _checksubrepostate(pushop):\n for n in pushop.outgoing.missing:\n ctx = pushop.repo[n]\n\n if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():\n for subpath in sorted(ctx.substate):\n sub = ctx.sub(subpath)\n sub.verify(onpush=True)", "def deleteIntermediateFiles(self):\n uniq_files = set(self.files_to_delete)\n print (\"Deleting %d intermediate files\" % len(uniq_files))\n for fn in uniq_files:\n # don't delete log files\n if not fn.endswith(\".log\"):\n os.remove(fn)", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def cleanup(self):\n\t\tfor filename in self.cfg_files:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tsize = os.stat(filename)[6]\n\t\t\t\tif size == 0:\n\t\t\t\t\tos.remove(filename)\n\n\t\treturn True", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "def _clean_up_optimization():\n for (root, dirs, files) in walk(TEMP_MODULES_DIR_PATH, topdown=False):\n for file in files:\n if file.startswith(\"__temp_\"):\n remove(f\"{root}/{file}\")\n try:\n rmdir(root)\n except OSError:\n G.warn_(f\"Unidentified file found in temporary directory: {root}\")", "def test_clean(self):\n self.make_files('foo.py', 'bar.js')\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*.py')\n env.add(graph)\n env.run_all()\n env.clean('.')\n self.assertTrue(os.path.exists('foo.py'))\n self.assertFalse(os.path.exists('bar.js'))", "def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with 
open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')", "def check_corrupted_files(self):\r\n for store in STORES:\r\n path = f\"{self.system.config_path}/.storage/{STORES[store]}\"\r\n if os.path.exists(path):\r\n if os.stat(path).st_size == 0:\r\n # File is empty (corrupted)\r\n return True\r\n return False", "def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # additional files\n check_one(filepath)", "def _CheckNoIn(input_api, output_api):\n results = []\n for f in input_api.AffectedFiles(include_deletes=False):\n if f.LocalPath().endswith('.in'):\n results.append(output_api.PresubmitError(\n 'Remove %s since corpus tests should not use .in files' % f.LocalPath()))\n return results", "def test_output_otions(rawinputfile, reformfile1, assumpfile1):\n taxyear = 2021\n tcio = TaxCalcIO(input_data=rawinputfile.name,\n tax_year=taxyear,\n reform=reformfile1.name,\n assump=assumpfile1.name,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=False)\n outfilepath = tcio.output_filepath()\n # --ceeu output and standard output\n try:\n tcio.static_analysis(writing_output_file=True, output_ceeu=True)\n except: # pylint: disable=bare-except\n if os.path.isfile(outfilepath):\n try:\n os.remove(outfilepath)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n assert 'TaxCalcIO.calculate(ceeu)_ok' == 'no'\n # --dump output\n try:\n tcio.static_analysis(writing_output_file=True, output_dump=True)\n except: # pylint: disable=bare-except\n if os.path.isfile(outfilepath):\n try:\n os.remove(outfilepath)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n assert 'TaxCalcIO.calculate(dump)_ok' == 'no'\n # if tries were successful, try to remove the output file\n if os.path.isfile(outfilepath):\n try:\n os.remove(outfilepath)\n except OSError:\n pass # sometimes we can't remove a generated temporary file", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... 
Done.')", "def test_single_file_upgma(self):\r\n\r\n titles = ['hi', 'ho']\r\n distdata = numpy.array([[0, .5], [.5, 0.]])\r\n fd, fname = mkstemp(prefix='upgma_', suffix='.txt')\r\n close(fd)\r\n f = open(fname, 'w')\r\n self._paths_to_clean_up.append(fname)\r\n f.write(format_distance_matrix(titles, distdata))\r\n f.close()\r\n\r\n fd, fname2 = mkstemp(prefix='upgma_', suffix='.txt')\r\n close(fd)\r\n self._paths_to_clean_up.append(fname2)\r\n single_file_upgma(fname, fname2)\r\n assert(os.path.exists(fname2))", "def test_multiple_output_files(self):\r\n convert_fastq(self.fasta_file_path, self.qual_file_path,\r\n multiple_output_files=True,\r\n output_directory=self.output_dir,\r\n per_file_buffer_size=23)\r\n\r\n sample_ids = [('PC.634', expected_fastq_634_default),\r\n ('PC.354', expected_fastq_354_default),\r\n ('PC.481', expected_fastq_481_default)]\r\n for sample_id, expected_output in sample_ids:\r\n actual_output_file_path = get_filename_with_new_ext(\r\n self.fasta_file_path,\r\n '_' + sample_id + '.fastq',\r\n self.output_dir)\r\n\r\n actual_output_file = open(actual_output_file_path)\r\n actual_output = actual_output_file.read()\r\n actual_output_file.close()\r\n self._files_to_remove.append(actual_output_file_path)\r\n\r\n self.assertEquals(actual_output, expected_output)", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))", "def pass1(self, verbose):\n \n for root, dirs, files in os.walk(self.dir_to_check, topdown=False):\n t_size = 0\n for f in files:\n new_f = os.path.join(root,f) #complete path in case of homonyms\n size = os.path.getsize(new_f)\n t_size += size\n self.cache[new_f] = HumanReadableSize(size)\n t_size += sum ([self.cache[os.path.join(root,d)].val for d in dirs])\n self.cache[root] = HumanReadableSize(t_size)\n if verbose:\n print ('.................... 
Computing size of {}!'.format(root))\n \n #print (self.cache) #debugging", "def collect_initial_outputs(self) -> None: # pylint: disable=too-many-branches\n assert self.step is not None\n missing_outputs = []\n for pattern in sorted(self.step.output):\n formatted_pattern = fmt_capture(self.kwargs, pattern)\n self.expanded_outputs.append(formatted_pattern)\n\n if is_phony(formatted_pattern):\n self.phony_outputs.append(formatted_pattern)\n Invocation.phony.add(formatted_pattern)\n continue\n\n try:\n paths = glob_paths(formatted_pattern)\n if not paths:\n Logger.debug(f\"Nonexistent optional output(s): {pattern}\")\n else:\n for path in paths:\n self.initial_outputs.append(path)\n if path == pattern:\n Logger.debug(f\"Existing output: {path}\")\n else:\n Logger.debug(f\"Existing output: {pattern} -> {path}\")\n except NonOptionalException:\n Logger.debug(f\"Nonexistent required output(s): {pattern}\")\n self.missing_output = formatted_pattern\n missing_outputs.append(capture2re(formatted_pattern))\n\n if self.new_persistent_actions:\n for path in self.old_persistent_outputs:\n if path in self.initial_outputs:\n continue\n\n was_reported = False\n for regexp in missing_outputs:\n if re.fullmatch(regexp, path):\n was_reported = True\n break\n\n if was_reported:\n continue\n\n if Stat.exists(path):\n Logger.debug(f\"Changed to abandon the output: {path}\")\n self.abandoned_output = path\n else:\n Logger.debug(f\"Missing the old built output: {path}\")\n self.missing_output = path\n\n Stat.forget(path)\n\n if (\n self.must_run_action\n or self.phony_outputs\n or self.missing_output is not None\n or self.abandoned_output is not None\n ):\n return\n\n for output_path in sorted(self.initial_outputs):\n if is_exists(output_path):\n continue\n output_mtime_ns = Stat.stat(output_path).st_mtime_ns\n if self.oldest_output_path is None or self.oldest_output_mtime_ns > output_mtime_ns:\n self.oldest_output_path = output_path\n self.oldest_output_mtime_ns = output_mtime_ns\n\n if Logger.isEnabledFor(logging.DEBUG) and self.oldest_output_path is not None:\n Logger.debug(\n f\"Oldest output: {self.oldest_output_path} \"\n f\"time: {_datetime_from_nanoseconds(self.oldest_output_mtime_ns)}\"\n )", "def check_generated_files(out_dir, output_list_file):\n xcpd_dir = os.path.join(out_dir, \"xcp_d\")\n found_files = sorted(glob(os.path.join(xcpd_dir, \"**/*\"), recursive=True))\n found_files = [os.path.relpath(f, out_dir) for f in found_files]\n\n # Ignore figures\n found_files = [f for f in found_files if \"figures\" not in f]\n\n with open(output_list_file, \"r\") as fo:\n expected_files = fo.readlines()\n expected_files = [f.rstrip() for f in expected_files]\n\n if sorted(found_files) != sorted(expected_files):\n expected_not_found = sorted(list(set(expected_files) - set(found_files)))\n found_not_expected = sorted(list(set(found_files) - set(expected_files)))\n\n msg = \"\"\n if expected_not_found:\n msg += \"\\nExpected but not found:\\n\\t\"\n msg += \"\\n\\t\".join(expected_not_found)\n\n if found_not_expected:\n msg += \"\\nFound but not expected:\\n\\t\"\n msg += \"\\n\\t\".join(found_not_expected)\n raise ValueError(msg)", "def _clean_up(paths):\n print('Cleaning up')\n # Iterate over the given paths, unlinking them\n for path in paths:\n if os.path.exists(path):\n print('Removing %s' % path)\n os.unlink(path)\n else:\n print('%s Not found. Skipped.' 
% path)", "def check_hdf5_files(database):\n\n logger.info(\" Checking dataset Integrity\")\n remove_file = []\n for fname in database:\n try:\n f = h5py.File(fname, 'r')\n mol_names = list(f.keys())\n if len(mol_names) == 0:\n warnings.warn(' -> %s is empty ' % fname)\n remove_file.append(fname)\n f.close()\n except BaseException:\n warnings.warn(' -> %s is corrputed ' % fname)\n remove_file.append(fname)\n\n for name in remove_file:\n database.remove(name)\n if remove_file:\n logger.info(f'\\t -> Empty or corrput databases are removed:\\n'\n f'{remove_file}')\n\n return database", "def test_set_reduce_files_automatically_when_only_field_specified():\n\n fields = (\n constants.UNIHAN_MANIFEST['Unihan_Readings.txt']\n + constants.UNIHAN_MANIFEST['Unihan_Variants.txt']\n )\n\n options = {'fields': fields}\n\n b = process.Packager(options)\n\n expected = ['Unihan_Readings.txt', 'Unihan_Variants.txt']\n results = b.options['input_files']\n\n assert set(expected) == set(results)", "def test_compute_seqs_per_file(self):\r\n fd, temp_fasta_fp = mkstemp(prefix='QiimeScriptUtilTests',\r\n suffix='.fasta')\r\n close(fd)\r\n temp_fasta = ['>seq', 'AAACCCCAAATTGG'] * 25\r\n open(temp_fasta_fp, 'w').write('\\n'.join(temp_fasta))\r\n\r\n actual_25 = self.pw._compute_seqs_per_file(temp_fasta_fp, 25)\r\n actual_2 = self.pw._compute_seqs_per_file(temp_fasta_fp, 2)\r\n actual_10 = self.pw._compute_seqs_per_file(temp_fasta_fp, 10)\r\n actual_5 = self.pw._compute_seqs_per_file(temp_fasta_fp, 5)\r\n actual_40 = self.pw._compute_seqs_per_file(temp_fasta_fp, 40)\r\n\r\n remove_files([temp_fasta_fp])\r\n\r\n self.assertEqual(actual_25, 1)\r\n self.assertEqual(actual_2, 13)\r\n self.assertEqual(actual_10, 3)\r\n self.assertEqual(actual_5, 5)\r\n self.assertEqual(actual_40, 1)", "def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. 
Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n 
fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}", "def setUp(self):\n self.outdir = \"tests/out/pdftotext\"\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir)\n else:\n files = glob.glob(self.outdir)\n for f in files:\n if os.path.isfile(f):\n os.remove(f)", "def test_call_default_params_save_intermediate_files(self):\r\n\r\n intermediate_files_dir = self.output_dir + \"/test_usearch61/\"\r\n create_dir(intermediate_files_dir)\r\n self._dirs_to_remove.append(intermediate_files_dir)\r\n\r\n app = Usearch610DeNovoOtuPicker(\r\n params={'save_intermediate_files': True,\r\n 'output_dir':\r\n intermediate_files_dir,\r\n 'remove_usearch_logs': False\r\n })\r\n\r\n obs_clusters = app(self.tmp_seq_filepath_97perc_id)\r\n\r\n expected_intermediate_fps =\\\r\n [intermediate_files_dir + \"denovo_abundance_sorted.fna\",\r\n intermediate_files_dir + \"denovo_abundance_sorted.uc\",\r\n intermediate_files_dir + \"denovo_smallmem_clustered.uc\",\r\n intermediate_files_dir + \"abundance_sorted.log\",\r\n intermediate_files_dir + \"smallmem_clustered.log\"]\r\n\r\n for curr_file in expected_intermediate_fps:\r\n self.assertTrue(exists(curr_file))", "def test_calculate_indicates_removal_of_already_copied_files(self, m_free):\n # files belong to backup, but have been copied to another device\n # (indicated by copied_indexes)\n walk_paths = {'/dst': [('/dst', ['a'], ['0.txt']),\n ('/dst/a', [], ['2.txt'])]}\n copied_indexes = [0, 2]\n reconciler = keepfilesreconciler.KeepFilesReconciler(self.resolver, self.options)\n with filesystemhelpers.mock_walk(walk_paths):\n filepaths = reconciler.calculate(self.copyfiles, copied_indexes)\n assert filepaths == {'/dst/0.txt', '/dst/a/2.txt'}", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. 
Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def _need_generate(paths):\r\n if not os.path.exists(paths.generated_dir):\r\n return True\r\n\r\n if not os.path.exists(paths.index_file):\r\n return True\r\n\r\n # Use the index file to determine if regeneration is necessary\r\n with open(paths.index_file, 'r',newline='\\n') as index_file:\r\n indexed = [item for item in\r\n index_file.read().split('\\n') if len(item) != 0 and\r\n not item.startswith(\"#\")]\r\n return indexed != paths.resource_files", "def run_gen_and_econ(self):\n try:\n super().run_gen_and_econ()\n finally:\n temp_dir = getattr(self, \"_temp_dir\", None)\n if temp_dir is not None:\n temp_dir.cleanup()", "def test_non_incremental(self):\n\n one = '1\\n'\n two = '2\\n'\n target, task = self._fixture(incremental=False)\n\n # Run twice.\n self._create_clean_file(target, one)\n vtA = task.execute()\n self.assertContent(vtA, one)\n self._create_clean_file(target, two)\n vtB = task.execute()\n\n # Confirm two unassociated current directories with a stable results_dir.\n self.assertContent(vtA, one)\n self.assertContent(vtB, two)\n self.assertNotEqual(vtA.current_results_dir, vtB.current_results_dir)\n self.assertEqual(vtA.results_dir, vtB.results_dir)", "def flush_outputs():\n try:\n shutil.rmtree(ROOT_OUTPUT_DIR)\n print(\"Removed directory '{}'!\".format(ROOT_OUTPUT_DIR))\n return True\n except FileNotFoundError:\n print(\"Directory '{}' already removed!\".format(ROOT_OUTPUT_DIR))\n return False", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def testFilesExist(self):\n \n for year in range(2007,2013):\n self.assertTrue(os.path.exists(\"./IncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./LogIncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./IncomeBoxplot(log)_\"+ str(year)+\".pdf\"), \"A boxplot didn't save to output.\") \n self.assertTrue(os.path.exists(\"./results.txt\"), \"Results file doesn't exist.\")", "def cleanup():\r\n compiledir = theano.config.compiledir\r\n for directory in os.listdir(compiledir):\r\n file = None\r\n try:\r\n try:\r\n filename = os.path.join(compiledir, directory, \"key.pkl\")\r\n file = open(filename, 'rb')\r\n #print file\r\n try:\r\n keydata = 
cPickle.load(file)\r\n for key in list(keydata.keys):\r\n have_npy_abi_version = False\r\n have_c_compiler = False\r\n for obj in flatten(key):\r\n if isinstance(obj, numpy.ndarray):\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n elif isinstance(obj, basestring):\r\n if obj.startswith('NPY_ABI_VERSION=0x'):\r\n have_npy_abi_version = True\r\n elif obj.startswith('c_compiler_str='):\r\n have_c_compiler = True\r\n elif (isinstance(obj, (theano.gof.Op, theano.gof.Type)) and\r\n hasattr(obj, 'c_code_cache_version')):\r\n v = obj.c_code_cache_version()\r\n if v not in [(), None] and v not in key[0]:\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n\r\n if not have_npy_abi_version or not have_c_compiler:\r\n try:\r\n #This can happen when we move the compiledir.\r\n if keydata.key_pkl != filename:\r\n keydata.key_pkl = filename\r\n keydata.remove_key(key)\r\n except IOError, e:\r\n _logger.error(\r\n \"Could not remove file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n if len(keydata.keys) == 0:\r\n shutil.rmtree(os.path.join(compiledir, directory))\r\n\r\n except EOFError:\r\n _logger.error(\r\n \"Could not read key file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n except IOError:\r\n _logger.error(\r\n \"Could not clean up this directory: '%s'. To complete \"\r\n \"the clean-up, please remove it manually.\",\r\n directory)\r\n finally:\r\n if file is not None:\r\n file.close()", "def remove_out_pot_impcalcs(successful, pks_all_calcs, dry_run=False):\n import tarfile, os\n from aiida.orm import load_node\n from aiida.common.folders import SandboxFolder\n from aiida_kkr.calculations import KkrimpCalculation\n\n if dry_run:\n print('test', successful, len(pks_all_calcs))\n\n # name of tarfile\n tfname = KkrimpCalculation._FILENAME_TAR\n\n # cleanup only if calculation was successful\n if successful and len(pks_all_calcs) > 1:\n # remove out_potential for calculations\n # note that also last calc can be cleaned since output potential is stored in single file data\n pks_for_cleanup = pks_all_calcs[:]\n\n # loop over all calculations\n for pk in pks_for_cleanup:\n if dry_run:\n print('pk_for_cleanup:', pk)\n # get getreived folder of calc\n calc = load_node(pk)\n ret = calc.outputs.retrieved\n\n # open tarfile if present\n if tfname in ret.list_object_names():\n delete_and_retar = False\n with ret.open(tfname) as tf:\n tf_abspath = tf.name\n\n # create Sandbox folder which is used to temporarily extract output_all.tar.gz\n tmpfolder = SandboxFolder()\n tmpfolder_path = tmpfolder.abspath\n with tarfile.open(tf_abspath) as tf:\n tar_filenames = [ifile.name for ifile in tf.getmembers()]\n # check if out_potential is in tarfile\n if KkrimpCalculation._OUT_POTENTIAL in tar_filenames:\n tf.extractall(tmpfolder_path)\n delete_and_retar = True\n\n if delete_and_retar and not dry_run:\n # delete out_potential\n os.remove(os.path.join(tmpfolder_path, KkrimpCalculation._OUT_POTENTIAL))\n with tarfile.open(tf_abspath, 'w:gz') as tf:\n # remove out_potential from list of files\n tar_filenames = [i for i in tar_filenames if i != KkrimpCalculation._OUT_POTENTIAL]\n for f in tar_filenames:\n # create new tarfile without out_potential file\n fabs = os.path.join(tmpfolder_path, f)\n tf.add(fabs, arcname=os.path.basename(fabs))\n elif 
dry_run:\n print('dry run:')\n print('delete and retar?', delete_and_retar)\n print('tmpfolder_path', tmpfolder_path)\n\n # clean up temporary Sandbox folder\n if not dry_run:\n tmpfolder.erase()", "def test_clean_dry_run(self):\n self.make_files('foo.py', 'bar.js')\n env = pike.Environment()\n with pike.Graph('g') as graph:\n pike.glob('.', '*.py')\n env.add(graph)\n env.run_all()\n removed = env.clean('.', dry_run=True)\n self.assertEqual(removed, [os.path.abspath('bar.js')])\n self.assertTrue(os.path.exists('foo.py'))\n self.assertTrue(os.path.exists('bar.js'))", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False", "def clean():\n clean_files()", "def cleanup(self):\n\tprint \"clean up on \" + self.dest\n for root, folders, files in os.walk(self.dest):\n for ignore_dir in self.ignore_dirs:\n if ignore_dir in folders:\n folders.remove(ignore_dir)\n\t\t \n for folder in folders:\n backupdir = os.path.join(root,folders)\n sourcedir = bakupdir.replace(destination,source) \n if not os.path.exists(sourcedir):\n trash = backupdir.replace(destination,trash_dir)\n # shutil.move(backupdir, trash)\n print(\"move\",backupdir,\"to\",trash)\n # os.utime(trash, None)\n \n for filename in files:\n checkfile = root + \"/\" + filename\n checkfile = checkfile.replace(self.dest, self.source)\n print(\"checking if \", checkfile, \"exists\")\n if not os.path.exists(checkfile): \n print os.path.join(root,filename)\n\t\t backupfile = checkfile.replace(self.source,self.dest)\n trash = self.trash + checkfile.replace(self.source, \"\")\n # shutil.move(backupfile, trash)\n print(\"move\",backupfile,\"to\",trash)\n # os.utime(trash, None)", "def test_call_default_params_save_intermediate_files(self):\r\n\r\n intermediate_files_dir = self.output_dir + \"/test_usearch61/\"\r\n create_dir(intermediate_files_dir)\r\n self._dirs_to_remove.append(intermediate_files_dir)\r\n\r\n app = Usearch61ReferenceOtuPicker(\r\n params={'save_intermediate_files': True,\r\n 'output_dir':\r\n intermediate_files_dir,\r\n 'remove_usearch_logs': False\r\n })\r\n\r\n obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id,\r\n refseqs_fp=self.tmp_seq_filepath_97perc_id_rc)\r\n\r\n expected_intermediate_fps =\\\r\n [join(intermediate_files_dir, \"abundance_sorted.fna\"),\r\n join(intermediate_files_dir, \"abundance_sorted.log\"),\r\n join(intermediate_files_dir, \"abundance_sorted.uc\"),\r\n join(intermediate_files_dir, \"ref_clustered.log\"),\r\n join(intermediate_files_dir, \"ref_clustered.uc\")]\r\n\r\n for curr_file in expected_intermediate_fps:\r\n self.assertTrue(exists(curr_file))\r\n\r\n expected_failures = []\r\n self.assertEqual(failures, expected_failures)", "def create_coverage_files(self):\n\n # Select if normalisation is based on fragment numbers\n if self._args.paired_end and not self._args.no_norm_by_fragments:\n norm_by_fragments = True\n else:\n norm_by_fragments = False\n if norm_by_fragments:\n reads_or_fragments = \"fragments\"\n alignment_stats_path = (\n self._pathcreator.fragment_alignments_stats_path\n )\n else:\n reads_or_fragments = 
\"reads\"\n alignment_stats_path = self._pathcreator.read_alignments_stats_path\n\n # Get alignment stats\n raw_stat_data_reader = RawStatDataReader()\n alignment_stats = [raw_stat_data_reader.read(alignment_stats_path)]\n # Lib names was paired end\n lib_names = list(alignment_stats[0].keys())\n was_paired_end_alignment = self._was_paired_end_alignment(lib_names)\n\n # Quit if the wrong parameters have been chosen for the subcommand\n if was_paired_end_alignment and not self._args.paired_end:\n self._write_err_msg_and_quit(\n \"The alignemnt seems to be based on paired end reads. \"\n \"Please also set \"\n \"the option '-P' or '--paired_end'.\\n\"\n )\n\n if self._args.no_fragments and not self._args.paired_end:\n self._write_err_msg_and_quit(\n \"The option '-nf' or \"\n \"'--no_fragments' is only valid \"\n \"for paired end reads. If you have \"\n \"paired end reads, please also set \"\n \"the option '-P' or '--paired_end'.\\n\"\n )\n\n if self._args.no_norm_by_fragments and not self._args.paired_end:\n self._write_err_msg_and_quit(\n \"The option '-nnf' or \"\n \"'--no_norm_by_fragments' is only valid \"\n \"for paired end reads. If you have \"\n \"paired end reads, please also set \"\n \"the option '-P' or '--paired_end'.\\n\"\n )\n\n # Set read files and lib names\n if not was_paired_end_alignment:\n self._pathcreator.set_read_files_dep_file_lists_single_end(\n self._pathcreator.get_read_files(), lib_names\n )\n else:\n self._pathcreator.set_read_files_dep_file_lists_paired_end(\n self._pathcreator.get_read_files(), lib_names\n )\n # If fragments should be used and they were not created during alignment,\n # they will be created now\n if not self._args.no_fragments and self._args.paired_end:\n bam_files_exist = []\n for (\n bam_fragment_path\n ) in self._pathcreator.aligned_fragments_bam_paths:\n bam_files_exist.append(os.path.exists(bam_fragment_path))\n # If any of the bam files containing fragments is missing, create all\n # of them\n if not all(bam_files_exist):\n self._build_fragments()\n\n # Set alignment paths to fragments or single reads\n if not self._args.no_fragments and self._args.paired_end:\n alignment_paths = self._pathcreator.aligned_fragments_bam_paths\n else:\n alignment_paths = self._pathcreator.read_alignment_bam_paths\n # determine species cross mapped reads\n self._pathcreator.set_ref_seq_paths_by_species()\n if not self._args.count_cross_aligned_reads:\n self._crossmapped_reads_by_lib = {}\n for lib_name, read_alignment_path in zip(\n lib_names, self._pathcreator.read_alignment_bam_paths\n ):\n # retrieve the cross mapped reads from the single read files\n # to also get reads where two mates map to different\n # species. This would not be possible with the built fragments\n self._crossmapped_reads_by_lib[\n lib_name\n ] = self.determine_crossmapped_reads(read_alignment_path)\n\n if not self._args.non_strand_specific:\n strands = [\"forward\", \"reverse\"]\n else:\n strands = [\"forward_and_reverse\"]\n\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species = {}\n for sp in self._species_folder_prefixes_and_display_names.keys():\n # Retrieve the either the no. 
of uniquely aligned reads or\n # the number of species exclusive aligned reads (\"all aligned\" - \"cross aligned\") (Default behaviour) or\n # number of all aligned reads for each library of the given species\n read_files_aligned_read_freq = {}\n for read_file, attributes in alignment_stats[0].items():\n # If option normalize by uniquely is chosen, only the sum of uniquely aligned reads is used for normalisation\n # this excludes species cross mapped reads, split aligned reads and multiple aligned reads\n if self._args.normalize_by_uniquely:\n read_files_aligned_read_freq[read_file] = attributes[\n \"species_stats\"\n ][sp][f\"no_of_uniquely_aligned_{reads_or_fragments}\"]\n elif self._args.normalize_cross_aligned_reads_included:\n read_files_aligned_read_freq[read_file] = attributes[\n \"species_stats\"\n ][sp][f\"no_of_aligned_{reads_or_fragments}\"]\n # Default: Number of aligned reads without the cross aligned reads are used for normalization\n else:\n read_files_aligned_read_freq[read_file] = (\n attributes[\"species_stats\"][sp][\n f\"no_of_aligned_{reads_or_fragments}\"\n ]\n - attributes[\"species_stats\"][sp][\n f\"no_of_cross_aligned_{reads_or_fragments}\"\n ]\n )\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ] = {}\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\"read_files_aligned_read_freq\"] = read_files_aligned_read_freq\n # Retrieve the min no. of aligned reads\n # of all libraries for the given species\n min_no_of_aligned_reads = float(\n min(read_files_aligned_read_freq.values())\n )\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\"min_no_of_aligned_reads\"] = min_no_of_aligned_reads\n self._pathcreator.set_coverage_folder_and_file_names(\n strands,\n lib_names,\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species,\n )\n\n project_creator = ProjectCreator()\n project_creator.create_subfolders(\n self._pathcreator.required_coverage_folders()\n )\n self._test_folder_existance(\n self._pathcreator.required_coverage_folders()\n )\n\n # get references by species\n references_by_species = self._get_references_by_species()\n\n # Run the coverage file creation species-wise\n for (\n sp\n ) in (\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species.keys()\n ):\n # Run the generation of coverage in parallel\n\n jobs = []\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=self._args.processes\n ) as executor:\n for lib_name, bam_path in zip(lib_names, alignment_paths):\n if not self._args.count_cross_aligned_reads:\n cross_mapped_reads = self._crossmapped_reads_by_lib[\n lib_name\n ]\n else:\n cross_mapped_reads = None\n coverage_creator = CoverageCreator(\n self._args,\n strands,\n self._pathcreator.coverage_files_by_species[sp][\n lib_name\n ],\n references_by_species[sp],\n self._args.count_cross_aligned_reads,\n cross_mapped_reads,\n )\n jobs.append(\n executor.submit(\n coverage_creator.create_coverage_files_for_lib,\n lib_name,\n bam_path,\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\n \"read_files_aligned_read_freq\"\n ][\n lib_name\n ],\n self.read_files_aligned_read_freq_and_min_reads_aligned_by_species[\n sp\n ][\n \"min_no_of_aligned_reads\"\n ],\n )\n )\n # Evaluate thread outcome\n self._check_job_completeness(jobs)", "def clean(session):\n clean_dirs = (\n get_path(\".cache\"),\n get_path(\".coverage\"),\n get_path(\".pytest_cache\"),\n get_path(\"__pycache__\"),\n get_path(\"build\"),\n 
get_path(\"dist\"),\n get_path(\"docs\", \"__pycache__\"),\n get_path(\"docs\", \"build\"),\n get_path(\"scripts\", \"macos\", \"__pycache__\"),\n get_path(\"src\", \"python\", \"bezier.egg-info\"),\n get_path(\"src\", \"python\", \"bezier\", \"__pycache__\"),\n get_path(\"tests\", \"__pycache__\"),\n get_path(\"tests\", \"functional\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"hazmat\", \"__pycache__\"),\n get_path(\"wheelhouse\"),\n )\n clean_globs = (\n get_path(\".coverage\"),\n get_path(\"*.mod\"),\n get_path(\"*.pyc\"),\n get_path(\"docs\", \"abi\", \"example\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyc\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyd\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.so\"),\n get_path(\"src\", \"fortran\", \"*.o\"),\n get_path(\"tests\", \"*.pyc\"),\n get_path(\"tests\", \"functional\", \"*.pyc\"),\n get_path(\"tests\", \"unit\", \"*.pyc\"),\n )\n for dir_path in clean_dirs:\n session.run(shutil.rmtree, dir_path, ignore_errors=True)\n for glob_path in clean_globs:\n for filename in glob.glob(glob_path):\n session.run(os.remove, filename)", "def handle_cleaning():\n extra_fl = ['changesInfos', 'printerSettings']\n fld_path = f'{output_path}/ppt'\n out_rel_path = f'{fld_path}/_rels/presentation.xml.rels'\n root, tree = gen_tree(out_rel_path)\n \n for i in extra_fl:\n shutil.rmtree(f'{fld_path}/{i}')\n \n for relation in root:\n attrib = relation.attrib\n if i in attrib['Target']:\n root.remove(relation)\n \n tree.write(out_rel_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return", "def _assert_correct_files_are_present(outputdir: Path) -> None:\n for plane in PLANES:\n assert (outputdir / f\"{AMP_BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{TOTAL_PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{ORBIT_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{DISPERSION_NAME}x.tfs\").is_file()\n assert (outputdir / f\"{NORM_DISP_NAME}x.tfs\").is_file() # no norm disp in Y plane\n\n for rdt in [\"1001\", \"1010\"]:\n assert (outputdir / f\"f{rdt}.tfs\").is_file()", "def remove_empty_startapp_files(self, app_label):\n for file_name in [\"views\", \"admin\", \"tests\"]:\n file_path = \"%s/%s.py\" % (app_label, file_name)\n if os.path.exists(file_path):\n num_lines = sum(1 for line in open(file_path))\n if num_lines <= 4:\n os.remove(file_path)", "def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 
'processed/test.pkl')))", "def post_combine(self, target):\n target_extra_files = self.target_extra_files\n if target_extra_files:\n if self.disable_cleanup:\n self.stderr.write(\"Cleanup operations disabled by user.\\n\")\n else:\n self.stderr.write(\"Found extra files not part of source tree(s): \"\n f\"{len(target_extra_files)} files.\\n\")\n\n keep_existing = create_filtered_list(\"splunk\", default=False)\n # splglob_simple: Either full paths, or simple file-only match\n keep_existing.feedall(self.keep_existing, filter=splglob_simple)\n for dest_fn in target_extra_files:\n if keep_existing.match_path(dest_fn):\n self.stderr.write(f\"Keep existing file {dest_fn}\\n\")\n elif self.disable_cleanup:\n self.stderr.write(f\"Skip cleanup of unwanted file {dest_fn}\\n\")\n else:\n self.stderr.write(f\"Remove unwanted file {dest_fn}\\n\")\n os.unlink(os.path.join(target, dest_fn))", "def test_provider_system_hook_file_shred(change_dir, clean_files):\n files = ['stuff', 'thing', 'foo']\n for f in files:\n file = open(f, \"w\")\n file.write(f)\n file.close()\n\n tackle('.', no_input=True, context_file='shred.yaml')\n\n for f in files:\n assert not os.path.isfile(f)" ]
[ "0.5984496", "0.5982936", "0.59657407", "0.59632736", "0.5948575", "0.5879957", "0.5869022", "0.57011503", "0.5682905", "0.56816006", "0.56665385", "0.5654999", "0.5644077", "0.5643207", "0.5622836", "0.5610936", "0.56048816", "0.56018406", "0.5580712", "0.55641234", "0.55630195", "0.553035", "0.55057925", "0.5498781", "0.54903114", "0.548565", "0.5477777", "0.54766494", "0.5471059", "0.5471059", "0.5471059", "0.546485", "0.5453737", "0.54479927", "0.544588", "0.543135", "0.5418838", "0.5415572", "0.54150796", "0.540033", "0.53877854", "0.5346824", "0.5334532", "0.5332699", "0.5332658", "0.5331536", "0.532421", "0.53202015", "0.53177094", "0.5317124", "0.5308156", "0.5298576", "0.5298484", "0.529115", "0.52826005", "0.5275778", "0.5263086", "0.5246288", "0.5239622", "0.52314126", "0.521645", "0.52121687", "0.52090406", "0.5202741", "0.5200947", "0.52008", "0.51972395", "0.51820874", "0.51738805", "0.5172457", "0.5168533", "0.51672053", "0.51649463", "0.5161496", "0.5160385", "0.51588076", "0.515179", "0.5149927", "0.51469487", "0.51396793", "0.5135904", "0.51340383", "0.51312065", "0.5125646", "0.5116941", "0.51107574", "0.5109236", "0.5108956", "0.51088166", "0.5108636", "0.51052845", "0.5103934", "0.5100863", "0.50981236", "0.50978786", "0.5096643", "0.50960183", "0.5089641", "0.5085077", "0.5075545" ]
0.5448935
33
Create output directories if they don't already exist.
def CreateDirs(self): # First, create a list of directories. dnames = [] tags = ['', '_m', '_mf'] for entry in self.info.keys(): if self.info[entry]['type'] == 'epi': for tag in tags: fname = self.info[entry].get('imgfile%s' % tag, None) if fname is not None: dnames.append(os.path.dirname(fname)) else: if self.info[entry].get('outdir',None) is not None: dnames.append(self.info[entry]['outdir']) # Create them if they don't already exist. for dname in dnames: if not os.path.exists(dname): self.MakeDir(dname) if self.verbose: print 'mkdir %s' % dname
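For reference only, a minimal Python 3 sketch of the directory-creation pattern in the document above; the function and argument names here are illustrative and not part of the dataset record. It collects the target directory names and creates any that do not already exist, using os.makedirs with exist_ok=True in place of an explicit os.path.exists check.

import os

def create_output_dirs(dir_names, verbose=False):
    # Create each output directory if it does not already exist.
    for dname in dir_names:
        if not dname:
            continue  # skip empty or None entries
        os.makedirs(dname, exist_ok=True)  # no error if the directory already exists
        if verbose:
            print('mkdir %s' % dname)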
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)", "def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n pass\n os.makedirs(OUTDIR, exist_ok=True)", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass", "def create_dir(output_path):\n if not os.path.exists(output_path) and is_directory(output_path):\n os.makedirs(output_path)", "def _make_output_directory(output_dir: str) -> None:\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n logging.info(f\"output directory does not exist, made '{output_dir}'\")", "def create_output_dir(output_dir):\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n for folder in [CHECKPOINT_DIR, LOG_DIR]:\n folder_path = os.path.join(output_dir, folder)\n if not os.path.isdir(folder_path):\n os.mkdir(folder_path)", "def _create_target_directories(self):\n if os.path.exists(self.PREPROCESSED_DATA_OUT_DIR):\n if self._hparams.over_write:\n print_info(\"Deleting data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n shutil.rmtree(self.PREPROCESSED_DATA_OUT_DIR)\n print_info(\"Recreating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)\n else:\n print_info(\"Skipping preprocessing step, since the data might already be available\")\n else:\n print_info(\"Creating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)", "def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)", "def create_dirs(_log, output_dir, overwrite):\n _log.info(\"Create model directory\")\n\n if output_dir is None:\n raise ValueError(\"Config `output_dir` has to be set!\")\n\n if os.path.exists(output_dir) and not overwrite:\n raise ValueError(\n \"Output directory already exists (set overwrite flag?):\", output_dir\n )\n\n if os.path.exists(output_dir) and overwrite:\n rmtree(output_dir)\n\n if not os.path.exists(output_dir) and output_dir not in [\"\", \".\"]:\n os.makedirs(output_dir)", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def clean_dirs(output_dir):\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)", "def create_out_dir(out): \n out_path = os.path.join(out,out_dir_name)\n try:\n os.stat(out_path)\n except:\n os.mkdir(out_path)", "def ensure_exists(output_dir):\n try:\n makedirs(output_dir)\n except OSError:\n if not isdir(output_dir):\n raise", "def create_out_dir(self):\n\n logging.debug('create_out_dir called \\n'\n 'Output directory to be created: '\n '%s', self.out_dir)\n\n access_rights = 0o755\n list_outs = ['docs/downloaded', 'docs/edited_csv', 
'docs/graphics']\n for address in list_outs:\n path = self.out_dir + address\n if os.path.exists(path):\n logging.debug('Path: %s :already exists', path)\n else:\n try:\n os.makedirs(path, access_rights)\n except OSError:\n logging.debug('Creation of directory has failed '\n 'at: %s', path)\n else:\n logging.debug('Successfully created the '\n 'directory path at: %s', path)\n return self.out_dir, list_outs", "def create_output_dir(output_dir, dir_name):\n try:\n os.mkdir(os.path.join(output_dir, dir_name))\n except OSError:\n print(os.path.join(output_dir, dir_name) + \" exits... :(\")", "def create_temp_output_paths() -> None:\n if not os.path.exists(TMP_PATH):\n os.makedirs(TMP_PATH)\n if not os.path.exists(TMP_MAP_PATH):\n os.makedirs(TMP_MAP_PATH)", "def make_directories(self):\n os.makedirs(self.data_dir, exist_ok=True)\n os.makedirs(self.patches_dir, exist_ok=True)\n os.makedirs(self.raw_image_dir, exist_ok=True)\n os.makedirs(self.pro_image_dir, exist_ok=True)\n os.makedirs(self.results_dir, exist_ok=True)", "def ensure_out_dir(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)", "def create_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF,\n DIR_BACK, DIR_TEXT, DIR_FINAL)\n \n for dir in dirs:\n try:\n os.mkdir(os.path.join(cwd, dir))\n except OSError, e:\n print 'directory (', dir, ') already exists'", "def mkdir_needed(d):\n dirs=[d['outdir']]\n dirs.append( get_sample_dir(d['outdir'],d['obj']) )\n for dr in dirs:\n if not os.path.exists(dr):\n os.makedirs(dr)", "def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()", "def make_sure_path_exists(out_path):\n try:\n os.makedirs(out_path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n print \"Errors in output folder path! 
please change the output path or analysis name\\n\"\n exit()", "def __setup_output_directory(self):\n print('Setting up output directory')\n time_stamp = datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")\n self.output_path = os.path.join(self.output_base_path, '%s_%s' % (self.execution_name, time_stamp))\n print('- Creating output directory: %s' % self.output_path)\n os.makedirs(self.output_path)\n print('- Output directory created')", "def mkdirs():\n if os.path.exists(DIST_DIR):\n shutil.rmtree(DIST_DIR)\n else:\n os.makedirs(BUILD_DIR)", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def make_output_dir(directory):\r\n if os.path.exists(directory):\r\n try:\r\n shutil.rmtree(directory)\r\n except OSError:\r\n print(\"[SETUP] ERROR: Removing the existing output directory failed\")\r\n return False\r\n else:\r\n print(\"[SETUP] STATUS: Existing output directory removed\")\r\n\r\n try:\r\n os.mkdir(directory)\r\n except OSError:\r\n print(\"[SETUP] ERROR: Creation of the output directory failed\")\r\n return False\r\n else:\r\n print(\"[SETUP] STATUS: Successfully created output directory\")\r\n return True", "def setup(self, newdir=None):\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n if newdir:\n _new = os.path.join(self.output_path, newdir)\n if not os.path.exists(_new):\n os.makedirs(_new)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def make_output_dirs_for_part2(parent_dir):\n\n if not os.path.exists(parent_dir + 'Modeling/'):\n os.makedirs(parent_dir + 'Modeling/')\n if not os.path.exists(parent_dir + 'Modeling/cleaned_template_fastas/'):\n os.makedirs(parent_dir + 'Modeling/cleaned_template_fastas/')\n if not os.path.exists(parent_dir + 'Modeling/cleaned_template_pdbs/'):\n os.makedirs(parent_dir + 'Modeling/cleaned_template_pdbs/')\n if not os.path.exists(parent_dir + 'Modeling/fasta_alns_and_identities/'):\n os.makedirs(parent_dir + 'Modeling/fasta_alns_and_identities/')\n if not os.path.exists(parent_dir + 'Modeling/grishin_alns/'):\n os.makedirs(parent_dir + 'Modeling/grishin_alns/')\n if not os.path.exists(parent_dir + 'Modeling/threaded_pdbs/'):\n os.makedirs(parent_dir + 'Modeling/threaded_pdbs/')\n if not os.path.exists(parent_dir + 'Modeling/final_models/'):\n os.makedirs(parent_dir + 'Modeling/final_models/')", "def createDirs():\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY 
+ '../temp/xml/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/xml/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Uploads/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Uploads/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/')", "def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)", "def _create_result_directory(self):\n\t\tFileSystem.create_dir(self._result_directory_name)\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Log\")\n\t\tFileSystem.create_dir(self._result_directory_name + \"/\" + \"Dump\")", "def createDirs(self):\n logging.info(\"Creating Directories\")\n\n if not self.img_exist:\n self.reCreateDir(self.savePathJoin(\"Images\"))\n if not self.of_exist:\n self.reCreateDir(self.savePathJoin(\"Of\"))\n if not self.back_of_exist:\n self.reCreateDir(self.savePathJoin(\"Back_Of\"))\n if not self.depth_exist:\n self.reCreateDir(self.savePathJoin(\"Depth\"))\n if not self.object_detection_dir_exist and (\n self.ui.c_object_detection.isChecked() or self.ui.c_crash_plot.isChecked()\n ):\n self.reCreateDir(self.savePathJoin(\"ObjectDetection\"))\n if self.super_pixel_method != \"\" and not os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n ):\n os.makedirs(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n\n self.reCreateDir(RESULTS)\n self.reCreateDir(NP_DIR)\n self.reCreateDir(MASK_DIR)\n\n if self.ui.c_crash_plot.isChecked():\n self.reCreateDir(PLOT_CRASH_DIR)\n if self.ui.c_draw.isChecked():\n self.reCreateDir(DRAW_DIR)\n if self.ui.c_velocity.isChecked():\n self.reCreateDir(VL_DIR)\n if self.ui.c_speed_plot.isChecked():\n self.reCreateDir(PLOT_SPEED_DIR)\n if self.super_pixel_method != \"\":\n self.reCreateDir(SUPER_PIXEL_DIR)\n if self.user[\"GT\"] != \"\" and self.ui.c_error_plot.isChecked():\n self.reCreateDir(PLOT_ERROR_DIR)", "def create_dirs():\n run(\"mkdir -p %s\"%RUN_DIR)\n run(\"mkdir -p %s\"%LOG_DIR)", "def create_output_folder(output_folder_name: str, finding_labels: list):\n if not os.path.isdir(output_folder_name):\n os.mkdir(output_folder_name)\n for type in ['/train', '/val', '/test']:\n if not os.path.isdir(output_folder_name + type):\n os.mkdir(output_folder_name + type)\n for disease in finding_labels:\n if not os.path.isdir(output_folder_name + type + '/' + disease):\n os.mkdir(output_folder_name + type + '/' + disease)", "def make_dirs():\n global paths_made\n\n #Have we done this already? 
Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True", "def cleanOutputDir(output):\n if os.path.exists(output) and os.path.isdir(output):\n shutil.rmtree(output)", "def MakeDestinationDirectories(self, dst_files):\n for dst in dst_files:\n path = os.path.dirname(dst);\n if (len(path) > 0) and (not os.path.exists(path)):\n self.VerboseMsg(\"Make Directory: \" + path)\n if self.execute:\n os.makedirs(path)", "def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)", "def check_out_dir_exists(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)", "def create_target_directories(output_directory, num_splits):\n target_directories = {i: os.path.join(output_directory, \"%05d\" % i) for i in range(num_splits)}\n for i in target_directories:\n target_dir = target_directories[i]\n if os.path.isfile(target_dir):\n logger.error(\"File exists: %s\" % target_dir)\n exit(1)\n mkdir(target_dir)\n\n return target_directories", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def SetupOutDir(out_dir):\n logging.info('entering ...')\n assert re.match(r'^[a-zA-Z_\\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir\n\n if os.path.exists(out_dir):\n subprocess.check_call(['rm', '-rf', out_dir])\n os.mkdir(out_dir)\n logging.info('... 
done')", "def __mkdir(self, output_directory):\n try:\n if not os.path.exists(output_directory):\n os.mkdir(output_directory)\n return True\n except Exception as e:\n print e\n return False", "def clean_outputs(remit, sourcelist):\n if not os.path.exists('output-'+remit):\n os.mkdir('output-'+remit)\n for source in sourcelist:\n os.chdir('output-'+remit)\n if os.path.exists(source):\n shutil.rmtree(source)\n print('* deleted old \"output-%s/%s\"' % (remit, source))\n os.mkdir(source)\n # os.chdir(source)\n # os.mkdir('debug')\n # os.chdir('..')\n os.chdir('..')", "def setup_directory_structure(\n output_root: Union[str, Path], with_production: bool = False\n) -> None:\n mkdir(output_root, exists_ok=True, parents=True)\n output_root = Path(output_root).resolve()\n for link in [paths.BEST_LINK, paths.LATEST_LINK]:\n link_path = output_root / link\n if not link_path.is_symlink() and not link_path.exists():\n mkdir(link_path)\n\n if with_production:\n production_dir = output_root / paths.PRODUCTION_RUN\n mkdir(production_dir, exists_ok=True)", "def build_directories(self):\n print(\"Building Directories...\")\n\n path_1 = \"./saved_models/model_1/\"\n\n if not os.path.exists(path_1):\n os.mkdir(path_1, 0755)\n print(\"Completed directories creation or if already exist - then checked\")", "def make_output_dir(experiment_dir, identifier):\n output_dir = Path(experiment_dir, identifier).resolve()\n output_dir.mkdir(parents=True, exist_ok=True)\n return output_dir", "def create_output_dir(self, cfg: dict) -> str:\n output_dir = cfg.get(\"output\").get(\"output_dir\")\n time_sfx = cfg.get(\"output\").get(\"time_suffix\", True)\n if not os.path.isabs(output_dir):\n output_dir = os.path.join(self.repo_path, output_dir)\n subdir = self.project_name\n if time_sfx:\n cur_time = get_cur_time_str()\n subdir = f\"{subdir}_{cur_time}\"\n output_dir = os.path.join(output_dir, subdir) # type: str\n if check_dir(output_dir, make_if_not=True):\n logger.info(\"Results will be in {}\".format(output_dir))\n else:\n exit(ErrorCode.PATH_ERROR)\n return output_dir", "def getOutputDir():\n directory = os.path.join(Configurations.getProjectRootDir(), OUTPUT_DIR_NAME)\n if not os.path.exists(directory):\n logger.warning('Directory %s not exist, CREATE!', directory)\n os.makedirs(directory)\n\n return directory", "def create_dirs():\n os.makedirs(ORIGINAL_LOG_DIR, exist_ok=True)", "def __init_output_folder():\n try:\n os.makedirs(Result.__json_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def initialize_directories(): # pragma: no cover\n\n for i in (CACHE_DIR, CONFIG_DIR):\n i.mkdir(parents=True, exist_ok=True)", "def create_dir_structure():\n LOG.info('In create_dir_structure')\n OutputWrite.change_to_script_directory(__file__)\n path = os.path.abspath(os.path.join('..', 'results',\n global_constants.TEXT_BOARD,\n global_constants.TEXT_INTERFACE,\n global_constants.TEXT_DEVICE,\n global_constants.TEST_EXECUTION_NAME\n ))\n LOG.debug('Path to be Created = {0}'.format(path))\n os.makedirs(path, exist_ok=True, mode=0o755)\n for item in global_constants.TEST_CASE_LIST_NAMES:\n in_path = os.path.exists(os.path.join(path, item))\n if not os.path.exists(in_path):\n LOG.debug('Path with Test Case name = {0}'.format(in_path))\n os.mkdir(in_path)\n LOG.debug('Path = {0}'.format(path))\n return path", "def create_required_dir():\n if not os.path.exists('utils_dfn/temp'):\n os.mkdir('utils_dfn/temp')\n if not os.path.exists('utils_dfn/img'):\n os.mkdir('utils_dfn/img')\n if not 
os.path.exists('utils_dfn/mask'):\n os.mkdir('utils_dfn/mask')\n if not os.path.exists('utils_dfn/output'):\n os.mkdir('utils_dfn/utils_dfn/output')\n # if not os.path.exists('compare'):\n # os.mkdir('compare')", "def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)", "def prepDir(path=None):\n if path:\n if os.path.exists(path):\n return path\n else:\n os.makedirs(path)\n else:\n # Do something innocent when no path is provided\n path = tempfile.mkdtemp(prefix='XEPs_')\n print \"creating {} for output\".format(path)\n return path", "def make_dir_structure(self, out):\n program_folder = os.path.join(out, self.out)\n self.make_output_dir(program_folder)\n self.make_config_dirs(program_folder)\n return None", "def createPath(self, outPath):\n # Create new directory for output path\n try:\n os.mkdir(outPath)\n except OSError:\n print (\"Creation of the directory %s failed\" % outPath)\n else:\n print (\"Successfully created the directory %s \" % outPath)", "def make_dir(file_name): # output_file_loc = des\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if not os.path.exists(f'{output_path}/{fld}'):\n os.makedirs(f'{output_path}/{fld}')\n # print(\"MAKE_DIR completed...\") \n return", "def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path", "def mkdirout():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n #outdir = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n if os.path.exists(output_dir()):\n sys.exit(\"ERROR. Unable to create output directory. %s already exists. Please, make sure you choose an output path not containing former results.\" % output_dir() ) # LOGGING?\n else:\n try:\n os.mkdir(output_dir())\n except OSError:\n sys.exit(\"ERROR. 
Unable to create output directory %s.\" % output_dir() )\n os.mkdir(output_tmpdir())\n os.mkdir(output_tmpdir(\"pisacov\"))\n os.mkdir(output_tmpdir(\"pisa\"))\n os.mkdir(output_tmpdir(\"deepmetapsicov\"))", "def setup_dexy_dirs(self):\n if not os.path.exists(self.artifacts_dir):\n os.mkdir(self.artifacts_dir)\n if not os.path.exists(self.log_dir):\n os.mkdir(self.log_dir)", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def create_save_folder(self):\n absolute_output = os.path.abspath(self.output).replace(\"\\\\\", \"/\")\n if self.paddle_length_factor is not None:\n self.save_folder = f\"{absolute_output}/{self.env_name}/PaddleLength_\" \\\n f\"{self.paddle_length_factor}/session{self.session}\"\n else:\n self.save_folder = f\"{absolute_output}/{self.env_name}/StandardEnv/session{self.session}\"\n tmp_folder = self.save_folder\n\n folder_tree = []\n while True:\n if not os.path.exists(self.save_folder):\n folder_tree.insert(0, self.save_folder)\n self.save_folder = self.save_folder[:self.save_folder.rindex(\"/\")]\n else:\n self.save_folder = tmp_folder\n break\n for folder in folder_tree:\n os.mkdir(folder)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def create_paths(self):\n for path in self.PATHS_TO_CREATE:\n path = os.path.expanduser(path)\n if not os.path.isdir(path):\n if self.dry_run:\n print('mkdir -p {}'.format(path))\n else:\n os.makedirs(path)", "def create_dirs():\n\tif os.path.isdir(path):\n\t\tshutil.rmtree(path, ignore_errors=True)\n\tos.makedirs(path+\"/log\",exist_ok=True)\n\tos.makedirs(path+\"/losses\",exist_ok=True) \n\tos.makedirs(path+\"/samples\",exist_ok=True)\n\tos.makedirs(path+\"/model\",exist_ok=True)\n\tos.makedirs(path+\"/datasets\",exist_ok=True)\n\tshutil.copy2(\"config.py\", path+\"/config.py\")\n\tfor i in rconfig[\"datasets\"]:\n\t\tdsconfig = get_dsconfig(i)\n\t\tos.makedirs(path+\"/datasets/\"+dsconfig[\"id\"],exist_ok=True)\n\t\tshutil.copy2(i+\"/dsconfig.py\", path+\"/datasets/\"+dsconfig[\"id\"]+\"/dsconfig.py\")\n\t\tcopytree(dsconfig[\"split\"], path+\"/datasets/\"+dsconfig[\"id\"]+\"/split\")", "def PrepareOutputDir(dirname, preserve=False):\n global outdir, preserve_outdir\n\n preserve_outdir = dirname or preserve\n if dirname:\n outdir = dirname\n if not os.path.isdir(outdir):\n try:\n os.makedirs(outdir)\n except OSError as err:\n raise CmdError(\"Cannot make output directory '%s': '%s'\" %\n (outdir, err.strerror))\n tout.Debug(\"Using output directory '%s'\" % outdir)\n else:\n outdir = tempfile.mkdtemp(prefix='binman.')\n tout.Debug(\"Using temporary directory '%s'\" % outdir)", "def make_config_dirs(self, out):\n for config in self.configurations:\n config.make_dir_structure(out)\n return None", "def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)", "def getOutputFolder(analysesFolder):\n i = 1\n outputFolder = 
os.path.join(analysesFolder, \"Output_\" + str(i))\n while os.path.exists(outputFolder):\n i += 1\n outputFolder = os.path.join(analysesFolder, \"Output_\" + str(i))\n\n os.mkdir(outputFolder)\n return outputFolder", "def prepare_run(input_path: str, output_path: str, tmp: str) -> None:\n input_file_exists(input_path)\n if os.path.isdir(output_path) and len(os.listdir(output_path)) != 0:\n raise AssertionError(\"output folder must be empty or non-existent.\")\n set_tempdir(tmp)\n os.makedirs(output_path, exist_ok=True)", "def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)", "def make_directories():\n os.mkdir('principal_wings')\n os.mkdir('random_wings')", "def makeAtomDirectories(self):\n for atom in self.atoms:\n atomDir = os.getcwd() + '/' + atom\n if not os.path.isdir(atomDir):\n subprocess.call(['mkdir',atomDir])", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def create_directories():\n directories = ['train', 'test', 'validation']\n\n for directory in directories:\n try:\n os.mkdir(directory)\n except OSError:\n print (f\"Creation of the directory '{directory}' failed\")", "def create_app_folders(self):\n\t\tif not os.path.exists(self.TEMP_FOLDER):\n\t\t\tos.makedirs(self.TEMP_FOLDER)\n\t\tif not os.path.exists(self.SAVE_FOLDER):\n\t\t\tos.makedirs(self.SAVE_FOLDER)", "def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def make_directories(file_path):\n logger.info(\"Create all directories in the path %s\", file_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n else:\n logger.warning(\"Cannot create directories %s. 
The directory already exists\", file_path)", "def prepare_output_dir(raw_output_dir: str) -> Path:\n try:\n output_dir = Path(raw_output_dir).resolve()\n output_dir.mkdir(exist_ok=True, parents=True)\n except RuntimeError:\n raise argparse.ArgumentTypeError(\"Invalid path to output directory.\")\n except OSError:\n raise argparse.ArgumentTypeError(\"Could not create output directory.\")\n\n return output_dir", "def create_folders(self):\n\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))", "def initialize_output_files(self):\r\n if not self.C.restart:\r\n print(\"* Touching output files.\", flush=True)\r\n # begin writing `generation.csv` file\r\n csv_path_and_filename = self.C.job_dir + \"generation.csv\"\r\n util.properties_to_csv(\r\n prop_dict=self.ts_properties,\r\n csv_filename=csv_path_and_filename,\r\n epoch_key=\"Training set\",\r\n append=False,\r\n )\r\n\r\n # begin writing `convergence.csv` file\r\n util.write_model_status(append=False)\r\n\r\n # create `generation/` subdirectory to write generation output to\r\n os.makedirs(self.C.job_dir + \"generation/\", exist_ok=True)", "def check_out(out_dir, in_dirs=None):\n # create output directory\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # for every class create a subdirectory\n for in_dir in in_dirs:\n path = os.path.join(out_dir, in_dir.split('/')[-2])\n if not os.path.exists(path):\n os.makedirs(path)", "def prepare_supplemental_output_directory():\n output_dir = workspace_path('%s/%s' % (scenario_filename(), \"Supplemental Output Files\")) # this does not have the .db suffix\n output_args = ['--output-dir', output_dir] # to be returned and passed to adsm_simulation.exe\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n return output_args", "def create_directories() -> Tuple[str, str, str]:\n src = os.path.join(_DIRECTORY, 'src')\n templates = os.path.join(src, 'templates')\n app = os.path.join(src, 'app')\n os.makedirs(app, exist_ok=True)\n os.makedirs(templates, exist_ok=True)\n return src, app, templates", "def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def reset_dirs():\n\n image_dir = Config.IMAGE_DIRECTORY\n fig_dir = Config.FIGURE_DIRECTORY\n\n # delete directories\n if os.path.isdir(image_dir):\n shutil.rmtree(image_dir) \n if os.path.isdir(fig_dir):\n shutil.rmtree(fig_dir) \n\n # create directories\n os.mkdir(image_dir)\n orig_dir = os.path.join(image_dir, 'original')\n processed_dir = os.path.join(image_dir, 'processed')\n os.mkdir(orig_dir)\n os.mkdir(processed_dir)\n os.mkdir(fig_dir)\n\n print(f'[INFO] Created image and figure directories.')", "def 
make_run_directory(output_root: Union[str, Path]) -> Path:\n run_directory = get_run_directory(output_root)\n mkdir(run_directory)\n return run_directory", "def create_output_directory_for_resized_images():\n\n try:\n if not os.path.isdir(RESIZED_NEGATIVE_PATH):\n return os.makedirs(RESIZED_NEGATIVE_PATH)\n elif not os.path.isdir(RESIZED_POSITIVE_PATH):\n return os.makedirs(RESIZED_POSITIVE_PATH)\n except OSError as e:\n print('Error --> {}'.format(e))", "def create_duplicates_directory(self) -> None:\n dups_path = os.path.join(self.get_directory(), \"duplicates\")\n if not self.directory_exists(dups_path): os.mkdir(dups_path)", "def prepare_target_dir(self, target):\n marker_file = os.path.join(target, CONTROLLED_DIR_MARKER)\n if os.path.isdir(target):\n if not self.disable_marker and not os.path.isfile(marker_file):\n self.stderr.write(\"Target directory already exists, but it appears to have been \"\n \"created by some other means. Marker file missing.\\n\")\n raise LayerCombinerExceptionCode(\"Target directory exists without marker file\",\n EXIT_CODE_COMBINE_MARKER_MISSING)\n\n elif self.dry_run:\n self.stderr.write(\"Skipping creating destination directory {target} (dry-run)\\n\")\n else:\n try:\n os.mkdir(target)\n except OSError as e:\n self.stderr.write(f\"Unable to create destination directory {target}. {e}\\n\")\n raise LayerCombinerExceptionCode(f\"Unable to create destination directory {target}\",\n EXIT_CODE_NO_SUCH_FILE)\n self.stderr.write(f\"Created destination directory {target}\\n\")\n if not self.disable_marker:\n with open(marker_file, \"w\") as f:\n f.write(\"This directory is managed by KSCONF. Don't touch\\n\")", "def _create_dir(self):\n images_train_dir = os.path.join('images', self.name, 'train')\n images_test_dir = os.path.join('images', self.name, 'test')\n log_dir = os.path.join('log', self.name)\n model_dir = os.path.join('checkpoint', self.name)\n if not os.path.exists(images_train_dir):\n os.makedirs(images_train_dir)\n\n if not os.path.exists(images_test_dir):\n os.makedirs(images_test_dir)\n\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n return images_train_dir, images_test_dir, log_dir, model_dir" ]
[ "0.8051411", "0.7945233", "0.7910225", "0.78630805", "0.78603286", "0.78426844", "0.78248453", "0.77685827", "0.77487713", "0.759815", "0.7533603", "0.75111306", "0.74982744", "0.745719", "0.7449486", "0.7433374", "0.7404734", "0.7333325", "0.72971153", "0.7263825", "0.7233742", "0.72197175", "0.721562", "0.7212619", "0.720471", "0.71681523", "0.7145743", "0.7076383", "0.7074686", "0.70561564", "0.7045613", "0.70215875", "0.70081645", "0.6998487", "0.6979427", "0.6977696", "0.6971947", "0.6950183", "0.69395524", "0.69216174", "0.69191664", "0.6907373", "0.68991697", "0.6892609", "0.6890406", "0.6879888", "0.687665", "0.6856603", "0.68203837", "0.68149924", "0.6798045", "0.67750925", "0.67735094", "0.6757046", "0.67503583", "0.6732486", "0.6696994", "0.66967577", "0.668102", "0.66661", "0.66594625", "0.66569304", "0.663968", "0.65960765", "0.65956646", "0.655463", "0.6527599", "0.6521141", "0.6515165", "0.6508785", "0.648798", "0.6471248", "0.64574766", "0.6451979", "0.64505804", "0.6448494", "0.6444421", "0.64338833", "0.6429359", "0.6421281", "0.6421281", "0.6413983", "0.64127636", "0.64097273", "0.63848066", "0.63743925", "0.63732094", "0.6368076", "0.6363452", "0.6360806", "0.6355336", "0.63328654", "0.633224", "0.6317909", "0.63137424", "0.6313154", "0.63046557", "0.629657", "0.62839246", "0.62819225" ]
0.6967413
37
Execute a bash command. This method is obsolete now. At one time it called a library function that worked around a deadlock bug in popen2
def ExecCmd(self, cmd, halt_on_error=True): self.f_bash.write("%s\n"%cmd) self.f_bash.flush() if not self.dry_run: try: execCmd(cmd, self.f_log, self.f_crash, self.verbose) self.f_log.flush() except RuntimeError, errstr: if halt_on_error: raise RuntimeError(errstr) else: self.LogErrors('%s' % errstr) return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bash_command(cmd):\n subprocess.Popen(['/bin/bash', '-c', cmd])", "def bash(cmd):\n subprocess.run(cmd, shell=True, executable='/bin/bash') # ,", "def bash_command(cmd):\n cmd = str(cmd)\n chain = cmd.split(\"|\")\n n_pipes = len(chain)\n\n for i in range(n_pipes):\n if i == 0:\n p = Popen(split(chain[0]), stdout=PIPE)\n else:\n p = Popen(split(chain[i]), stdin=p.stdout, stdout=PIPE)\n\n return p.communicate()", "def bash_command(cmd):\n return check_output([\"/bin/bash\",\"-c\",cmd])", "def execute_command(self, command):\n\n def wrap(s):\n if s is not None:\n return s\n return open(os.devnull)\n\n proc = Popen(\n command,\n shell=True,\n executable=\"/bin/bash\",\n stdout=PIPE,\n stderr=PIPE,\n text=True,\n )\n return wrap(None), wrap(proc.stdout), wrap(proc.stderr)", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def execute(cmd) :\n return os.system( cmd )", "def shell(command):\n log(\"Executing: \" + command)\n result = subprocess.call(command, shell=True, executable=\"/bin/bash\")\n if (result != 0):\n log(\"Execution failed (result=%d)\" % result)\n sys.exit()", "def execute(args):\n print '################################'\n print 'args: ', args\n p = subprocess.Popen(args, shell=True, executable='/bin/bash')\n # p = subprocess.call(args, shell=True, executable='/bin/bash')\n p.wait()\n return p\n print '################################'", "def execute_command(command):\n proc = subprocess.Popen(\n [\"/bin/bash\"], shell=True, cwd=os.environ['PWD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n )\n proc.stdin.write(command)\n stdout, stderr = proc.communicate()\n rc = proc.returncode\n\n return stdout, stderr, rc", "def execute(\n cmd,\n showout=False,\n cwd=None,\n shell=\"/bin/bash\",\n timeout=600,\n asynchronous=False,\n env=None,\n replace_env=False,\n die=False,\n):\n return j.core.executors.run_local(\n cmd=cmd,\n hide=not showout,\n cwd=cwd,\n shell=shell,\n timeout=timeout,\n asynchronous=asynchronous,\n env=env or {},\n replace_env=replace_env,\n warn=not die,\n )", "def bash(bash_command: str):\n # not used for now but can be useful if the bot is running on the same\n # system as the server. 
such as seeing ram usage ect\n\n process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n output = output.decode('ascii')", "def bash(cmd, prnt=True, wait=True):\n p = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n if wait:\n p.wait()\n while True and prnt:\n line = p.stdout.readline()\n if line:\n print(line)\n else:\n break\n\n return (p)", "def _shcmd(cmd, timeout=15):\n delay = 1.0\n obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if sys.hexversion < 0x03000000:\n while (obj.poll() is None) and (timeout > 0):\n time.sleep(delay)\n timeout -= delay\n if not timeout:\n obj.kill()\n stdout, stderr = obj.communicate()\n else:\n try:\n stdout, stderr = obj.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n obj.kill()\n stdout, stderr = obj.communicate()\n if obj.returncode:\n print(\"COMMAND: \" + (\" \".join(cmd)))\n print(\"STDOUT:\" + os.linesep + _tostr(stdout))\n print(\"STDERR:\" + os.linesep + _tostr(stderr))\n raise RuntimeError(\"Shell command could not be executed successfully\")\n stdout = _tostr(stdout).split(os.linesep)\n stderr = _tostr(stderr).split(os.linesep)\n return stdout, stderr", "def executeCommand(command):\n time.sleep(1)\n #return os.system(command)\n subprocess.Popen(command, shell=True)", "def run_command(command):\n os.system('(echo {} | {})&'.format(command, SHELL))", "def sh(cmd):\r\n return check_call(cmd, shell=True)", "def run(cmd, fail=True, capture_stdout=False, capture_stderr=False, verbose=False):\n stdout, stderr = None, None\n if capture_stderr:\n stderr = subprocess.PIPE\n if capture_stdout:\n stdout = subprocess.PIPE\n\n if verbose:\n print(cmd)\n\n p = subprocess.Popen(['bash', '-c', cmd], stderr=stderr, stdout=stdout)\n if p.returncode and fail:\n sys.exit(1)\n\n return p", "def exec_command(cmd):\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True) as p:\n stdout, _ = p.communicate()\n if p.returncode != 0:\n logger.error(stdout)\n return None\n\n return stdout", "def run_bash_command(command):\n process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print(output.strip())\n rc = process.poll()\n return rc", "def system(self,cmd):\n code = 'import os;f=os.popen(\"%s\");res = f.read(-1);f.close();' % cmd\n return self.exec_code(code,returns=['res'])", "def _run_command(command, cwd, output=True, decode=False, loop=None):\n loop = loop or asyncio.get_event_loop()\n\n if output:\n out = asyncio.subprocess.PIPE\n else:\n out = None\n\n process = yield from asyncio.create_subprocess_shell(\n command, loop=loop, stdout=out, stderr=out,\n limit=GIT_COMMAND_BUFFER_SIZE, cwd=cwd)\n\n if output:\n # communicate() also waits on the process termination\n stdout, stderr = yield from process.communicate()\n if decode:\n stdout = stdout.decode(sys.getdefaultencoding())\n stderr = stderr.decode(sys.getdefaultencoding())\n else:\n stdout, stderr = None, None\n yield from process.wait()\n\n if process.returncode:\n raise base.AiogitException(\n (stderr or stdout).decode(sys.getdefaultencoding()))\n\n return stdout, stderr", "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):\n return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "def _Shell(*cmd, **kw):\n _LOGGER.info('Executing %s.', cmd)\n prog = 
subprocess.Popen(cmd, shell=True, **kw)\n\n stdout, stderr = prog.communicate()\n if prog.returncode != 0:\n raise RuntimeError('Command \"%s\" returned %d.' % (cmd, prog.returncode))\n return (stdout, stderr)", "def exec_bash(self, cmd_list):\n for cmd in cmd_list:\n _, result = self.run_cmd(cmd)\n result = result.decode('utf-8').strip()\n return result", "def sh(cmd):\n print 'CMD:', cmd\n return check_call(cmd, shell=True)", "def execute_command(command):\n try:\n if \"|\" in command:\n # save for restoring later on\n s_in, s_out = (0, 0)\n s_in = os.dup(0)\n s_out = os.dup(1)\n\n # first command takes commandut from stdin\n fdin = os.dup(s_in)\n\n # iterate over all the commands that are piped\n for cmd in command.split(\"|\"):\n # fdin will be stdin if it's the first iteration\n # and the readable end of the pipe if not.\n os.dup2(fdin, 0)\n os.close(fdin)\n\n # restore stdout if this is the last command\n if cmd == command.split(\"|\")[-1]:\n fdout = os.dup(s_out)\n else:\n fdin, fdout = os.pipe()\n\n # redirect stdout to pipe\n os.dup2(fdout, 1)\n os.close(fdout)\n\n try:\n subprocess.run(cmd.strip().split())\n except Exception:\n print(\"psh: command not found: {}\".format(cmd.strip()))\n\n # restore stdout and stdin\n os.dup2(s_in, 0)\n os.dup2(s_out, 1)\n os.close(s_in)\n os.close(s_out)\n else:\n subprocess.run(command.split(\" \"))\n except Exception:\n print(\"psh: command not found: {}\".format(command))", "def exec_run2(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,\n privileged=False, user='', detach=False, stream=False,\n socket=False, environment=None):\n resp = self.client.api.exec_create(\n self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,\n privileged=privileged, user=user, environment=environment\n )\n exec_output = self.client.api.exec_start(\n resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket\n )\n exec_inspect = self.client.api.exec_inspect(resp['Id'])\n return exec_output, exec_inspect['ExitCode']", "def exec_test_command(cmd):\n process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)\n result = process.communicate()\n return (\n process.returncode,\n bytes(result[0]).decode(\"utf-8\"),\n bytes(result[1]).decode(\"utf-8\"),\n )", "def command(cmd, timeout=60): \n is_linux = platform.system() == 'Linux' \n \n p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True,shell=True, preexec_fn=os.setsid if is_linux else None)\n if timeout==0:\n return p.stdout.read()\n t_beginning = time.time() \n seconds_passed = 0 \n while True: \n if p.poll() is not None: \n break \n seconds_passed = time.time() - t_beginning \n if timeout and seconds_passed > timeout: \n if is_linux: \n os.killpg(p.pid, signal.SIGTERM) \n else: \n p.terminate() \n raise TimeoutError(cmd, timeout) \n time.sleep(0.1) \n return p.stdout.read()", "def sys_cmd(cmd: list) -> str:\n\n out, err = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()\n # Checking return code\n if err != b\"\":\n log.error(err.decode())\n notify_owner(f\"Exited(1) for: {err.decode()}\")\n exit(1)\n else:\n return out.decode()", "def run_command(cmd):\n return subprocess.call(cmd, shell=True)", "def execute(cmd, fail_ok=False, merge_stderr=False):\n cmdlist = shlex.split(cmd)\n result = ''\n result_err = ''\n stdout = subprocess.PIPE\n stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE\n proc = subprocess.Popen(cmdlist, stdout=stdout, stderr=stderr)\n result, result_err = proc.communicate()\n result = 
result.decode('utf-8')\n if not fail_ok and proc.returncode != 0:\n raise exceptions.CommandFailed(proc.returncode, cmd, result,\n result_err)\n return result", "def executeOld(cmd):\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n yield stdout_line \n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def execute_command(cmd):\n popen = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdout = b''\n while True: # Save output to youtube_stdout while this being echoed\n tmp = popen.stdout.read(1)\n stdout += tmp\n _print(tmp, end=\"\")\n sys.stdout.flush()\n # do it until the process finish and there isn't output\n if tmp == b\"\" and popen.poll() is not None:\n break", "def execute(cmd, output_file, env={}):\n return subprocess.Popen(shlex.split(cmd),\n stderr=subprocess.STDOUT,\n stdout=open(output_file, \"w\"),\n env = dict(os.environ, **env))", "def execute_cmd(self, cmd):\n stdout = \"\"\n returncode = -1\n process = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n try:\n signal.signal(signal.SIGALRM, self.signal_handler)\n signal.alarm(self.timeout)\n stdout, stderr = process.communicate()\n returncode = process.returncode\n self.print_debug(\"cmd={0}, returncode={1}\".format(cmd, returncode))\n if returncode != 0:\n self.print_debug(\"stderr={0}\".format(stderr))\n signal.alarm(0)\n except Exception as err:\n self.print_debug(str(err))\n return (returncode, stdout)", "def exec_cmd(command, wait_after=None):\n logging.debug('Executing command: %s', command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n proc.wait()\n\n if proc.returncode:\n cmd = ' '.join(command)\n error = subprocess.CalledProcessError(\n returncode=proc.returncode, cmd=cmd,\n output=proc.stdout.read().decode('utf-8'))\n\n logging.error('Error executing command \"%s\"', cmd)\n logging.debug('Command \"%s\" output: [%s]', cmd, error.output, stack_info=True, exc_info=error)\n raise error\n\n if wait_after:\n time.sleep(wait_after)\n\n return proc.stdout.read().decode('utf-8')", "def shell_cmd(*args):\n proc = subprocess.run(args)\n returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc", "def exec_cmd(command):\r\n global _verbose\r\n debug(\"Executing command: %s\" % command)\r\n if not _verbose:\r\n command = \"%s > /dev/null 2>&1\" % command\r\n resp = os.system(command)\r\n if resp != 0:\r\n exit(\"Command [%s] failed\" % command, resp)", "def shell_execute(self, cmd):\n self.log.debug(\"Executing command in shell: \" + str(cmd))\n\n dcos_config = os.path.expanduser('~/.dcos/dcos.toml')\n os.environ['PATH'] = ':'.join([os.getenv('PATH'), '/src/bin'])\n os.environ['DCOS_CONFIG'] = dcos_config\n os.makedirs(os.path.dirname(dcos_config), exist_ok=True)\n \n try:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n 
output, errors = p.communicate()\n except OSError as e:\n self.log.error(\"Error executing command \" + str(cmd) + \". \" + e)\n raise e\n\n return output.decode(\"utf-8\"), errors.decode(\"utf-8\")", "def shell_cmd(self, cmd):\n cmd_ex = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n output = cmd_ex.communicate()[0]", "def _exec_cmd_helper(self, cmd: str, nvim_ipc: str):\n assert self.busy is False\n\n self.shared_status.set_running()\n self.busy = True\n os.system(\"clear\")\n logging.info(\"Executing cmd {0}\".format(cmd))\n\n start = time.time()\n\n success = False\n if self.command_group.is_cmd_runner_command(cmd):\n for runner in self.runners:\n if runner.config.name == cmd:\n success = runner.run_all()\n break\n else:\n # The code block below essentially just \"tees\" the stdout and\n # stderr to a log file, while still preserving the terminal\n # output (inclusive colors).\n # Using subprocess.PIPE does not seem possible under Darwin,\n # since the pipe does not have the isatty flag set (the isatty\n # flag affects the color output).\n # Note that the file is only written at the end and not streamed.\n master, slave = pty.openpty()\n\n # This prevents LF from being converted to CRLF\n attr = termios.tcgetattr(slave)\n attr[1] = attr[1] & ~termios.ONLCR\n termios.tcsetattr(slave, termios.TCSADRAIN, attr)\n\n proc = subprocess.Popen(cmd, shell=True, stdout=slave, stderr=slave, close_fds=False)\n\n # Close the write end of the pipe in this process, since we don't need it.\n # Otherwise we would not get EOF etc.\n os.close(slave)\n\n read_stdout_stderr = os.fdopen(master, 'rb', buffering=0)\n complete_output = \"\"\n\n try:\n while proc.poll() is None:\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n\n # Read the last line\n output = read_stdout_stderr.readline()\n os.write(1, output)\n complete_output += output.decode()\n # This error is \"expected\" under Linux systems.\n # readline() doesn't seem to behave properly there.\n # The exception does not occur on MacOS.\n except OSError as oserr:\n if oserr.errno != errno.EIO or proc.poll() is None:\n logging.critical(\"Unexpected OS error: {0}\".format(oserr))\n except:\n logging.critical(\"Unexpected error while reading from process\")\n\n os.close(master)\n proc.wait()\n\n if proc.returncode == 0:\n success = True\n\n logfile, logfilename = tempfile.mkstemp(dir=cybld_helpers.get_base_path(),\n prefix=cybld_helpers.NVIM_LOG_PREFIX)\n\n # strip color codes from logfile\n # complete_output = re.sub(r'(\\x9B|\\x1B\\[)[0-?]*[ -\\/]*[@-~]', '', complete_output)\n complete_output = re.sub(r'\\x1b(\\[.*?[@-~]|\\].*?(\\x07|\\x1b\\\\))', '', complete_output)\n\n with open(logfile, 'w+') as logfile_opened:\n logfile_opened.write(complete_output)\n\n CyBldIpcNeovim(True, nvim_ipc, logfilename, cmd)\n\n end = time.time()\n\n self.busy = False\n cybld_helpers.print_seperator_lines()\n\n timediff_in_seconds = str(int(end - start))\n\n if success:\n cybld_helpers.print_centered_text(\"SUCCESS: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), True)\n self.shared_status.set_success()\n else:\n cybld_helpers.print_centered_text(\"FAIL: {0} ({1} seconds)\".format(cmd, timediff_in_seconds), False)\n self.shared_status.set_fail()\n\n if self.settings.print_stats:\n cybld_helpers.print_centered_text(self.stats.get_command_stats(cmd), None)\n\n if success:\n self.talker.say_success()\n else:\n self.talker.say_fail()\n\n cybld_helpers.print_seperator_lines()\n 
self.stats.update_command_stats(cmd, success, int(timediff_in_seconds))\n\n if success:\n self.success_callback(cmd)\n else:\n self.fail_callback(cmd)", "def sys_exec(command):\n print('Running: {}'.format(command))\n return os.popen(command).read().rstrip()", "def backtick(cmd, input=None, timeout=None):\n PIPE = subprocess.PIPE\n stdin = PIPE if input is not None else None\n return subprocess.check_output(cmd, stdin=stdin, stderr=PIPE,\n universal_newlines=True,\n timeout=timeout)", "def cmd(self, command):\n self.enode.get_shell('bash').send_command(command, matches=self.scapy_prompt)\n response = self.enode.get_shell('bash').get_response()\n return response", "def run_cmd(cmd):\n return check_output(cmd, shell=True).decode('utf-8')", "def shell_command(self, path, timeout=5):\n if self.verbose > 0:\n print('STARTING PROGRAM: ' + str(path))\n self.program = pexpect.spawn(\"/bin/bash\", [\"-c\", path], timeout, \n encoding='utf-8')\n # pexpect copies all input and output to this file\n self.program.logfile = open('tester.log', 'a')", "def native_cmd(cmd, whitespace=False):\n result = subprocess.check_output(cmd, shell=True).decode()\n\n result = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n return result", "def os_system(cmd):\n print cmd\n failure = os.system(cmd)\n if failure:\n print \"\"\"Command\n %s\nfailed\"\"\" % cmd\n sys.exit(1)\n unix_command_recorder.append(cmd) # record command for bash script", "def runCommand(self, cmd, stdin=None, env=None):\n\n\t mycmd=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t output, error=mycmd.communicate()\n\t while not mycmd.wait():\n\t \t# do stuff\n\t \treturn 0\n\n\n\n\t #if not isList(cmd):\n\t #cmd = shlex.split(cmd)\n\t #opts = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\t #if env:\n\t # opts.update(env=env)\n\t #if stdin:\n\t # opts.update(stdin=subprocess.PIPE)\n\t # stdout, stderr=subprocess.Popen(cmd, **opts).communicate(stdin)\n\t #else :\n\t # stdout, stderr=subprocess.Popen(cmd, **opts).communicate()\n\t #return stdout, stderr", "def execCMD(self, cmd, arg):\n result = subprocess.check_output([cmd, arg])\n return result", "def shell(cmd, check=True):\n eprint(f\"+ {cmd}\")\n return run(cmd, shell=True, check=check)", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def shell(cmd):\n print('Running \"{}\"...'.format(cmd))\n subprocess.check_call(cmd, shell=True)", "def _process_command(self, command, stdout=None, supress_dry_run=False):\n logging.debug('Executing shell command: %s', command)\n if (self._dry_run and supress_dry_run) or not self._dry_run:\n prc = Popen(command, shell=True, stdout=stdout)\n std = list(prc.communicate())\n if std[0] is not None:\n std[0] = std[0].decode('utf-8')\n return prc.returncode, std\n return 0, ('', '')", "def _run_shell(self, cmd):\n self._logger.info(\"Running command\\n{}\".format(\" \".join(cmd)))\n\n out = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n stdout, stderr = out.communicate()\n result = stdout.decode(encoding='utf-8')\n if stderr:\n error_msg = stderr.decode(encoding='utf-8')\n print(error_msg)\n raise Exception(error_msg)\n\n return result", "def exec_cmd(cmd):\n args = shlex.split(cmd)\n verbose = True\n\n # TRY\n FNULL = open(os.devnull, 'w')\n try:\n if verbose == True:\n 
subprocess.check_call(args, env=my_env)\n else:\n subprocess.check_call(args, stdout=FNULL, stderr=subprocess.STDOUT, env=my_env)\n # Exception\n except subprocess.CalledProcessError as e:\n print \"Command :: \", e.cmd\n print \"Return Code :: \", e.returncode\n print \"Output :: \", e.output\n # Finally\n finally:\n FNULL.close()", "def exec_cmd(cmd):\n print(' '.join(str(e) for e in cmd))\n try:\n res = subprocess.run(cmd, capture_output=True, check=True)\n print(res.stdout.decode(\"utf8\"))\n return res\n except subprocess.CalledProcessError as err:\n logging.error(err.stderr)\n raise err", "def _execute(cmd):\n LOGGER.info('otool command: {}'.format(cmd))\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout = process.communicate()[0]\n\n retcode = process.returncode\n LOGGER.info('otool return status code: {}'.format(retcode))\n if retcode:\n raise tr.OtoolError(retcode)\n\n return stdout", "def _exec_cmd(self, cmd):\n proc = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n ret = proc.returncode\n logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', cmd, out,\n err, ret)\n if ret == 0:\n return out\n else:\n raise AdbError(cmd=cmd, stdout=out, stderr=err, ret_code=ret)", "def execute_command(self, command):\n return self.ssh.exec_command(command)", "def do_shell(self, command):\n os.system(command)", "def shell_command(context, cmd, err_msg=\"Shell command error\"):\n try:\n\n context.last_cmd = cmd\n output = check_output(cmd, shell=True, cwd=os.getcwd())\n context.output = output\n\n except:\n raise Exception(err_msg)", "def execute(cmd):\n print(f\"Execute command: {' '.join(cmd)}\")\n popen = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n universal_newlines=False,\n bufsize=1, # unbuffered\n )\n for stdout_line in iter(popen.stdout.readline, b''):\n yield stdout_line\n\n popen.stdout.close()\n popen.kill()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, cmd)", "def execute_command(cmd, debug=True, sep=False):\n if debug:\n print('#', cmd)\n if sep:\n print(\"-\" * 78)\n args = shlex.split(cmd)\n child = Popen(args)\n child.communicate()\n return child.returncode", "def run_cmd ( cmd, \n sin = \"\", \n shell = True, \n wait = False, \n log_error = True,\n secure = None,\n stop_waiting_if = None,\n do_not_log = False,\n encerror = \"ignore\",\n encoding = \"utf8\",\n change_path = None,\n communicate = True,\n preprocess = True) :\n if secure is not None :\n with open(secure,\"w\") as f : f.write(\"\")\n add = \">%s\" % secure \n if isinstance (cmd, str) : cmd += \" \" + add\n else : cmd.append(add)\n if not do_not_log : \n fLOG (\"execute \", cmd)\n \n if change_path is not None :\n current = os.getcwd()\n os.chdir(change_path)\n \n if sys.platform.startswith(\"win\") :\n \n startupinfo = subprocess.STARTUPINFO() \n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n \n pproc = subprocess.Popen (cmd, \n shell = shell, \n stdout = subprocess.PIPE if wait else None, \n stderr = subprocess.PIPE if wait else None,\n startupinfo = startupinfo)\n else :\n cmdl = split_cmp_command(cmd) if preprocess else cmd\n if not do_not_log : \n fLOG(\"--linux\", cmdl)\n pproc = subprocess.Popen (cmdl,\n shell = shell, \n stdout = subprocess.PIPE if wait else None, \n stderr = subprocess.PIPE if wait else None)\n\n if isinstance(cmd, list):\n cmd = \" \".join(cmd)\n \n if wait : \n \n out = [ ]\n skip_waiting = False\n \n if communicate:\n stdoutdata, 
stderrdata = pproc.communicate(sin, timeout = None)\n out = decode_outerr(stdoutdata, encoding, encerror, cmd)\n err = decode_outerr(stderrdata, encoding, encerror, cmd)\n else :\n stdout, stderr = pproc.stdout, pproc.stderr\n \n if secure is None :\n for line in stdout :\n decol = decode_outerr(line, encoding, encerror, cmd)\n if not do_not_log :\n fLOG(decol.strip(\"\\n\\r\"))\n\n out.append(decol.strip(\"\\n\\r\"))\n if stdout.closed: break\n if stop_waiting_if is not None and stop_waiting_if(decol) :\n skip_waiting = True\n break\n else :\n last = []\n while pproc.poll() is None :\n if os.path.exists (secure) :\n with open(secure,\"r\") as f :\n lines = f.readlines()\n if len(lines) > len(last) :\n for line in lines[len(last):] :\n fLOG(line.strip(\"\\n\\r\"))\n out.append(line.strip(\"\\n\\r\"))\n last = lines\n if stop_waiting_if is not None and len(last)>0 and stop_waiting_if(last[-1]) :\n skip_waiting = True\n break\n time.sleep(0.1)\n \n if not skip_waiting :\n pproc.wait ()\n \n out = \"\\n\".join(out)\n temp = err = stderr.read()\n try:\n err = decode_outerr(temp, encoding, encerror, cmd)\n except :\n err = decode_outerr(temp, encoding, \"ignore\", cmd)\n \n stdout.close()\n stderr.close()\n \n err = err.replace(\"\\r\\n\",\"\\n\")\n if not do_not_log : fLOG (\"end of execution \", cmd)\n if len (err) > 0 and log_error : fLOG (\"error (log)\\n%s\" % err)\n \n if change_path is not None :\n os.chdir(current)\n \n if sys.platform.startswith(\"win\") :\n return out.replace(\"\\r\\n\",\"\\n\"), err.replace(\"\\r\\n\",\"\\n\")\n else:\n return out, err\n else :\n \n if change_path is not None :\n os.chdir(current)\n \n return \"\",\"\"", "def bash(ctx):\n if os.environ.get('RUNNING_ON_DOCKER', False):\n prefix = 'docker-'\n else:\n prefix = ''\n\n file_name = prefix + 'bashrc'\n rcfile = os.path.join(os.path.dirname(__file__), 'data', file_name)\n _execvp('bash', ['bash', '--rcfile', rcfile])", "def execute_local_cmd(cmd, timeout=10):\n l.info(\"Executing local command [%s]\", cmd)\n pg_cmd = PySysCommand(cmd)\n pg_cmd.run(timeout=timeout)\n output = pg_cmd.stdout + pg_cmd.stderr\n l.info(\"Result: %s\", output)", "def do_shell(self, line):\n print 'Running shell command:', line\n output = os.popen(line).read()\n print output\n self.last_output = output", "def shell(self, cmd):\n raise NotImplementedError", "def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()", "def shellcommand(command):\n\n subprocess.call(str(command))", "def RunExternal(command, str_stdin=\"\"):\n\n logging.info(\"Running external command: %s\" % command)\n popen_inst = Popen3(command, True)\n logging.debug(\"stdin = %s\" % str_stdin)\n str_stdout = str_stderr = \"\"\n while 1:\n read_from_child = -1\n if not popen_inst.tochild.closed:\n (rlist, wlist, xlist) = select([popen_inst.fromchild, popen_inst.childerr], \\\n [popen_inst.tochild], [])\n else:\n (rlist, wlist, xlist) = select([popen_inst.fromchild, popen_inst.childerr], [], [])\n\n if popen_inst.fromchild in rlist:\n tmpread = popen_inst.fromchild.read(4096)\n read_from_child = len(tmpread)\n str_stdout += tmpread\n \n if popen_inst.childerr in rlist:\n tmpread = popen_inst.childerr.read(4096)\n read_from_child += len(tmpread)\n str_stderr += tmpread\n \n if popen_inst.tochild in wlist and len(str_stdin) > 0:\n popen_inst.tochild.write(str_stdin[:min( [ len(str_stdin), 4096])])\n str_stdin = str_stdin[min( [ len(str_stdin), 4096]):]\n read_from_child += 1\n elif popen_inst.tochild in wlist:\n popen_inst.tochild.close()\n\n 
#logging.debug(\"len(str_stdin) = %i, read_from_child = %i, rlist = %s, wlist = %s\", len(str_stdin), read_from_child, rlist, wlist)\n if popen_inst.poll() != -1 and len(str_stdin) == 0 and (read_from_child == -1 or read_from_child == 0):\n break\n \n logging.debug(\"Exit code: %i\", popen_inst.wait())\n logging.debug(\"stdout: %s\", str_stdout)\n logging.debug(\"strerr: %s\", str_stderr)\n return str_stdout, str_stderr", "def command(cmd: list, stdin: str):\n proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE)\n out, err = proc.communicate(stdin.encode(\"utf-8\"))\n exit = proc.wait()\n return out.decode(\"utf-8\")", "def exec_cmd(cmd):\n\targs = shlex.split(cmd)\n\tverbose = True\n\n\ttry:\n\t\tif verbose == True:\n\t\t\tsubprocess.check_call(args)\n\t\telse:\n\t\t\tsubprocess.check_call(args,\n\t\t\t\t\t\t\t\t stdout=subprocess.STDOUT,\n\t\t\t\t\t\t\t\t stderr=subprocess.STDOUT)\n\t# Exception\n\texcept subprocess.CalledProcessError as e:\n\t\tprint \"Command\t :: \", e.cmd\n\t\tprint \"Return Code :: \", e.returncode\n\t\tprint \"Output\t :: \", e.output", "def _ExecuteCommand( self, command, contents = None ):\n phandle = utils.SafePopen( command,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE )\n\n stdoutdata, stderrdata = phandle.communicate( contents )\n\n if phandle.returncode:\n message = SHELL_ERROR_MESSAGE.format(\n command = ' '.join( command ),\n code = phandle.returncode,\n error = ToUnicode( stderrdata.strip() ) )\n _logger.error( message )\n raise RuntimeError( message )\n\n return stdoutdata", "def run_cmd(cmd):\n print 'running: %s' % cmd\n return subprocess.call(cmd.split(), env=os.environ, shell=False)", "def subprocess_nowait(cmd, shell=False, cwd=None, env=None):\n # type: (str, bool, str, dict) -> subprocess.Process\n return subprocess.Popen(cmd, shell=shell, cwd=cwd, env=env)", "def cmd(command):\n pflush(\"[%s]> %s\" % (HOSTNAME, command))\n code = os.system(command)\n if code != 0:\n raise RuntimeError(\"Error executing: \" + command)", "def run(cmd):\n cmd = str(cmd)\n\n if env['verbose']:\n sys.stdout.write('--> %s\\n' % cmd)\n\n cmd_list = shlex.split(cmd)\n\n p = subprocess.Popen(\n cmd_list,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n return p.communicate()", "def executeCommand(cmd,loopsleep):\n\tsleep(loopsleep)\n\tresult = subprocess.getoutput(cmd)\n\treturn(result.split(\"\\n\"))", "def execute_shell(self, cmd):\n try:\n return common.execute_shell(cmd, False)\n except Exception, e:\n raise exception.TermSaverException(help=_(\n\"\"\"Could not execute the command [%(cmd)s] properly.\n%(message)s \\nError details: %(error)s\"\"\") % {\n \"cmd\": \" \".join(cmd),\n \"message\": \"Make sure you have figlet installed!\",\n \"error\": str(e)\n }\n )", "def unchecked_call(self, cmd):\n args = self._parse_command(cmd)\n\n if self.proc is not None:\n nullbyte = struct.pack('B', 0)\n for arg in args:\n self.proc.stdin.write(b'ARG')\n self.proc.stdin.write(nullbyte)\n self.proc.stdin.write(arg.encode())\n self.proc.stdin.write(nullbyte)\n self.proc.stdin.write(b'RUN')\n self.proc.stdin.write(nullbyte)\n self.proc.stdin.flush()\n reply = {\n 'STDOUT': '',\n 'STDERR': '',\n 'STATUS': '',\n }\n for _ in range(0, 3):\n fieldname = Herbstluftwm._read_text_until_null_byte(self.proc.stdout)\n if fieldname is None:\n raise Exception('herbstclient did non print a full reply')\n fieldvalue = Herbstluftwm._read_text_until_null_byte(self.proc.stdout)\n if fieldvalue is None:\n raise 
Exception('herbstclient did non print a full reply')\n reply[fieldname] = fieldvalue\n #\n complete_proc = subprocess.CompletedProcess(args, int(reply['STATUS']))\n complete_proc.stdout = reply['STDOUT']\n complete_proc.stderr = reply['STDERR']\n else:\n complete_proc = subprocess.run(\n [self.herbstclient_path, '-n'] + args,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=self.env,\n universal_newlines=True,\n # Kill hc when it hangs due to crashed server:\n timeout=2)\n\n return complete_proc", "def run_command(cmd, cmd_input=None, ok_exit_codes=None):\n proc = make_subprocess(cmd, stdout=True, stderr=True, stdin=True,\n close_fds=True)\n return finish_subprocess(proc, cmd, cmd_input=cmd_input,\n ok_exit_codes=ok_exit_codes)", "def execCommand(command: str):\n \n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n \n return stdout.decode(\"utf-8\")", "def run_cmd(cmd):\n logging.debug('Run command \"'+cmd+'\"')\n try:\n process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n process.check_returncode()\n\n except Exception as e:\n logging.exception(str(e) +\"\\nCMD_SHELL : \"+cmd+\"\\nSTDOUT : \"+process.stdout.decode()+\"\\nSTDERR : \"+process.stderr.decode(), exc_info=True)\n #logging.critical(\"{CDM : \"+cmd+\", \"} : \"+cmd)\n #logging.critical(\"STDOUT : \"+process.stdout.decode())\n #logging.critical(\"STDERR : \"+process.stderr.decode())\n #raise e\n\n return process.stdout.decode()", "def run_subprocess(cmd):\n subprocess.Popen(cmd, stdin =subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,)", "def launch_shell(*, cwd: Optional[pathlib.Path] = None) -> None:\n with emit.pause():\n subprocess.run([\"bash\"], check=False, cwd=cwd)", "def run_psh_cmd(self, ps_cmd, timeout=310, timeout_exception=True):\n if 'powershell' not in self.info['plugins']:\n self.load_plugin('powershell')\n ps_cmd = f'powershell_execute \"{ps_cmd}\"'\n out = self.run_with_output(ps_cmd, ['[-]', '[+]'], timeout=timeout, timeout_exception=timeout_exception)\n return out", "def run(cmd, cmd_input=None, cwd=None):\n\n with Popen(\n \" \".join(cmd) if cwd else cmd,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE,\n cwd=cwd,\n shell=True,\n env={\"PATH\": cwd} if cwd else None,\n ) as proc:\n out, err = proc.communicate(\n input=cmd_input.encode(\"utf-8\") if cmd_input else None\n )\n rcode = proc.returncode\n\n return out.decode(\"utf-8\"), err.decode(\"utf-8\"), rcode", "def bash(\n args: Tuple[str, ...],\n cwd: str = \".\",\n log: bool = True,\n ignore_return_code: bool = False,\n) -> str:\n process = subprocess.Popen(\n args, cwd=cwd, stdout=subprocess.PIPE, universal_newlines=True\n )\n result = \"\"\n for stdout_line in iter(process.stdout.readline, \"\"):\n result += stdout_line\n if log:\n print(stdout_line, end=\"\")\n process.stdout.close()\n return_code = process.wait()\n if not ignore_return_code and return_code != 0:\n raise subprocess.CalledProcessError(return_code, args)\n else:\n return result", "def _shell_std(self, commandName, args, stdin=None):\n cmd = [commandName]\n if type(args) == str:\n cmd.append(args)\n else:\n cmd.extend(args)\n \n #print 'running', cmd\n \n if stdin:\n out = subprocess.Popen(cmd, stdin=stdin, stdout=subprocess.PIPE)\n else:\n out = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n \n return out.stdout", "def _execute_command(self, cmd, sudo=False, pty=True, log_cmd=True,\n retries=0, warn_only=False):\n if log_cmd:\n 
self.logger.info('Executing command: {0}'.format(cmd))\n else:\n self.logger.info('Executing command: ***')\n with fab_env(**self.fab_env_conf):\n while True:\n if sudo:\n out = fab.sudo(cmd, pty=pty, warn_only=warn_only)\n else:\n out = fab.run(cmd, pty=pty, warn_only=warn_only)\n\n self.logger.info(\"\"\"Command execution result:\n Status code: {0}\n STDOUT:\n {1}\n STDERR:\n {2}\"\"\".format(out.return_code, out, out.stderr))\n if out.succeeded or (warn_only and retries == 0):\n return out\n else:\n if retries > 0:\n time.sleep(30)\n retries -= 1\n else:\n raise Exception('Command: {0} exited with code: '\n '{1}. Tried {2} times.'\n .format(cmd, out.return_code,\n retries + 1))", "def shell_call(cmd):\n try:\n x = subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True\n )\n ret = (x.returncode, str(x.stdout, \"utf-8\"), str(x.stderr, \"utf-8\"))\n return ret\n except subprocess.SubprocessError as e:\n logger.error(\"System error running command: \" + str(cmd))\n logger.error(str(e.output))\n raise RuntimeError()", "def run_command(cmd, shell=False):\n\tlog.debug(\"Running command: \" + ' '.join(cmd))\n\tprocess = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=shell)\n\tcmd_out = ''\n\tcmd_err = ''\n\twhile True:\n\t\tout = process.stdout.readline()\n\t\tif out == '' and process.poll() != None:\n\t\t\tcmd_err = process.stderr.read()\n\t\t\tbreak\n\t\tif out != '':\n\t\t\tsys.stdout.write(out)\n\t\t\tsys.stdout.flush()\n\t\t\tcmd_out += out\n\t\t\t\n\tif cmd_err != '':\n\t\tlog.warning(\"Error running command: \" + cmd_err)\n\treturn cmd_out, cmd_err, process.returncode", "def exec_command(command):\n exit_code = 1\n stdo = ''\n stde = ''\n from subprocess import Popen, PIPE\n try:\n pobj = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)\n #pobj.wait()\n stdo, stde = pobj.communicate()\n exit_code = pobj.returncode\n except:\n print \"Unexpected error at exec_command:\", sys.exc_info()\n import platform\n s = traceback.format_exc()\n logStr = \" exec command error : error\\n> stderr:\\n%s\\n\" %s\n error = platform.node()+\"-\"+logStr\n return (1,error,\"\")\n return (exit_code, stdo, stde)" ]
[ "0.75032", "0.7133294", "0.70825005", "0.7043764", "0.70271105", "0.6944504", "0.6944504", "0.6944504", "0.6891018", "0.6809242", "0.67647636", "0.66959184", "0.66665363", "0.66484356", "0.6620994", "0.65703243", "0.65675676", "0.65449697", "0.6537749", "0.65313303", "0.65089", "0.6504813", "0.6419698", "0.6414128", "0.6410751", "0.640202", "0.63350517", "0.6327819", "0.6318725", "0.6308358", "0.6298505", "0.6287915", "0.6264903", "0.6263038", "0.6262541", "0.625801", "0.62530637", "0.6226538", "0.6226411", "0.62093645", "0.6205465", "0.6192789", "0.6191168", "0.61726713", "0.6167898", "0.6164675", "0.61547786", "0.61539257", "0.6153531", "0.61433595", "0.61429477", "0.6136995", "0.61354434", "0.6119816", "0.611762", "0.6111299", "0.6098198", "0.60914844", "0.6085654", "0.6085269", "0.60787773", "0.60783803", "0.60734856", "0.6071953", "0.60710603", "0.60689193", "0.6060254", "0.6052984", "0.60514575", "0.6049055", "0.60459703", "0.6034996", "0.602273", "0.6020284", "0.6015846", "0.60157865", "0.6002794", "0.59932077", "0.59875023", "0.5985798", "0.5984637", "0.59833634", "0.5979935", "0.59791315", "0.5977544", "0.59742725", "0.59599644", "0.5952521", "0.59444267", "0.59425527", "0.59363407", "0.5935099", "0.5932331", "0.59314847", "0.59221494", "0.5919294", "0.59163207", "0.59162474", "0.5913806", "0.5913584", "0.59125376" ]
0.0
-1
Convert anatomical images from dicom or ifiles to briks or niftis.
def ConvertAnat(self): if self.verbose: print 'Convert T1 and T2 images...' for entry in self.info: info = self.info[entry] if self.info[entry]['imgfile'] is None: continue if self.info[entry]['type'] in self.anat_types: key = self.info[entry]['type'] imgfile = self.info[entry]['imgfile'] cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \ imgfile, self.info[entry]['filetype']) checkfile = '%s%s' % (imgfile, self.info[entry]['suffix']) self.CheckExec(cmd, [checkfile]) if self.info[entry]['norm_src'] and self.skull_strip: cmd = "3dSkullStrip -input %s -prefix %s" % \ (checkfile, self.info[entry]['imgfile_skstrip']) checkfile = '%s+orig.BRIK' % \ (self.info[entry]['imgfile_skstrip']) self.CheckExec(cmd, [checkfile])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def read_isbi2013_2shell():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'isbi2013')\n fraw = pjoin(folder, 'phantom64.nii.gz')\n fbval = pjoin(folder, 'phantom64.bval')\n fbvec = pjoin(folder, 'phantom64.bvec')\n\n md5_dict = {'data': '42911a70f232321cf246315192d69c42',\n 'bval': '90e8cf66e0f4d9737a3b3c0da24df5ea',\n 'bvec': '4b7aa2757a1ccab140667b76e8075cb1'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def transform_images(img1,img2):", "def _get_image_blob(roidb):\n num_images = len(roidb)\n\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = io.imread(roidb[i]['image'], plugin='tifffile')\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n im, im_scale = blob_utils.prep_im_for_blob(im, roidb[i], 'train')\n im_scales.append(im_scale[0])\n processed_ims.append(im[0])\n\n # Create a blob to hold the input images [n, c, s, h, w]\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales", "def _get_image_blobs(self, roidb, scale_inds):\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n im, im_scale = self._get_image_blob(im, scale_inds[i], False)\n im_scales.append(im_scale)\n processed_ims.append(im)\n \n # Create a blob to hold the input images\n blob = self.im_list_to_blob(processed_ims)\n \n return blob, im_scales", "def nifti2dicom(seg_nifti, bk_nifti, ref_dicom_dir, save_dir, description, mode_RGB=False, zoom_num=4, watermarks=True): \n #Load nifti, here is segmentation and background\n seg_image = sitk.ReadImage(seg_nifti)\n seg_image = sitk.GetArrayFromImage(seg_image)\n seg_image = seg_image.astype(np.uint8)\n \n # print(nifti_image.shape)\n bk_image = sitk.ReadImage(bk_nifti)\n bk_image = sitk.GetArrayFromImage(bk_image)\n\n #Get Volume report from the seg_image, cubic ml, and the 95% CI:\n v_nonenhancing = round(seg_image[seg_image==1].sum()/1000,1)\n ci_nonenhancing = round(v_nonenhancing*0.2,1)\n v_enhancing = 
round(seg_image[seg_image==4].sum()/1000,1)\n ci_enhancing = round(v_enhancing*0.3,1)\n v_edema = round(seg_image[seg_image==2].sum()/1000,1)\n ci_edema = round(v_edema*0.1,1)\n\n #Loading the reference dicom, in order to get the headers of each slice. \n series_IDs = sitk.ImageSeriesReader.GetGDCMSeriesIDs(ref_dicom_dir)\n if not series_IDs:\n print(\"ERROR: given directory \\\"\"+data_directory+\"\\\" does not contain a DICOM series.\")\n sys.exit(1)\n\n series_file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(ref_dicom_dir, series_IDs[0])\n\n series_reader = sitk.ImageSeriesReader()\n series_reader.SetFileNames(series_file_names)\n\n # Configure the reader to load all of the DICOM tags (public+private):\n # By default tags are not loaded (saves time).\n # By default if tags are loaded, the private tags are not loaded.\n # We explicitly configure the reader to load tags, including the private ones.\n series_reader.MetaDataDictionaryArrayUpdateOn()\n series_reader.LoadPrivateTagsOn()\n ref_image = series_reader.Execute()\n \n #set reader for slice \n reader = sitk.ImageFileReader()\n reader.LoadPrivateTagsOn()\n \n writer = sitk.ImageFileWriter()\n # Use the study/series/frame of reference information given in the meta-data\n # dictionary and not the automatically generated information from the file IO\n writer.KeepOriginalImageUIDOn()\n\n # Copy some of the tags and add the relevant tags indicating the change.\n # For the series instance UID (0020|000e), each of the components is a number, cannot start\n # with zero, and separated by a '.' We create a unique series ID using the date and time. tags of interest:\n \n castFilter = sitk.CastImageFilter()\n castFilter.SetOutputPixelType(sitk.sitkInt16)\n ORG_ROOT=\"1.3.12.2\"\n #create SeriesInstanceUID and StudyInstanceUID\n SeriesInstanceUID = generateUID(org_root=ORG_ROOT)\n StudyInstanceUID = generateUID(org_root=ORG_ROOT)\n #create a prefix for the accession number\n acc='BTS'+series_reader.GetMetaData(0,\"0008|0050\")\n #changing spacing\n reader.SetFileName(series_file_names[0])\n reader.ReadImageInformation()\n\n if mode_RGB:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID),\n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID), \n (\"0028|0004\", 'RGB'),\n (\"0028|0002\", \"3\")]\n else:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID), \n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID)] \n\n os.makedirs(save_dir, exist_ok = True)\n\n #for nifti, the main axis is the first one, while for dicoms it is the last one\n for i in range(ref_image.GetDepth()):\n #zoom 2 times, todo need to figure out which axis to zoom, post is the 3rd\n #pre assume the first axis is the slice numbers\n bk_slice = ndimage.zoom(bk_image[i,:,:], zoom_num, order=0)\n seg_slice = ndimage.zoom(seg_image[i,:,:], zoom_num, order=0)\n \n #Due to the DICOM saving coordinate system is different with nifti,i.e mirrored, it is easier to flip array\n bk_slice = np.flip(bk_slice, (0, 1)) \n seg_slice = np.flip(seg_slice, (0, 1)) \n\n #get contours\n seg_idx = get_contours(seg_slice)\n \n #add watermarks\n if watermarks:\n canvas_tmp = np.zeros(list(bk_slice.shape), dtype=np.uint8)\n font = cv2.FONT_HERSHEY_PLAIN\n cv2.putText(canvas_tmp,'FOR RESEARCH ONLY;REFER TO OFFICIAL REPORT FOR DETAILS',(10,30), \n font,2,255,1)\n cv2.putText(canvas_tmp,'(This tool is intended for evaluation of gliomas, and results may be unreliable for other pathologies)',(90,50), \n 
font,1,255,1) \n #add Legend and volumes \n cv2.putText(canvas_tmp, 'Legend Volume(+/-95% CI)',(10,900), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Edema {v_edema}+/-{ci_edema} mL',(30,920), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Enhancing {v_enhancing}+/-{ci_enhancing} mL',(30,940), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Non- {v_nonenhancing}+/-{ci_nonenhancing} mL',(30,960), font,0.8,255,1)\n cv2.putText(canvas_tmp,'Enhancing', (30,975), font,0.8,255,1)\n cv2.putText(canvas_tmp,'(The error is based on testing of algorithm performance vs. manual segmentation)', (150,1000), font,1,255,1)\n\n \n \n #burning segmentation contour into slices\n cv2.line(seg_idx, (10,915), (20,915), 2, 2)\n cv2.line(seg_idx, (10,935), (20,935), 4, 2)\n cv2.line(seg_idx, (10,955), (20,955), 1, 2)\n \n if mode_RGB:\n #burning the watermarks\n bk_slice[canvas_tmp==255]=bk_slice.max()\n #convert dicom from nomogram to RGB\n bk_slice = toRGB(bk_slice)\n #colorize the bk_slice according to seg_idx\n bk_slice[0,:,:,0][seg_idx==1] = 255\n bk_slice[0,:,:,1][seg_idx==4] = 255\n bk_slice[0,:,:,2][seg_idx==2] = 255 \n else:\n #grey the ori_image_slice according to seg_idx\n bk_slice[canvas_tmp==255]=bk_slice.max()//2\n bk_slice[seg_idx==1] = bk_slice.max()*2//50\n bk_slice[seg_idx==2] = bk_slice.max()*1//50\n bk_slice[seg_idx==4] = bk_slice.max()*3//50\n\n converted_slice = sitk.GetImageFromArray(bk_slice)\n reader.SetFileName(series_file_names[i])\n reader.ReadImageInformation()\n spacing_new = [i/zoom_num for i in reader.GetSpacing()[:-1]] + [reader.GetSpacing()[-1]]\n \n #generate SOPInstanceUID\n SOPInstanceUID = generateUID(org_root=ORG_ROOT)\n series_tag_values = [(k, reader.GetMetaData(k)) for k in reader.GetMetaDataKeys()] + customized_tag_values + [(\"0008|0018\", SOPInstanceUID)]\n# print(series_tag_values)\n if '_seg_' in description:\n converted_slice = converted_slice \n \n # Tags shared by the series.\n for tag, value in series_tag_values:\n converted_slice.SetMetaData(tag, value)\n \n # especially set spacing tags\n # Image Position (Patient)\n converted_slice.SetMetaData(\"0020|0013\", str(i)) # Instance Number\n converted_slice.SetSpacing(spacing_new)\n \n # Write to the output directory and add the extension dcm, to force writing in DICOM format \n writer.SetFileName(os.path.join(save_dir, str(i)+'.dcm'))\n writer.Execute(converted_slice)", "def getimgs():", "def ToIco( self, forced_bpp_conversion, imagepaths, icopaths ):\n global flag_bit\n \n flag_bit = forced_bpp_conversion\n all_log_mess = []\n \n ## Checks icopaths.\n if not icopaths:\n log_err = 'Output: file/s missing\\n'\n return log_err\n else:\n if isinstance(icopaths, list):\n for path in icopaths:\n if path.lower().endswith('.ico'):\n idType = 1\n elif path.lower().endswith('.cur'):\n idType = 2\n else:\n log_err = 'Output: file \"%s\" with wrong file extension' %path\n return log_err\n else:\n log_err = 'Output: file/s not in a list\\n'\n return log_err\n \n ## Checks imagepaths.\n if not imagepaths:\n log_err = 'Input: file/s missing\\n'\n return log_err\n for ii, paths in enumerate(imagepaths):\n if isinstance(paths, list):\n if not paths:\n log_err = 'Input: file/s missing\\n'\n return log_err\n elif idType == 2 and len(paths) > 1:\n log_err = \"Input: can't create multi-size .cur\\n\"\n return log_err\n else:\n for path in paths:\n if not isfile(path):\n log_err = 'Input: file \"%s\" not exists\\n' %path\n return log_err\n else:\n log_err = 'Input: entry #%s is not a list\\n' %ii\n return log_err\n \n ## Do process.\n for path 
in zip(imagepaths, icopaths):\n all_log_mess.append(self.Build( path[0], path[1], idType ))\n \n return all_log_mess", "def convert(self):\n \n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n if len(vrtlist)!=0:\n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d mosaicked images were successfully converted to %d %s files.' % (dataCount, len(vrtlist), tifCount, str(self.outformat)))\n \n \n if len(vrtlist)==0: \n \n hdflist = sorted(glob.glob(self.fullPath + '/*.hdf'))\n for i in range(len(hdflist)):\n ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i]], subset = self.subset, outformat = 'GTiff')\n ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif')\n ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True)\n\n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n \n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/full*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d HDF files were successfully converted to %d %s files.' 
% (dataCount, len(hdflist), tifCount, str(self.outformat)))", "def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break", "def forward(self, rpn_rois, roidb, im_info):\n im_scales = im_info.data.numpy()[:, 2]\n\n # get_fast_rcnn_blob_names()\n output_blob_names = ['rois', \n 'labels_int32', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights',\n 'mask_rois', 'roi_has_mask_int32', 'masks_int32']\n \n # For historical consistency with the original Faster R-CNN\n # implementation we are *not* filtering crowd proposals.\n # This choice should be investigated in the future (it likely does\n # not matter).\n # Note: crowd_thresh=0 will ignore _filter_crowd_proposals\n self.add_proposals(roidb, rpn_rois, im_scales, crowd_thresh=0)\n blobs = {k: [] for k in output_blob_names}\n self.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n\n return blobs", "def transform_images(symbol_dict,\n gray=True,\n gauss_filter=-1,\n bilat_filter=-1,\n global_thresh=-1,\n adapt_thresh_mean=-1,\n adapt_thresh_gauss=-1,\n otsus=-1,\n laplacian=False,\n canny=-1,\n rescale_global_mean=False,\n resize=-1):\n \n for s in symbol_dict.values():\n for symb_img in s:\n if gray:\n gray_img = cv2.cvtColor(symb_img.img, cv2.COLOR_BGR2GRAY)\n symb_img.img = gray_img\n if gauss_filter != -1:\n blur_img = cv2.GaussianBlur(symb_img.img,\n (gauss_filter, gauss_filter),\n 0)\n symb_img.img = blur_img\n if bilat_filter != -1:\n bilat_img = cv2.bilateralFilter(symb_img.img,\n bilat_filter[0],\n bilat_filter[1],\n bilat_filter[2])\n symb_img.img - bilat_img\n if global_thresh != -1:\n ret, thresh_img = cv2.threshold(symb_img.img,\n global_thresh, 255,\n cv2.THRESH_BINARY)\n symb_img.img = thresh_img\n if adapt_thresh_mean != -1:\n thresh_img = cv2.adaptiveThreshold(symb_img.img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \\\n cv2.THRESH_BINARY, adapt_thresh_mean, 2)\n symb_img.img = thresh_img\n if adapt_thresh_gauss != -1:\n thresh_img = cv2.adaptiveThreshold(symb_img.img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \\\n cv2.THRESH_BINARY, adapt_thresh_gauss, 2)\n symb_img.img = thresh_img\n if otsus != -1:\n ret, thresh_img = cv2.threshold(\n symb_img.img, otsus, 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n symb_img.img = thresh_img\n if laplacian:\n lap_img = cv2.Laplacian(symb_img.img, cv2.CV_64F)\n symb_img.img = lap_img\n if canny != -1:\n canny_img = cv2.Canny(symb_img.img, canny[0], canny[1])\n symb_img.img = canny_img\n # TODO: is normalizing before resizing correct?\n if rescale_global_mean:\n scaled_img = symb_img.img / 255.0\n symb_img.img = scaled_img - np.mean(scaled_img)\n if resize != -1:\n old_size = symb_img.img.shape[:2]\n\n delta_w = max(old_size) - old_size[1]\n delta_h = max(old_size) - old_size[0]\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n\n color = [0, 0, 0]\n symb_img.img = cv2.copyMakeBorder(symb_img.img,\n top,\n bottom,\n left,\n right,\n cv2.BORDER_CONSTANT,\n value=color)\n\n symb_img.img = 
cv2.resize(symb_img.img, (resize, resize))", "def LIDC_to_niftis(extraction_results_dataframe, spacing=[1.0, 1.0, 1.0], debug=False):\n loop = map(\n lambda t: t[1][[\"extraction_location\", \"annotation_file\"]].values,\n extraction_results_dataframe.iterrows(),\n )\n progbar = tqdm.tqdm(\n loop, total=extraction_results_dataframe.shape[0], desc=\"Converting to NiFTIs...\"\n )\n converted_dicoms = Parallel(n_jobs=1, prefer=\"processes\")(\n delayed(convert_to_niftis)(*t, spacing=spacing) for t in progbar\n )\n initial_shape = extraction_results_dataframe.shape[0]\n extraction_results_dataframe = extraction_results_dataframe[converted_dicoms]\n final_shape = extraction_results_dataframe.shape[0]\n print(f\"{final_shape}/{initial_shape} DICOMs folders successfully converted.\")\n\n # Update config file\n config_file = get_config_file_path(dataset_name=\"fed_lidc_idri\", debug=debug)\n write_value_in_config(config_file, \"preprocessing_complete\", True)\n\n return extraction_results_dataframe", "def dicom_to_nrrd(self, dicom_root_dir, nrrd_files_dir):\n TEMP_FILE = '/Users/chunwei/Downloads/_TEMP'\n SYSTEM_COMMAND = 'gdcmconv -w {0} {1}'\n\n for i, subject_folder in enumerate(glob.glob(dicom_root_dir + '/*')):\n nrrd_file = nrrd_files_dir + '/'\\\n + re.search(self.KEY_WORD_FLODER, subject_folder).group()\\\n + '_%02d.nrrd' % (i + 1)\n print 'Processing ' + nrrd_file\n\n if not os.path.exists(nrrd_files_dir):\n os.makedirs(nrrd_files_dir)\n\n data_3d = None\n\n dicom_files = glob.glob(subject_folder + '/*')\n for j, dicom_file in enumerate(dicom_files):\n # prompt\n ratio = 100 * float(j)/float(len(dicom_files))\n sys.stdout.write('\\r%d%%' % ratio)\n sys.stdout.flush()\n\n # uncompress the dicom image\n command = SYSTEM_COMMAND.format(dicom_file, TEMP_FILE)\n call(command.split(), shell=False)\n\n # concatenate dicom image layer by layer\n ds = dicom.read_file(TEMP_FILE)\n data = ds.pixel_array\n data_3d = self.concatenate_layers(data_3d, data) # bottom up\n\n # get nrrd options\n options = self.load_dicom_options(TEMP_FILE, len(dicom_file))\n\n # transpose the data\n data_3d = numpy.swapaxes(data_3d, 0, 1)\n data_3d = data_3d[:, :, ::-1]\n\n # write the stack files in nrrd format\n nrrd.write(nrrd_file, data_3d, options)\n\n print", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = 
hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')", "def get_images(algorithm=None):\n if algorithm == \"RMA\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_RMA.npy\").item()\n #dates = np.load(\"Dates_RMA.npy\")\n for i in range(n_im):\n i += i_o # Empieza en la posicion 10\n data = RMA.main(\"dset_\"+str(i)+\".hdf5\")\n #Ims[10+i] = data['Sf_n']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_RMA/Im_\"+str(i)+\".npy\",data['Sf_n'])\n #np.save(\"Set_images_RMA\",Ims) # Para guardar el set de imagenes\n np.save(\"Parameters_RMA\",data)\n np.save(\"Dates_RMA\",np.array(dates))\n\n elif algorithm == \"BP\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_BP.npy\").item()\n #dates = np.load(\"Dates_BP.npy\")\n for i in range(n_im): #(4991):\n i += i_o # Empieza en la posicion 10\n data = BP.main(\"dset_\"+str(i)+\".hdf5\") \n #Ims[i] = data['Im']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_BP/Im_\"+str(i)+\".npy\",data['Im']) # Imagenes de todo el dataset\n np.save(\"Parameters_BP\",data) # Parametros geometricos como dimensiones y grilla de la imagen\n np.save(\"Dates_BP\",np.array(dates)) # Fechas de las iamgenes tomadas de todo el dset\n\n return 'Ok'", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits 
files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... \\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! 
ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output", "def selfies2image(s):\n mol = MolFromSmiles(sf.decoder(s), sanitize=True)\n return Draw.MolToImage(mol)", "def convert_to_nifti(log, brain):\n log.info('Doing convert_to_nifti')\n cmdargs = split('3dAFNItoNIFTI {}'.format(brain))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X = []\n y = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y.append(label)\n X.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X,y", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def convert_image(rel_path_in, rel_path_out):\n #Lade Bild mit Originalmaske im Grayscale-Modus\n img = cv2.imread(rel_path_in, cv2.IMREAD_GRAYSCALE)\n #Jetzt steht in img ein 2D-Array/Matrix mit jedem Graufstufen-Wert der Pixel\n #Skaliere Pixelwerte runter\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n #Hole Pixel-Wert an aktueller Stelle\n wert = img[zeilen_index][spalten_index]\n #Falls Wert != 0 (also Pixel gehoert nicht zum Hintergrund)\n if wert != 0: # != 0 statt == 255, da auch z.B. 253er Werte in den Masken existieren... 
(vielleicht durch Konvertierung in anderes Format?)\n #Markiere den Pixel mit 1 statt 255\n img[zeilen_index][spalten_index]=1\n #print(img)\n #*NACHDEM* alle Pixel skaliert wurden, zeichne Umrandung der Objekte\n umrandung_zeichnen(img)\n #change_color(img, 0, 255)\n #change_color(img, 1, 0)\n #print(img)\n #Schreibe Ergebnis-Bild in uebergebene Datei\n cv2.imwrite(rel_path_out, img)", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def test_BinaryDilation_interface(tmpdir):\n\n data = np.zeros((80, 80, 80), dtype=\"uint8\")\n data[30:-30, 35:-35, 20:-20] = 1\n\n nb.Nifti1Image(data, np.eye(4), None).to_filename(\"mask.nii.gz\")\n\n out1 = (\n BinaryDilation(\n in_mask=str(Path(\"mask.nii.gz\").absolute()),\n radius=4,\n )\n .run()\n .outputs.out_mask\n )\n shutil.move(out1, \"large_radius.nii.gz\")\n\n out2 = (\n BinaryDilation(\n in_mask=str(Path(\"mask.nii.gz\").absolute()),\n radius=1,\n )\n .run()\n .outputs.out_mask\n )\n shutil.move(out2, \"small_radius.nii.gz\")\n\n out_final = (\n BinarySubtraction(\n in_base=str(Path(\"large_radius.nii.gz\").absolute()),\n in_subtract=str(Path(\"small_radius.nii.gz\").absolute()),\n )\n .run()\n .outputs.out_mask\n )\n\n out_data = np.asanyarray(nb.load(out_final).dataobj, dtype=\"uint8\")\n\n assert np.all(out_data[data] == 0)", "def get_bayer_images(self) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(c.get_image()) for c in self.cameras]", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. 
From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! (to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def preprocess_nico(path: Path) -> None:\n for superclass in (\"animals\", \"vehicles\"):\n superclass_dir = path / superclass\n for class_dir in superclass_dir.glob(\"*\"):\n for context_dir in class_dir.glob(\"*\"):\n images_paths: list[Path] = []\n for ext in (\"jpg\", \"jpeg\", \"png\", \"gif\"):\n images_paths.extend(context_dir.glob(f\"**/*.{ext}\"))\n for counter, image_path in enumerate(images_paths):\n try:\n image = Image.open(image_path)\n if image.format == \"GIF\":\n image = image.convert(\"RGBA\")\n # Convert from gif to jpeg by extracting the first frame\n new_image = _gif_to_jpeg(image)\n new_image_path = image_path.with_suffix(\".jpg\")\n # Delete the original gif\n image_path.unlink()\n new_image.save(new_image_path, \"JPEG\")\n assert new_image_path.exists()\n image_path = new_image_path\n\n concept = image_path.parent.parent.stem\n context = image_path.parent.stem\n new_name = (\n image_path.parent\n / f\"{concept}_{context}_{counter:04}{image_path.suffix}\".replace(\n \" \", \"_\"\n )\n )\n image_path.rename(new_name)\n # Image is corrupted - delete it\n except UnidentifiedImageError:\n image_path.unlink()", "def create_brainmask(registered_images, truncate_intensity=(.01, .99), verbose=True, antsxnet_cache_directory=None):\n\n preprocessed_image = ants.image_clone(registered_images)\n if antsxnet_cache_directory is None:\n antsxnet_cache_directory = \"ANTsXNet\"\n\n # Truncate intensity\n if truncate_intensity is not None:\n quantiles = (preprocessed_image.quantile(truncate_intensity[0]),\n preprocessed_image.quantile(truncate_intensity[1]))\n if verbose:\n print(\"Preprocessing: truncate intensities ( low =\", quantiles[0], \", high =\", quantiles[1], \").\")\n\n preprocessed_image[preprocessed_image < quantiles[0]] = quantiles[0]\n 
preprocessed_image[preprocessed_image > quantiles[1]] = quantiles[1]\n\n # Brain extraction\n if verbose:\n print(\"Preprocessing: brain extraction.\")\n probability_mask = antspynet.brain_extraction(preprocessed_image,\n antsxnet_cache_directory=antsxnet_cache_directory,\n verbose=verbose)\n mask = ants.threshold_image(probability_mask, 0.5, 1, 1, 0)\n\n return preprocessed_image, mask", "def cellomics2tiff((file_in,dir_out)):\n \n file_out = cutils.getTifPath(file_in,dir_out)\n\n # don't repeat conversion if converted file exists\n # and is newer than the original data\n if os.path.isfile(file_out) \\\n and os.stat(file_out).st_mtime > os.stat(file_in).st_mtime:\n return\n\n if platform.system() == 'Linux':\n #cmd = ['bfconvert','-nogroup',file_in,file_out,'> /dev/null']\n #cmd = ['/opt/bftools/bfconvert','-nogroup',file_in,file_out,']\n #print \" \".join(cmd)\n #FNULL = open(os.devnull,'w')\n #subprocess.call(cmd, stdout=FNULL, shell=False)\n #FNULL.close()\n cmd = '/opt/bftools/bfconvert -overwrite -nogroup %s %s > /dev/null'%(file_in,file_out)\n #print cmd\n os.system(cmd)\n else:\n cmd = ['bfconvert','-nogroup',file_in,file_out]\n print \" \".join(cmd)\n subprocess.call(cmd, shell=True)", "def get_images(image_folder_root, image_label_list):\n file_dcm=[]\n X_test = []\n y_test = []\n for file_name,label in image_label_list:\n try:\n current_file = pydicom.dcmread(image_folder_root + file_name + '.dcm')\n pixel_array = current_file.pixel_array\n if (pixel_array.shape != (512,512)):\n continue\n file_dcm.append((file_name,label,brain_window(current_file)))\n y_test.append(label)\n X_test.append(pydicom.dcmread(image_folder_root + file_name + '.dcm').pixel_array)\n except ValueError:\n continue\n return X_test,y_test", "def _get_blobs(im, rois):\n\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n return blobs, im_scale_factors", "def _preprocessing(self, path: str) -> np.array:\n if Checker.check_input_type_bool(path, 'nii'):\n image = sitk.ReadImage(path)\n self.space = image.GetSpacing()\n image = sitk.GetArrayFromImage(image).astype('float32')\n\n elif Checker.check_input_type_bool(path, 'npy'):\n image = np.load(path)\n self.space = [1., 1., 1.]\n warnings.warn(\n '.npy is not recommended as an image format.'\n 'Since spacing cannot be identified from .npy, spacing is set as [1., 1., 1.].', UserWarning)\n\n elif Checker.check_input_type_bool(path, 'dcm'):\n raise ValueError(\n '.dcm is not supported.'\n 'Please convert dcm dummies to nii format.')\n\n else:\n input_ext = path.split('.')[-1]\n raise ValueError(\n f'.{input_ext} format is not supported.')\n\n self.img_shape = image.shape\n\n # normalize\n windowing_range = [-40., 120.]\n windowing_min = windowing_range[0] - windowing_range[1] // 2\n windowing_max = windowing_range[0] + windowing_range[1] // 2\n image = ndimage.zoom(image, [.5, .5, .5], order=1, mode='constant')\n image = np.clip(image, windowing_min, windowing_max)\n image = (image - windowing_min) / (windowing_max - windowing_min)\n image = image[np.newaxis, ..., np.newaxis]\n return image", "def preprocessing(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I = scipy.misc.imresize(I,size=(img_dim,img_dim))\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n #I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n I = I/255\n return I.astype(np.float).ravel() #flattens", "def create_png_images(self):\n if self.subject is 
None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True", "def _get_blobs(im, rois):\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n \n return blobs, im_scale_factors", "def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. 
Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]", "def test_imibread(self):\n gen = imibread(TEST_MIB)\n arr = next(gen)\n self.assertEqual(arr.shape, (256, 256))\n self.assertEqual(arr.dtype, np.dtype(\">u2\"))", "def import_L1B(cls,infile):\r\n try:\r\n import gdal\r\n import rasterio\r\n except:\r\n raise ImportError(\"Can not import module GDAL or RasterIO\")\r\n\r\n\r\n image=image()\r\n\r\n #except:\r\n # raise ImportError(\"Can not read band\")\r", "def _project_im_rois(im_rois, im_scale_factor):\n rois = np.zeros(im_rois.shape)\n rois[:,0:8] = im_rois[:,0:8] * im_scale_factor\n return rois", "def prepare_roidb(self, imdb):\n sizes = imdb.get_sizes()\n roidb = imdb.roidb\n for i in range(imdb.nrof_images):\n roidb[i]['image'] = imdb.image_path_at(i)\n roidb[i]['width'] = sizes[i][0]\n roidb[i]['height'] = sizes[i][1]\n gt_overlaps = roidb[i]['gt_overlaps'].toarray() # need gt_overlaps as a dense array for argmax\n \n max_classes = gt_overlaps.argmax(axis=1) # gt class that had the max overlap\n roidb[i]['max_classes'] = max_classes\n \n max_overlaps = gt_overlaps.max(axis=1) # max overlap with gt over classes (columns)\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n roidb[i]['bbox_targets'] = self._compute_targets(roidb[i]['boxes'], max_overlaps, max_classes)", "def test_CCI_SM_v33_025Img_img_reading_2D():\n parameter = ['sm']\n img_c = CCI_SM_025Img(\n os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"combined\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMV-COMBINED-20160101000000-fv03.3.nc\"),\n parameter=parameter)\n\n image_c = img_c.read()\n\n assert sorted(image_c.data.keys()) == sorted(parameter)\n assert image_c.data['sm'].shape == (720, 1440)\n assert image_c.lon[0, 0] == -179.875\n assert image_c.lon[0, 1439] == 179.875\n assert image_c.lat[0, 0] == 89.875\n assert image_c.lat[719, 0] == -89.875\n assert abs(image_c.data['sm'][203, 693] - 0.23484) <= 1e-5\n assert image_c.lon.shape == image_c.lat.shape == (720, 1440)\n\n\n parameter = ['sm']\n img_a = CCI_SM_025Img(\n os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"active\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMS-ACTIVE-20160101000000-fv03.3.nc\"),\n parameter=parameter)\n\n image_a = img_a.read()\n\n assert sorted(image_a.data.keys()) == sorted(parameter)\n assert image_a.data['sm'].shape == (720, 1440)\n assert image_a.lon[0, 0] == -179.875\n assert image_a.lon[0, 1439] == 179.875\n assert image_a.lat[0, 0] == 89.875\n assert image_a.lat[719, 0] == -89.875\n assert abs(image_a.data['sm'][203, 693] - 67.70157) <= 1e-5\n assert image_a.lon.shape == image_a.lat.shape == (720, 1440)\n\n\n parameter = ['sm']\n img_p = CCI_SM_025Img(\n os.path.join(os.path.dirname(__file__), \"esa_cci_sm-test-data\",\n \"esa_cci_sm_dailyImages\", \"v03.3\", \"passive\", \"2016\",\n \"ESACCI-SOILMOISTURE-L3S-SSMV-PASSIVE-20160101000000-fv03.3.nc\"),\n parameter=parameter)\n\n image_p = img_p.read()\n\n assert sorted(image_p.data.keys()) == sorted(parameter)\n assert image_p.data['sm'].shape == (720, 1440)\n assert image_p.lon[0, 0] == 
-179.875\n assert image_p.lon[0, 1439] == 179.875\n assert image_p.lat[0, 0] == 89.875\n assert image_p.lat[719, 0] == -89.875\n assert abs(image_p.data['sm'][203, 693] - 0.322685) <= 1e-5\n assert image_p.lon.shape == image_p.lat.shape == (720, 1440)", "def load_cifar_images(filename):\n\n from load_cifar import load_file\n from load_cifar import label_dict\n\n data,labels = load_file(filename)\n\n # two classes to keep\n class0 = label_dict['airplane']\n class1 = label_dict['bird']\n # remove all but two classes\n keep = np.logical_or(labels==class0,labels==class1)\n data = data[keep,...]\n labels = labels[keep]\n # set labels to 0 or 1\n labels[labels==class0]=0\n labels[labels==class1]=1\n\n # rgb -> grayscale\n gray_data = rgb2gray(data)\n return data,gray_data,labels", "def _get_blobs(im, rois):\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n blobs['rois'] = _get_rois_blob(rois, im_scale_factors)\n \n return blobs, im_scale_factors", "def _get_blobs(im, rois):\n blobs = {}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n if cfg.MODEL.FASTER_RCNN and rois is None:\n height, width = blobs['data'].shape[2], blobs['data'].shape[3]\n scale = im_scale_factors[0]\n blobs['im_info'] = np.array([[height, width, scale]], dtype=np.float32)\n if rois is not None:\n blobs['rois'] = _get_rois_blob(rois, im_scale_factors)\n return blobs, im_scale_factors", "def process(bayer_images, red_gains, blue_gains, cam2rgbs):\n bayer_images.shape.assert_is_compatible_with((None, None, None, 4))\n with tf.name_scope(None, 'process'):\n # White balance.\n bayer_images = apply_gains(bayer_images, red_gains, blue_gains)\n # Demosaic.\n bayer_images = tf.clip_by_value(bayer_images, 0.0, 1.0)\n images = demosaic(bayer_images)\n # Color correction.\n images = apply_ccms(images, cam2rgbs)\n # Gamma compression.\n images = tf.clip_by_value(images, 0.0, 1.0)\n images = gamma_compression(images)\n return images", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def _process_image_files(self, input_files):\n # Handle single file-object as arg.\n if not isinstance(input_files, list):\n input_files = [input_files]\n self._check_batch_size(input_files)\n # Handle unnames images as lists of file objects. Named by index in list.\n image_files = []\n for i, tup in enumerate(input_files):\n if not isinstance(tup, tuple):\n image_files.append((tup, str(i)))\n assert hasattr(image_files[i][0], 'read'), (\n 'image_files[%d] has wrong type: %s. 
Must be file-object with read method.') % (\n i, type(image_files[i][0]))\n else: # already tuples passed in.\n image_files.append(tup)\n # Resize any images such that the min dimension is in range.\n if CAN_RESIZE:\n for i, image_tup in enumerate(image_files):\n image_files[i] = self._resize_image_tuple(image_tup)\n # Return a list of (bytes, name) tuples of the encoded image bytes.\n image_data = []\n for image_file in image_files:\n image_data.append((bytes(image_file[0].read()), image_file[1]))\n return image_data", "def bilateralize(ds):\n ds_ROIs = ds.copy('deep')\n ds_ROIs.sa['bilat_ROIs'] = [label.split(' ')[-1] for label in ds_ROIs.sa.all_ROIs]\n mv.h5save(results_dir + 'ds_ROIs.hdf5', ds_ROIs)\n print('Combined lateralized ROIs for the provided dataset and saved the dataset.')\n return ds_ROIs", "def read_image(fileame, representation):\n validate_representation(representation)\n\n im = imread(fileame)\n if representation == 1 and is_rgb(im):\n # We should convert from Grayscale to RGB\n im = rgb2gray(im)\n return im.astype(np.float32)\n\n return normlized_image(im)", "def test_sanity_ati2():\n\n with Image.open(TEST_FILE_ATI2) as im:\n im.load()\n\n assert im.format == \"DDS\"\n assert im.mode == \"RGB\"\n assert im.size == (256, 256)\n\n assert_image_equal_tofile(im, TEST_FILE_DX10_BC5_UNORM.replace(\".dds\", \".png\"))", "def _get_image_blob(roidb, scale_inds):\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n # gt boxes: (x1, y1, x2, y2, theta, cls) 5->6,4->5\n im = cv2.imread(roidb[0]['image'])\n if im is None:\n print \"Read image failed:\", roidb[0]['image']\n\n if roidb[0]['flipped']:\n im = im[:, ::-1, :]\n\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n im, new_gt_boxes = _augment_data(im, roidb[0]['boxes'][gt_inds, 0:8])\n\n gt_boxes = np.empty((len(new_gt_boxes), 9), dtype=np.float32)\n gt_boxes[:, 0:8] = new_gt_boxes\n\n target_size = cfg.TRAIN.SCALES[scale_inds[0]]\n im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,\n cfg.TRAIN.MAX_SIZE)\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n gt_boxes[:, 0:8] *= im_scales\n gt_boxes[:, 8] = roidb[0]['gt_classes'][gt_inds[:len(new_gt_boxes)]]\n return blob, im_scales, gt_boxes", "def _imequalize(self, results):\n for key in results.get('img_fields', ['image']):\n img = results[key]\n results[key] = mmcv.imequalize(img).astype(img.dtype)", "def get_all_labels(input_yaml, riib=False):\n images = yaml.load(open(input_yaml, 'rb').read())\n\n count = 0\n for i in range(len(images)):\n images[i]['path'] = os.path.abspath(os.path.join(os.path.dirname(input_yaml), images[i]['path']))\n if riib:\n images[i]['path'] = images[i]['path'].replace('.png', '.pgm')\n images[i]['path'] = images[i]['path'].replace('rgb/train', 'riib/train')\n images[i]['path'] = images[i]['path'].replace('rgb/test', 'riib/test')\n for box in images[i]['boxes']:\n box['y_max'] = box['y_max'] + 8\n box['y_min'] = box['y_min'] + 8\n\n #for box in images[i]['boxes']:\n # box['y_max'] = -1 * box['y_max']\n\n #print(images[i])\n #print(images[i]['path'])\n # change the path to be just the filename (e.g. 
\"1234.jpg\")\n count += 1\n fname = images[i]['path'][::-1] # reverse the string\n index = fname.find(\"/\")\n fname = fname[:index][::-1] # isolate just the file name and reverse back \n images[i]['path'] = fname\n return images", "def imageToArray(i):\r\n a=gdalnumeric.numpy.fromstring(i.tostring(),'b')\r\n a.shape=i.im.size[1], i.im.size[0]\r\n return a", "def _iter_images(self):\n raise NotImplementedError", "def prepare_roidb(self):\n # for pascal_voc dataset\n roidb = self.gt_roidb()\n # data argument\n if self.cfg.if_flipped is True:\n print('append flipped images to training')\n roidb = self.append_flipped_images(roidb)\n\n sizes = [PIL.Image.open(self.image_path_at(i)).size\n for i in range(self.num_images)]\n\n for i in range(len(self.image_index)):\n roidb[i]['image'] = self.image_path_at(i)\n roidb[i]['width'] = sizes[i][0]\n roidb[i]['height'] = sizes[i][1]\n # need gt_overlaps as a dense array for argmax\n gt_overlaps = roidb[i]['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb[i]['max_classes'] = max_classes\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\n self.roi_data = ROIGenerator(roidb, self.num_classes, self.cfg)\n return self.roi_data", "def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)", "def classify_all_images(cc):\n print 'Classify images'\n images = cc.d.images\n for img_idx in range(comm_rank, len(images), comm_size): # PARALLEL\n print 'classify image %d/%d at %d'%(img_idx/comm_size, len(images)/comm_size, comm_rank)\n img = images[img_idx]\n scores = classify_image(cc, img_idx)\n savefile = config.get_classifier_score_name(img, cc.L)\n cPickle.dump(scores, open(savefile,'w'))", "def convert(self,inputDir, outputDir):\n print \"mp_cellomics2tiff:\",\"INPUT:\", inputDir\n print \"mp_cellomics2tiff:\",\"OUTPUT:\", outputDir\n\n # input image files\n c01s = glob.glob(inputDir + \"/*.C01\")\n\n if os.path.isdir(outputDir):\n # check if entire dataset is already converted\n if cutils.isDatasetConverted(inputDir,outputDir):\n logfile = open(os.path.join(outputDir,'cellomics2tiff_error.log'),'w')\n msg = \"Seems that data was converted already, stopping.\"\n print >> logfile, msg\n print \"mp_cellomics2tiff:\",msg\n logfile.close()\n return\n else:\n os.makedirs(outputDir)\n\n metadataDir = os.path.join(outputDir,\"metadata\")\n if not os.path.isdir(metadataDir):\n os.makedirs(metadataDir)\n \n logging.basicConfig(filename=outputDir+'/cellomics2tiff.log', format='%(levelname)s:%(message)s', level=logging.DEBUG)\n logging.basicConfig(level=logging.DEBUG)\n\n # convert the metadata in MS Access files to CSV \n msg = \"Converting metadata to \", metadataDir\n print \"mp_cellomics2tiff:\",msg \n mdbs = glob.glob(inputDir + \"/*.MDB\")\n mdbs.extend(glob.glob(inputDir + \"/*.mdb\"))\n for mdb in mdbs:\n print \"MDB:\",mdb\n mdb_export(mdb, metadataDir)\n\n # Convert the data\n start_time_convert = time.time()\n msg = \"Converting...\"\n print 
\"mp_cellomics2tiff:\",msg \n logging.info(msg)\n pool = multiprocessing.Pool(None)\n files = glob.glob(inputDir + \"/*.C01\")\n\n # http://stackoverflow.com/questions/8521883/multiprocessing-pool-map-and-function-with-two-arguments\n r = pool.map(cellomics2tiff, zip(files,repeat(outputDir)))\n msg = \"Time elapsed: \" + str(time.time() - start_time_convert) + \"s\"\n print \"mp_cellomics2tiff:\",msg\n logging.info(msg)", "def convert_all(input: str, out: str):\n dateien = listdir(input)\n for datei in dateien:\n out_datei = datei.replace(\" \", \"_\") # Leertasten durch Unterstriche ersetzen\n convert_image(input + datei, out + out_datei)", "def blackout_images(image,ticlass):\n rgb = ocropy.intarray()\n ticlass.textImageProbabilities(rgb,image)\n r = ocropy.bytearray()\n g = ocropy.bytearray()\n b = ocropy.bytearray()\n ocropy.unpack_rgb(r,g,b,rgb)\n components = ocropy.intarray()\n components.copy(g)\n n = ocropy.label_components(components)\n print \"[note] number of image regions\",n\n tirects = ocropy.rectarray()\n ocropy.bounding_boxes(tirects,components)\n for i in range(1,tirects.length()):\n r = tirects.at(i)\n ocropy.fill_rect(image,r,0)\n r.pad_by(-5,-5)\n ocropy.fill_rect(image,r,255)", "def load_all_dicom_images(self, verbose=True):\n if verbose: print(\"Loading dicom files ... This may take a moment.\")\n\n path = self.get_path_to_dicom_files()\n fnames = [fname for fname in os.listdir(path)\n if fname.endswith('.dcm') and not fname.startswith(\".\")]\n images = []\n for fname in fnames:\n image = dicom.dcmread(os.path.join(path,fname))\n\n seid = str(image.SeriesInstanceUID).strip()\n stid = str(image.StudyInstanceUID).strip()\n\n if seid == self.series_instance_uid and\\\n stid == self.study_instance_uid:\n images.append(image)\n\n # ##############################################\n # Clean multiple z scans.\n #\n # Some scans contain multiple slices with the same `z` coordinate \n # from the `ImagePositionPatient` tag.\n # The arbitrary choice to take the slice with lesser \n # `InstanceNumber` tag is made.\n # This takes some work to accomplish...\n zs = [float(img.ImagePositionPatient[-1]) for img in images]\n inums = [float(img.InstanceNumber) for img in images]\n inds = list(range(len(zs)))\n while np.unique(zs).shape[0] != len(inds):\n for i in inds:\n for j in inds:\n if i!=j and zs[i] == zs[j]:\n k = i if inums[i] > inums[j] else j\n inds.pop(inds.index(k))\n\n # Prune the duplicates found in the loops above.\n zs = [zs[i] for i in range(len(zs)) if i in inds]\n images = [images[i] for i in range(len(images)) if i in inds]\n\n # Sort everything by (now unique) ImagePositionPatient z coordinate.\n sort_inds = np.argsort(zs)\n images = [images[s] for s in sort_inds]\n # End multiple z clean.\n # ##############################################\n\n return images", "def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = 
(np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n 
plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def masterbias(input_file):\n #Set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all bias images\n bias = glob.glob('bias*.fits')\n print 'Loading bias images \\nTotal of bias files = ',len(bias),'\\nFiles = \\n'\n print bias\n print '\\nCreating superbias \\n'\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #copy bias images to save_path\n os.system('cp bias*.fits '+save_path)\n #change to sabe_path\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superbias.fits') == True:\n os.system('rm superbias.fits')\n # --------------------------------------------------------------------------\n # --- Using only with a few bias images\n #create the list of bias images\n #bias_list = string.join(bias,',')\n #combine the bias image and create the superbias\n #iraf.imcombine(bias_list,'superbias.fits')\n #iraf.imstat('superbias.fits')\n # --------------------------------------------------------------------------\n\n #Using numpy package to take the mean value of bias images\n #Problem: does not include the superbias header in this version\n bias_array = []\n for i in range(len(bias)):\n image = fits.getdata(bias[i])\n bias_array.append(np.array(image,dtype='Float64'))\n superbias_array = np.median(bias_array,axis=0)\n hdu_superbias = fits.PrimaryHDU(superbias_array)\n hdulist_superbias = fits.HDUList([hdu_superbias])\n hdulist_superbias.writeto('superbias.fits')\n\n #clean previos bias files\n print '\\n Cleaning bias*.fits images ....\\n'\n os.system('rm bias*.fits')\n print '\\n.... 
done.'\n #print output\n #test of outpu value\n #os.remove('superbias.fits')\n #Verify if the image was created:\n output = glob.glob('superbias*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #END of the masterbias reduction messsage\n print '\\nsuperbias.fits created!\\n'\n print '\\nEND of superbias reduction!\\n'\n #obtain the value of return\n if output == 1:\n print '!!! ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def test_on_merlin_image_binary(self):\n im = diffread(TEST_MIB)\n self.assertEqual(im.shape, (256, 256))\n self.assertEqual(im.dtype, np.dtype(\">u2\"))", "def prepare_images(images):\n images = color.rgb2lab(images)\n\n l = images[:,:,:,:1]/100.\n ab = images[:,:,:,1:]/200. 
+ 0.5\n\n return l, ab", "def cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIUC2IUC2_cast(*args)", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def clean(imagedata):\n\n if isinstance(imagedata, ndarray):\n imagedata = [imagedata]\n\n outdict = [array_to_im(im) for im in imagedata]\n\n return {'images': outdict}", "def _convert_images_to_omni_observations(self, measurements, uniform_sphere_pixel_coords,\n holes_prior, batch_size, num_timesteps, num_cams, image_dims):\n\n # coords from all scene cameras wrt world\n\n images_list = list()\n images_var_list = list()\n cam_rel_poses_list = list()\n cam_rel_poses_cov_list = list()\n cam_rel_mats_list = list()\n validity_mask_list = list()\n for key, item in measurements.to_iterator():\n if key == 'img_mean':\n # B x N x 1 x H x W x (3+f)\n images_list.append(ivy.expand_dims(item, 2))\n elif key == 'img_var':\n # B x N x 1 x H x W x (3+f)\n images_var_list.append(ivy.expand_dims(item, 2))\n elif key == 'pose_mean':\n # B x N x 1 x 6\n cam_rel_poses_list.append(ivy.expand_dims(item, 2))\n elif key == 'pose_cov':\n # B x N x 1 x 6 x 6\n cam_rel_poses_cov_list.append(ivy.expand_dims(item, 2))\n elif key == 'cam_rel_mat':\n # B x N x 1 x 3 x 4\n cam_rel_mats_list.append(ivy.expand_dims(item, 2))\n elif key == 'validity_mask':\n validity_mask_list.append(ivy.expand_dims(item, 2))\n else:\n raise Exception('Invalid image key: {}'.format(key))\n\n # B x N x C x H x W x (3+f)\n images = ivy.concatenate(images_list, 2)\n\n # B x N x C x H x W x (3+f)\n var_to_project = ivy.concatenate(images_var_list, 2)\n\n # B x N x C x 6\n cam_to_cam_poses = ivy.concatenate(cam_rel_poses_list, 2)\n\n # B x N x C x 3 x 4\n cam_to_cam_mats = ivy.concatenate(cam_rel_mats_list, 2)\n\n # B x N x C x 6 x 6\n cam_to_cam_pose_covs = ivy.concatenate(cam_rel_poses_cov_list, 2)\n\n # B x N x C x 1\n validity_masks = ivy.concatenate(validity_mask_list, 2) > 0\n\n # B x N x OH x OW x (3+f)\n holes_prior_var = ivy.ones([batch_size, num_timesteps] + self._sphere_img_dims + [3 + self._feat_dim],\n dev_str=self._dev) * 1e12\n\n # reset invalid regions to prior\n\n # B x N x C x H x W x (3+f)\n images = ivy.where(validity_masks, images,\n ivy.concatenate((images[..., 0:2], ivy.zeros_like(images[..., 2:], dev_str=self._dev)), -1))\n\n # B x N x C x H x W x (3+f)\n var_to_project = ivy.where(validity_masks, var_to_project,\n ivy.ones_like(var_to_project, dev_str=self._dev) * 1e12)\n\n # B x N x OH x OW x (3+f) # B x N x OH x OW x (3+f)\n return self._frame_to_omni_frame_projection(\n cam_to_cam_poses, cam_to_cam_mats, uniform_sphere_pixel_coords, images[..., 0:3], images[..., 3:],\n cam_to_cam_pose_covs, var_to_project, holes_prior, holes_prior_var, batch_size, num_timesteps, num_cams,\n image_dims)", "def convert_im(im, tree, lbls, block_imgs):\n h, w, _ = im.shape\n step = 16\n for r in range(0, h, step):\n for c in range(0, w, step):\n rnext = min(r+step, h)\n cnext = min(c+step, w)\n patch = im[r:rnext, c:cnext]\n color = np.average(patch, axis=(0, 1))\n\n # Get closest block\n _, ind = tree.query([color], k=1)\n lbl = lbls[ind[0][0]]\n block = block_imgs[lbl]\n\n # Copy values\n rmax = rnext-r\n cmax = cnext-c\n im[r:rnext, c:cnext] = block[:rmax, :cmax]", "def cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIF2IF2_cast(*args)", "def read_vanhateren_images (n_imgs=5):\n folder_name = r'D:\\VanHateren\\vanhateren_imc' # change 
this to point to the directory which holds the van hateren data\n # files = listdir(folder_name)\n onlyfiles = [ f for f in listdir(folder_name) if isfile(join(folder_name,f)) ]\n imgs = []\n for i in range(n_imgs):\n filename = join(folder_name, onlyfiles[i])\n with open(filename, 'rb') as handle:\n s = handle.read()\n arr = array.array('H', s)\n arr.byteswap()\n img_i = np.array(arr, dtype='uint16').reshape(1024, 1536)\n imgs.append(img_i) \n return imgs\n #pylab.imshow(img)\n #pylab.show()", "def collate_images(imgs, final=None):\n ims = [Image.open(pic) for pic in imgs]\n size = [_.size for _ in ims]\n fsize = (sum([_[0] for _ in size]),\n max([_[1] for _ in size]))\n blank = Image.new(\"RGB\", fsize, (255, 255, 255))\n s = 0\n for im in ims:\n blank.paste(im, (s, 0))\n s += im.size[0]\n if final is not None:\n blank.save(final)\n return blank", "def save2nifti(self, file_path):\n #Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 #signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 #128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n #Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n data = np.rot90(self._data, 3)\n if data_type.has_key(data.dtype.type):\n self._header['datatype'] = data_type[data.dtype.type]\n self._header['cal_max'] = data.max()\n self._header['cal_min'] = 0\n image = nib.nifti1.Nifti1Image(data, None, self._header)\n nib.nifti1.save(image, file_path)", "def images_for_denoising():\r\n return list_images(relpath('image_dataset/train'), True)", "def convert_all_in_bmp(self, path, new_path):\n DbWorker.mkdir(new_path)\n for i in os.listdir(path):\n self.convert_and_save_image(path+'/'+i, new_path)", "def split(bin_lid):\n resolvers = resolver.parse_stream(RESOLVER)\n suffixes = ['_cfa_' + camera for camera in 'LR']\n outdirs = [scratch(bin_lid,bin_lid + suffix) for suffix in suffixes]\n for od in outdirs:\n mkdirs(od)\n imagenames = list(list_images(bin_lid))\n (h,w)=(None,None)\n tiff = None\n # read an image to determine h,w\n for imagename in imagenames:\n for outdir,suffix in zip(outdirs,suffixes):\n LRout = os.path.join(outdir,remove_extension(imagename) + suffix + '.tif')\n if h is None:\n if tiff is None:\n tiff = as_tiff(imagename)\n cfain = resolvers['image'].resolve(pid=as_tiff(imagename)).value\n (h,w) = 
imread(cfain,plugin='freeimage').shape\n # now fork\n pids = []\n for n in range(NUM_PROCS):\n pid = os.fork()\n if pid == 0:\n for imagename in imagenames[n::NUM_PROCS]:\n tiff = None\n for outdir,suffix,offset in zip(outdirs,suffixes,[0,1]):\n LRout = os.path.join(outdir,remove_extension(imagename) + suffix + '.tif')\n if not os.path.exists(LRout):\n if tiff is None:\n tiff = as_tiff(imagename)\n cfain = resolvers['image'].resolve(pid=as_tiff(imagename)).value\n logging.info('loading %s' % cfain)\n cfa = imread(cfain,plugin='freeimage')\n (h,w) = cfa.shape\n if not os.path.exists(LRout):\n logging.info('splitting %s -> %s' % (cfain, LRout))\n half = w / 2\n off = offset * half\n imsave(LRout,cfa[:,off:off+half],plugin='freeimage')\n os._exit(0)\n else:\n pids += [pid]\n for pid in pids:\n os.waitpid(pid,0)\n logging.info('joined splitting process %d' % pid)\n return (h,w),outdirs", "def main(dataset_path, output_path):\n print(\"converting all snirf files in the input dataset to nwb...\")\n print(f\"dataset directory: {dataset_path}\")\n print(f\"output directory: {output_path}\")\n print()\n\n for subject_id in list_subject_ids(dataset_path):\n convert_subject_snirf_to_nwb(\n input_root=dataset_path, output_root=output_path, subject_id=subject_id\n )\n\n print()\n print(\"Conversion successful!\")", "def dicom_to_nii(acqpath):\n log.info('anonymizer.py dicom_to_nii {0}'.format(acqpath))\n\n subj_path = get_abspath(acqpath)\n\n if subj_path.isfile():\n try:\n subprocess.call('dcm2nii {0}'.format(subj_path), shell=True)\n except Exception as e:\n log.error('Error calling dcm2nii on {0}'.format(subj_path))\n return -1\n\n else:\n for ext in dicom_file_extensions:\n regex = '*' + ext\n if subj_path.glob(regex):\n try:\n subprocess.call('dcm2nii {0}'.format(subj_path.joinpath(regex)), shell=True)\n except Exception as e:\n log.error('Error calling dcm2nii on {0}'.format(subj_path.joinpath(regex)))\n return -1\n\n return 0", "def __init__(self,imageDataPath,onlyRGB=False):\r\n self.bands = [] if onlyRGB else [None] * 13\r\n self.bandsNames = [] if onlyRGB else [None] * 13\r\n self.isRgb = onlyRGB\r\n with os.scandir(imageDataPath) as imageSCND:\r\n for imgFolderItem in sorted(imageSCND, key=lambda e: e.name):\r\n if imgFolderItem.is_dir():\r\n self.day = imgFolderItem.name[11:15]\r\n self.month = imgFolderItem.name[15:17]\r\n self.year = imgFolderItem.name[17:19]\r\n self.nameDiscriminator = imgFolderItem.name[-27:-5] #Needed because snap unity\r\n granulePath = imageDataPath + slash + imgFolderItem.name + \\\r\n slash + \"GRANULE\" + slash\r\n #log.debug(\"Granule path is: \" + granulePath)\r\n with os.scandir(granulePath) as granuleSCND:\r\n for granuleItem in sorted(granuleSCND, key=lambda e: e.name):\r\n imgDataPath = granulePath + granuleItem.name + \\\r\n slash + \"IMG_DATA\" + slash\r\n #log.debug(\"IMG_DATA path is: \" + imgDataPath)\r\n with os.scandir(imgDataPath) as imgDataSCND:\r\n for band in sorted(imgDataSCND, key=lambda e: e.name):\r\n bandPath = imgDataPath + slash + band.name\r\n if onlyRGB and Bands.TCI.Id in band.name:\r\n #log.debug(\"loading TCI: \" + band.name)\r\n self.bands.append(rasterio.open(bandPath))\r\n self.bandsNames.append(band.name)\r\n return None\r\n elif not onlyRGB and Bands.TCI.Id not in band.name:\r\n #log.debug(\"loading band: \" + band.name)\r\n index = utils.bandIdToIndex( \\\r\n utils.getBandIdFromBandPath(band.name))\r\n self.bands[index] = rasterio.open(bandPath)\r\n self.bandsNames[index] = band.name\r\n return None", "def preprocess(exam, 
data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' + image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def annot_to_gifti(atlas):\n\n labels, ctab, names = nib.freesurfer.read_annot(atlas)\n\n darr = nib.gifti.GiftiDataArray(labels, intent='NIFTI_INTENT_LABEL',\n datatype='NIFTI_TYPE_INT32')\n labeltable = nib.gifti.GiftiLabelTable()\n for key, label in enumerate(names):\n (r, g, b), a = (ctab[key, :3] / 255), (1.0 if key != 0 else 0.0)\n glabel = nib.gifti.GiftiLabel(key, r, g, b, a)\n glabel.label = label.decode()\n labeltable.labels.append(glabel)\n\n return nib.GiftiImage(darrays=[darr], labeltable=labeltable)", "def dcm2niix() -> str:\n fsldir = fslplatform.platform.fsldir\n candidates = [\n shutil.which('dcm2niix')\n ]\n\n if fsldir is not None:\n candidates.insert(0, op.join(fsldir, 'bin', 'dcm2niix'))\n\n for c in candidates:\n if c is not None and op.exists(c):\n return c\n\n return 'dcm2niix'", "def read_image(self, ifd):\n ifd.img_data = np.array([], dtype='uint8')\n strips = ifd.get_strips() # [(strip_offset, strip_byte_count)]\n for strip in strips:\n ifd.img_data = np.append(ifd.img_data, self.tif_file.read(size=strip[1], location=strip[0]))", "def _project_im_rois(im_rois, scales):\n im_rois = im_rois.astype(np.float, copy=False)\n\n if len(scales) > 1:\n widths = im_rois[:, 2] - im_rois[:, 0] + 1\n heights = im_rois[:, 3] - im_rois[:, 1] + 1\n areas = widths * heights\n scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)\n diff_areas = np.abs(scaled_areas - 224 * 224)\n levels = diff_areas.argmin(axis=1)[:, np.newaxis]\n else:\n levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)\n\n rois = im_rois * scales[levels]\n\n return rois, levels", "def _project_im_rois(im_rois, scales):\n im_rois = im_rois.astype(np.float, copy=False)\n\n if len(scales) > 1:\n widths = im_rois[:, 2] - im_rois[:, 0] + 1\n heights = im_rois[:, 3] - im_rois[:, 1] + 1\n\n areas = widths * heights\n scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :]**2)\n diff_areas = np.abs(scaled_areas - 224 * 224)\n levels = diff_areas.argmin(axis=1)[:, np.newaxis]\n else:\n levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)\n\n rois = im_rois * scales[levels]\n\n return rois, levels", "def check_affines(data_dir, out_dir, input_type):\n fmri_layout = BIDSLayout(str(data_dir), validate=False, derivatives=False)\n xcp_layout = BIDSLayout(str(out_dir), validate=False, derivatives=False)\n if input_type == \"cifti\": # Get the .dtseries.nii\n denoised_files = xcp_layout.get(\n invalid_filters=\"allow\",\n datatype=\"func\",\n extension=\".dtseries.nii\",\n )\n space = denoised_files[0].get_entities()[\"space\"]\n bold_files = fmri_layout.get(\n invalid_filters=\"allow\",\n datatype=\"func\",\n space=space,\n extension=\".dtseries.nii\",\n )\n\n elif input_type == \"nifti\": # Get the .nii.gz\n # Problem: it's collecting native-space data\n denoised_files 
= xcp_layout.get(\n datatype=\"func\",\n suffix=\"bold\",\n extension=\".nii.gz\",\n )\n space = denoised_files[0].get_entities()[\"space\"]\n bold_files = fmri_layout.get(\n invalid_filters=\"allow\",\n datatype=\"func\",\n space=space,\n suffix=\"bold\",\n extension=\".nii.gz\",\n )\n\n else: # Nibabies\n denoised_files = xcp_layout.get(\n datatype=\"func\",\n space=\"MNIInfant\",\n suffix=\"bold\",\n extension=\".nii.gz\",\n )\n bold_files = fmri_layout.get(\n invalid_filters=\"allow\",\n datatype=\"func\",\n space=\"MNIInfant\",\n suffix=\"bold\",\n extension=\".nii.gz\",\n )\n\n bold_file = bold_files[0].path\n denoised_file = denoised_files[0].path\n\n if input_type == \"cifti\":\n assert (\n nb.load(bold_file)._nifti_header.get_intent()\n == nb.load(denoised_file)._nifti_header.get_intent()\n )\n else:\n if not np.array_equal(nb.load(bold_file).affine, nb.load(denoised_file).affine):\n raise AssertionError(f\"Affines do not match:\\n\\t{bold_file}\\n\\t{denoised_file}\")\n\n print(\"No affines changed.\")", "def regrid_in_miriad(taskid, image_name, hdu_image, b, c):\n\n\t# Change the reference pixel of beam model to reference pixel of image to correct\n\tcb_model = beam_lookup.model_lookup2(taskid, b)\n\thdulist_cb = pyfits.open(cb_model)\n\thdulist_cb[0].header['CRVAL1'] = hdu_image[0].header['CRVAL1']\n\thdulist_cb[0].header['CRVAL2'] = hdu_image[0].header['CRVAL2']\n\n\t# Rescale to appropriate frequency. This should work for either drift scans or Gaussian regression (only tested on latter):\n\tavg_cube_freq = (hdu_image[0].header['CRVAL3'] + hdu_image[0].header['CDELT3'] * hdu_image[0].data.shape[0]) * u.Hz\n\thdulist_cb[0].header['CDELT1'] = (hdulist_cb[0].header['CDELT1'] * get_cb_model_freq().to(u.Hz) / avg_cube_freq).value\n\thdulist_cb[0].header['CDELT2'] = (hdulist_cb[0].header['CDELT2'] * get_cb_model_freq().to(u.Hz) / avg_cube_freq).value\n\n\tcb2d_name = 'temp_b{}_c{}_cb-2d.fits'.format(b, c)\n\thdulist_cb.writeto(cb2d_name)\n\thdulist_cb.close()\n\n\tprint('\\tRegridding in miriad using model {}'.format(cb_model))\n\n\tfits = lib.miriad('fits')\n\tregrid = lib.miriad('regrid')\n\n\t# Convert files to miriad:\n\tfits.in_ = image_name\n\tfits.out = '{}.mir'.format(image_name[:-5])\n\tfits.op = 'xyin'\n\tfits.go()\n\n\tfits.in_ = cb2d_name\n\tfits.out = '{}.mir'.format(cb2d_name[:-5])\n\tfits.op = 'xyin'\n\tfits.go()\n\n\t# Regrid beam image\n\tregrid.in_ = '{}.mir'.format(cb2d_name[:-5])\n\tregrid.out = '{}_rgrid.mir'.format(cb2d_name[:-5])\n\tregrid.tin = '{}.mir'.format(image_name[:-5])\n\tregrid.axes = '1,2'\n\tregrid.go()\n\n\t# Convert regrided beam image to fits\n\tfits.in_ = '{}_rgrid.mir'.format(cb2d_name[:-5])\n\tfits.out = '{}_rgrid.fits'.format(cb2d_name[:-5])\n\tfits.op = 'xyout'\n\tfits.go()\n\n\t# Make cb 3D and save as FITS:\n\thdu_cb = pyfits.open('{}_rgrid.fits'.format(cb2d_name[:-5]))\n\td_new = np.ones((hdu_image[0].header['NAXIS3'], hdu_cb[0].header['NAXIS2'], hdu_cb[0].header['NAXIS2']))\n\td_beam_cube = d_new * hdu_cb[0].data\n\thdu_cb[0].data = np.float32(d_beam_cube)\n\n\tprint('\\tWriting compound beam cube.')\n\thdu_cb.writeto('{}_cb.fits'.format(image_name[:-5]))\n\n\thdu_cb.close()\n\n\t# Clean up the extra Miriad & 2D cb files\n\tos.system('rm -rf {}*.mir'.format(image_name[:-5]))\n\tos.system('rm -rf {}*'.format(cb2d_name[:-5]))", "def cast_and_normalise_images(images):\n images = (tf.cast(images, tf.float32) / 255.0) - 0.5\n return images", "def _canonicalize(smi):\n return rdkit_.to_smiles(rdkit_.from_smiles(smi))", "def 
load_base_images(base_img):\n if base_img is not None:\n if not os.path.exists(base_img):\n base_img = os.path.join(LIGHTHOUSES_DIR, base_img)\n return (\n Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'),\n Image.open(os.path.join(base_img, 'off.gif'))\n )\n return None, None", "def export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=None):\n import wbia\n\n imgsetid_list = ut.unique_unordered(ut.flatten(ibs.get_image_imgsetids(gid_list)))\n gsgrid_list = ut.unique_unordered(ut.flatten(ibs.get_image_gsgrids(gid_list)))\n\n # TODO: write SQL query to do this\n am_rowids = ibs._get_all_annotmatch_rowids()\n flags1_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid1(am_rowids)]\n flags2_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid2(am_rowids)]\n flag_list = ut.and_lists(flags1_list, flags2_list)\n am_rowids = ut.compress(am_rowids, flag_list)\n # am_rowids = ibs.get_valid_aids(ibs.get_valid_aids())\n\n rowid_subsets = {\n const.ANNOTATION_TABLE: aid_list,\n const.NAME_TABLE: nid_list,\n const.IMAGE_TABLE: gid_list,\n const.ANNOTMATCH_TABLE: am_rowids,\n const.GSG_RELATION_TABLE: gsgrid_list,\n const.IMAGESET_TABLE: imgsetid_list,\n }\n ibs_dst = wbia.opendb(dbdir=new_dbpath, allow_newdir=True)\n # Main merge driver\n merge_databases(ibs, ibs_dst, rowid_subsets=rowid_subsets)\n logger.info('Exported to {!r}'.format(new_dbpath))\n return new_dbpath", "def normalise(image):", "def zip_imagenet100c():\n #First make sure the directory we are given is correct!\n if not os.path.isdir(DATA_SRC_ROOT):\n raise Exception(\"Bad filepath given\")\n\n #create the destiantion directories if they don't exist\n if not os.path.isdir(IMAGENET100_DIR):\n os.mkdir(IMAGENET100_DIR)\n\n #grab the subset wnids for the 100 class-subset\n with open(IMAGENET100_CLASSES) as f:\n subset_wnids = f.readlines()\n subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab\n\n #Grab the names of all of the folders inside the root data source\n #Structure is distortion/sub_distortion/level/wnids\n for distortion in os.listdir(DATA_SRC_ROOT):\n if distortion != \"meta.bin\":\n print(distortion)\n\n folder_path = os.path.join(DATA_SRC_ROOT, distortion)\n\n if not os.path.isdir(folder_path):\n continue\n\n for sub_distortion in os.listdir(folder_path):\n print(sub_distortion)\n\n subfolder_path = os.path.join(folder_path, sub_distortion)\n\n if not os.path.isdir(subfolder_path):\n continue\n\n for level in os.listdir(subfolder_path):\n print(level)\n\n level_path = os.path.join(subfolder_path, level)\n\n #grab the correcrt validation d9recotires\n for wnid in os.listdir(level_path):\n wnid_path = os.path.join(level_path, wnid)\n\n if not os.path.isdir(wnid_path):\n continue\n\n if wnid in subset_wnids:\n dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)\n\n shutil.copytree(wnid_path, dest_path)\n\n #copy the metadata bin file\n meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')\n meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')\n\n shutil.copy(meta_file, meta_dest)\n\n #Zip the destinatio file\n shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)" ]
[ "0.5878483", "0.5656656", "0.5646914", "0.5597715", "0.5594963", "0.55353117", "0.5508064", "0.54495406", "0.54446286", "0.5424856", "0.5355759", "0.5332224", "0.5326884", "0.53148586", "0.52936196", "0.5291717", "0.52617556", "0.5258072", "0.5257662", "0.525628", "0.52407", "0.52356297", "0.5216156", "0.5215271", "0.518692", "0.5172915", "0.516383", "0.51601756", "0.51597375", "0.51367646", "0.51341754", "0.5127874", "0.5117684", "0.51107633", "0.51082355", "0.5103025", "0.50989157", "0.50939643", "0.5086482", "0.5083015", "0.5082068", "0.50765777", "0.50507873", "0.50499666", "0.504508", "0.50425583", "0.50413144", "0.5038906", "0.50376934", "0.5035045", "0.5029886", "0.5027619", "0.5026837", "0.5010731", "0.50043154", "0.5001447", "0.49976462", "0.49964112", "0.49943116", "0.49925286", "0.49906906", "0.49857375", "0.49853122", "0.4984075", "0.4982829", "0.49725705", "0.49691176", "0.4964149", "0.49496174", "0.4948634", "0.4948347", "0.49456835", "0.49370602", "0.49339482", "0.4933302", "0.49298328", "0.4925208", "0.49236876", "0.49209985", "0.49192184", "0.4914476", "0.49104553", "0.49021262", "0.48955938", "0.4894349", "0.4892979", "0.48887798", "0.48856607", "0.48856288", "0.48794454", "0.48790926", "0.487797", "0.4875936", "0.48744738", "0.48727724", "0.4871328", "0.48711738", "0.4865446", "0.48628533", "0.48612463" ]
0.5975682
0
Register the magnitude image from the fieldmap data to the hires structural. Save the matrices for later use in motion correction.
def AlignFieldmaps(self):
    for entry in self.entry_map['fmap']:
        info = self.info[entry]

        # Register the magnitude image at the shortest TR to the T1-IR
        # structural image.
        target = self.info[self.norm_src]['imgfile'] + \
                 self.info[self.norm_src]['suffix']
        source = info['magfile'] + info['suffix']
        matfile = info['matfile']
        fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \
              '-source %s -cost mi -warp shift_rotate'
        cmd = fmt % (info['matfile'], target, source)
        self.CheckExec(cmd, [info['matfile']])

        # Convert to unitary matrix (remove scaling component.)
        cmd = 'cat_matvec -ONELINE %s -P > %s' % \
              (info['matfile'], info['matfile_unitary'])
        self.CheckExec(cmd, [info['matfile_unitary']])

        # Rotate the magnitude image to the new grid.
        fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'
        cmd = fmt % (info['magfile_r']+info['suffix'], \
                     info['matfile_unitary'], info['magfile'] + info['suffix'])
        self.CheckExec(cmd, [info['magfile_r']+info['suffix']])

        # Rotate the fieldmap to the new grid.
        fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'
        cmd = fmt % (info['imgfile_r']+info['suffix'], \
                     info['matfile_unitary'], info['imgfile'] + info['suffix'])
        self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recon(self, spirec):\n tmpdir = tempfile.mkdtemp()\n basename = 'recon'\n basepath = os.path.join(tmpdir, basename)\n pfilename = os.path.abspath(self.pfilename)\n\n # run spirec to get the mag file and the fieldmap file\n cmd = spirec + ' -l --rotate -90 --magfile --savefmap2 --b0navigator -r ' + pfilename + ' -t ' + basename\n self.log and self.log.debug(cmd)\n sp.call(shlex.split(cmd), cwd=tmpdir, stdout=open('/dev/null', 'w'))\n\n self.image_data = np.fromfile(file=basepath+'.mag_float', dtype=np.float32).reshape([self.size_x,self.size_y,self.num_timepoints,self.num_echoes,self.num_slices],order='F').transpose((0,1,4,2,3))\n if os.path.exists(basepath+'.B0freq2') and os.path.getsize(basepath+'.B0freq2')>0:\n self.fm_data = np.fromfile(file=basepath+'.B0freq2', dtype=np.float32).reshape([self.size_x,self.size_y,self.num_echoes,self.num_slices],order='F').transpose((0,1,3,2))\n shutil.rmtree(tmpdir)", "def add_mag(self, band, m, errm):\n if self.mag is None:\n names = ['BAND', 'MAG', 'MAG_ERR']\n dtypes = [TABLES_SCHEMA['MAG'][name]['dtype'] for name in names]\n self.mag = Table(names=names, rows=[[band, m, errm]],\n dtype=dtypes, masked=True)\n _set_table_attributes('MAG', self.mag)\n else:\n if band in self.mag['BAND']:\n self.mag['MAG'][self.mag['BAND'] == band] = m\n self.mag['MAG_ERR'][self.mag['BAND'] == band] = errm\n else:\n self.mag.add_row([band, m, errm])", "def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP", "def _get_magnitudes(self):\n\n self.logging.debug('Get magnitudes ' )\n\n self.mags = {}\n\n steps = ['dbopen netmag', 'dbsubset orid != NULL']\n\n fields = ['orid', 'magid', 'magnitude', 'magtype',\n 'auth', 'uncertainty', 'lddate']\n\n for v in extract_from_db(self.db, steps, fields):\n orid = v.pop('orid')\n self.logging.debug('new mag for orid:%s' % orid)\n\n try:\n v['strmag'] = '%0.1f %s' % ( float(v['magnitude']), v['magtype'] )\n except:\n v['strmag'] = '-'\n\n if not orid in self.mags:\n self.mags[ orid ] = {}\n\n self.mags[ orid ][ v['magid'] ] = v", "def add_Mag_Value(self, input):\n self.magflux = input", "def magnetization(h):\n if h.has_eh: raise\n if h.has_spin: \n mx = extract.mx(h.intra)\n my = extract.my(h.intra)\n mz = extract.mz(h.intra)\n else: raise\n np.savetxt(\"MAGNETIZATION_X.OUT\",np.matrix([h.geometry.x,h.geometry.y,mx]).T)\n np.savetxt(\"MAGNETIZATION_Y.OUT\",np.matrix([h.geometry.x,h.geometry.y,my]).T)\n np.savetxt(\"MAGNETIZATION_Z.OUT\",np.matrix([h.geometry.x,h.geometry.y,mz]).T)", "def add_refmag(self, ra, dec, mag, filt, mjd, mag_err=None):\n\n rm = Table(names=self.__mag_colnames, \n data=[[ra],[dec],[mag],[mag_err],[filt],[mjd]]) \n \n self.__ref_mags.add_row(rm[0])", "def add_mag(self, ra, dec, mag, mag_err, filt, mjd):\n \n pt = Table(names=self.__mag_colnames, \n data=[[ra],[dec],[mag],[mag_err],[filt],[mjd]])\n \n LightCurve.add_tables(self, pt)", "def openMANGASpectrum(self, path_to_logcube, path_to_dapall, bin_number, plate_number, ifu_number, emlines,mpl='mpl-9'):\n\t\t\n\t\t# Read in MAPS file as this contains part of the information.\n\t\tmaps_header = pyfits.open(self.path_to_spectrum)\n\t\tbin_identification = maps_header['BINID'].data\n\t\twhere = np.where(bin_number == bin_identification[0,:,:]) #use 1st channel of bin_identification\n\t\tx_position, y_position = where[0][0], where[1][0]\n\t\t\n\t\t# Get S/N, right ascension and declination.\n\t\tsignal, ra, 
dec = maps_header['BIN_SNR'].data[x_position,y_position], maps_header[0].header['OBJRA'],maps_header[0].header['OBJDEC']\n\t\tvelocity_dispersion = maps_header['STELLAR_SIGMA'].data \t\t\t\t\n\t\tvelocity_dispersion_correction = maps_header['STELLAR_SIGMACORR'].data[0,:,:]\n\t\t\n\t\tif velocity_dispersion[x_position,y_position] > velocity_dispersion_correction[x_position,y_position]:\n\t\t\tcorrection = np.sqrt((velocity_dispersion[x_position,y_position])**2-(velocity_dispersion_correction[x_position,y_position])**2)\n\t\t\tvdisp = correction\n\t\telse:\n\t\t\tvdisp = 0\n\n\t\t\n\t\t# Open LOGCUBE to get the flux, wavelength, and error\n\t\theader = pyfits.open(path_to_logcube)\n\t\twavelength, flux, emline, bit_mask, inverse_variance = header['WAVE'].data, header['FLUX'].data, header['EMLINE'].data, header['MASK'].data, header['IVAR'].data\n\t\tself.wavelength = wavelength\n\t\tcorrect_flux = flux[:,x_position,y_position]\n\t\tcorrect_flux_emline = emline[:, x_position, y_position]\n\t\toutput_flux = correct_flux - correct_flux_emline\n\t\tcorrect_inverse_variance = inverse_variance[:, x_position, y_position]\n\t\t\n\t\tLSF = header['LSF'].data[:,x_position,y_position]\t\t# LSF given as sigma of Gaussian in Angstrom\n\t\tsig2fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))\n\t\tLSF_FWHM = LSF*sig2fwhm\n\t\tRES = wavelength/LSF_FWHM\n\t\t\n\t\tself.r_instrument = RES\n\t\tself.error = np.sqrt(1.0/(correct_inverse_variance))\n\t\tself.bad_flags = np.ones(len(output_flux))\n\t\tself.flux = output_flux\n\t\tself.vdisp = vdisp\n\n\t\tif (mpl=='mpl-10') or (mpl=='mpl-11'):\n\t\t\text=2\n\t\telse:\n\t\t\text=1\n\t\t\n\t\tdap_all = pyfits.open(path_to_dapall)\n\t\tget = np.where(dap_all[ext].data['PLATEIFU']==str(plate_number)+'-'+str(ifu_number))\n\t\tc = const.c.value/1000\n\t\t# Use redshift as measured from the stellar kinematics by the DAP.\n\t\tredshift = dap_all[ext].data['STELLAR_Z'][get][0]\n\t\t# If redshift measurement failed, use redshift estimate from NSA or ancillary programs.\n\t\tif redshift<0:\n\t\t\tredshift = dap_all[ext].data['Z'][get][0]\n\t\t\t\n\t\tsys_vel = maps_header[0].header['SCINPVEL']\n\t\tbin_vel = maps_header['STELLAR_VEL'].data[x_position,y_position]\t\n\t\t\t\n\t\tif redshift<0:\n\t\t\tprint('WARNING: The redshift of this object is negative.')\n\t\t\tprint('z = {}'.format(redshift))\n\t\t\n\t\tredshift_corr = (sys_vel+bin_vel)/c\n\t\tself.redshift = redshift\n\t\tself.restframe_wavelength = self.wavelength / (1.0+redshift_corr)\n\n\t\tbitmask = bit_mask[:,x_position,y_position]&2**0+2**1+2**2+2**3+2**4\n\t\tself.mask_emissionlines(emlines)\n\t\tself.final_mask = (bitmask | self.lines_mask)\n\n\t\tself.wavelength = self.wavelength[(self.final_mask==False)] \n\t\tself.restframe_wavelength = self.restframe_wavelength[(self.final_mask==False)] \n\t\tself.flux = self.flux[(self.final_mask==False)] \n\t\tself.error = self.error[(self.final_mask==False)]\n\t\tself.bad_flags = self.bad_flags[(self.final_mask==False)]\n\t\t\t\t\t\n\t\t# Get Trust flag, object_id, xpos, ypos and instrumental resolution.\n# \t\tself.trust_flag, self.objid, self.r_instrument = True, 0, np.loadtxt(os.path.join(os.environ['FF_DIR'],'data/MaNGA_spectral_resolution.txt'))\n\t\tself.trust_flag, self.objid= True, 0\n# \t\tself.r_instrument = self.r_instrument[0:self.r_instrument.shape[0]//2]\n\t\tself.r_instrument = self.r_instrument[(self.final_mask==False)]\n\t\tself.xpos, self.ypos = ra, dec\n\t\t\n\t\t# gets the amount of MW reddening on the models\n\t\tif self.milky_way_reddening :\n\t\t\tself.ebv_mw 
= get_dust_radec(ra, dec, 'ebv')\n\t\telse:\n\t\t\tself.ebv_mw = 0.0", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# Make a magnitude image for use in checking registration.\n cmd = 'convert_file -f0 -m0 %s %s nii' % \\\n (entry, self.info[entry]['magfile'])\n self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])\n\n# Make fieldmap. 
Use separate loop in case make_fmap aborts.\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n fmapname = self.info[entry]['imgfile']\n if not os.path.exists('%s.nii' % fmapname) or self.redo:\n# Couldn't find or existing fmap, compute a new one.\n if self.verbose:\n extra_args = '-v'\n else:\n extra_args = ''\n if self.info[entry]['correct_fmap_phase'] == 'force':\n extra_args += ' --force-slicecorr'\n elif self.info[entry]['correct_fmap_phase'] == 'omit':\n extra_args += ' --omit-slicecorr'\n cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)\n# error = self.ExecCmd(cmd, halt_on_error=False)\n if self.no_fmapcorr:\n halt_on_error = False\n else:\n halt_on_error = True\n error = self.CheckExec(cmd, ['%s.nii' % fmapname], \\\n halt_on_error=halt_on_error)\n if error:\n self.info[entry]['valid'] = False\n del self.fmaps[entry]", "def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)", "def ingest_magrini_photometric_temperatures(self, filename, extension=-1):\n\n image = fits.open(filename)\n data = image[extension].data\n\n # The columns might be different, but in general if we lowerize them all\n # then we are looking for:\n # ('CNAME_2', 'GES_FLD', 'teffjk', 'jk', 'FILENAME')\n cname_col, teff_col = (data.dtype.names[0], \"teffjk\")\n\n # Update the value in the spectra table, unless it already exists.\n N = 0\n for row in data:\n result = self.execute(\n \"\"\" UPDATE spectra\n SET teff_irfm = %s\n WHERE cname = %s AND\n teff_irfm = 'NaN'\"\"\",\n (float(row[teff_col]), row[cname_col], ))\n return True", "def recarregar_imagem(self):\n self.salvar_imagem(caminho_imagem=self.caminho_temp)\n self.carregar_imagem(caminho_imagem=self.caminho_temp)", "def analyze(self, options, target):\r\n\r\n target = 0\r\n\r\n upf = None\r\n\r\n dwnf = None\r\n\r\n if options.upfile is not None:\r\n\r\n upf = basepath + options.upfile + '.ma'\r\n\r\n if options.downfile is not None:\r\n\r\n dwnf = basepath + options.downfile + '.ma'\r\n\r\n\r\n\r\n for filename in (upf, dwnf):\r\n\r\n # if options.upfile is not None and options.downfile is not None:\r\n\r\n if filename is None:\r\n\r\n break\r\n\r\n im=[]\r\n\r\n self.imageData = []\r\n\r\n print (\"Loading data from %s\" % filename)\r\n\r\n try:\r\n\r\n im = MetaArray(file = filename, subset=(slice(0,2), slice(64,128), slice(64,128)))\r\n\r\n except:\r\n\r\n print(' Error loading upfile: %s' % filename)\r\n\r\n return\r\n\r\n print(' Data loaded')\r\n\r\n target = target + 1\r\n\r\n self.times = im.axisValues('Time').astype('float32')\r\n\r\n self.imageData = im.view(np.ndarray).astype('float32')\r\n\r\n im=[]\r\n\r\n self.analysis_fourier_map(period=self.period, target=target, bins=binsize,)\r\n\r\n if target > 0:\r\n\r\n self.plot_maps(mode = 1, target = target, gfilter = self.gfilter)", "def plot_mira_field(\n field, times, melting_height, display, vmin, vmax, cmap, figname\n):\n\n plt.ioff()\n\n # Opening fig()\n fig = plt.figure(figsize=(10, 5))\n\n # Plotting\n display.plot_vpt(\n field,\n vmin=vmin,\n vmax=vmax,\n cmap=cmap,\n time_axis_flag=True,\n mask_outside=True,\n raster=True,\n )\n plt.ylim((0, 18))\n # Adding melting layer height\n # plt.plot(times, melting_height, \"k-\", label=\"Melting Layer Height\")\n display.plot_grid_lines()\n # Adding legend of melting layer height\n # plt.legend(loc=\"upper right\")\n\n # Saving figure\n plt.savefig(figname + \".png\", dpi=300, bbox_inches=\"tight\")\n\n plt.clf()\n plt.close()\n gc.collect()\n del fig", "def set_mag(self, target_mag):\n raise 
NotImplementedError", "def magn(names, values, data, model_key, plot_key=False):\n # Making sure number of parameters matches number of names given:\n assert len(names) == len(values), \"len(names) != len(values) in datasim.magn\"\n\n zpicks = data['zpicks']\n # Corrected absolute magnitude M of SN.\n M = values[0]\n\n# dlpc, da, integrated_zpicks, integrated_dlpc, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n # Calculating apparent magnitudes of supernovae at the simulated\n # luminosity distances using the distance modulus formula.\n mag = 5 * np.log10(dlpc/10) + M\n# integrated_mag = 5 * np.log10(integrated_dlpc/10) + M\n# print('redshift =',zpicks[-1],'da =', da[-1])\n\n# # plotting interpoated data vs input and full\n# import matplotlib.pyplot as plt\n# import matplotlib as mpl\n# #mpl.style.use('default') # has to be switched on to set figure size\n# mpl.style.use('fivethirtyeight')\n# plt.rcParams['axes.facecolor'] = 'white'\n# plt.rcParams['figure.facecolor'] = 'white'\n# plt.rcParams['grid.color'] = 'white'\n#\n# print('integrated_zpicks',integrated_zpicks[0])\n# print('zpicks', zpicks[0])\n#\n# plt.figure()\n# plt.scatter(integrated_zpicks, integrated_mag, s=70, label='integrated', c=\"C{}\".format(0))\n# plt.plot(zpicks, mag, label='interpolated', linestyle='-', c=\"C{}\".format(1))\n# plt.legend()\n\n if plot_key:\n # Plotting evolution of parameters in the model.\n import plots\n plots.modelcheck(mag, zpicks, plot_var, model_key)\n\n return mag, da", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def field_map(ar_field, ar_coorx, ar_coory, X, picture_out, title, flip=0):\n max_val=max(ar_field)\n\n xmin=min(ar_coorx);xmax=max(ar_coorx)\n ymin=min(ar_coory);ymax=max(ar_coory)\n step=X\n nx=(xmax-xmin)/step+1\n ny=(ymax-ymin)/step+1\n\n ar_indx=np.array((ar_coorx-xmin)/step,int)\n ar_indy=np.array((ar_coory-ymin)/step,int)\n\n ar_map=np.ones((ny,nx))*-99.9\n ar_map[ar_indy,ar_indx]=ar_field\n\n if flip==1:\n ar_map=np.flipud(ar_map)\n\n ar_map2 = ma.masked_where(ar_map <0, ar_map)\n\n\n ut.check_file_exist(picture_out)\n\n pl.clf()\n pl.imshow(ar_map2, interpolation='Nearest',\n origin='lower', vmax=max_val,vmin=0)\n\n pl.title(title)\n pl.colorbar()\n pl.savefig(picture_out)", "def moment0_map(gal_index,quant='m', res=0.5, plane='xy', units='Jy', **kwargs):\n \n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n #print('TEST, fixing R_max = 60')\n p.gal_index = gal_index\n # p.R_max = 60\n \n location = aux.moment0_map_location(res=res,plane=plane,gal_index=p.gal_index)\n \n # Getting matrix with projected emmision values: \n #momentmap = np.load(location, allow_pickle=True)\n #pdb.set_trace()\n if p.ow:\n print('Overwrite is on - creating')\n aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n momentmap = np.load(location, allow_pickle=True)\n #try:\n # momentmap = np.load(location, allow_pickle=True)\n # print('Found stored momentmap data for %i' % p.gal_index)\n # print(location)\n # if p.ow:\n # print('But overwrite is on - creating')\n # aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n # momentmap = np.load(location, allow_pickle=True)\n #except:\n # print('Did not find stored momentmap data for %i - creating' % p.gal_index)\n # aux.convert_cell_data_to_regular_grid(res=res, plane=plane, gal_index=p.gal_index)\n # momentmap = 
np.load(location, allow_pickle=True)\n \n n = momentmap[-1]\n momentmap = momentmap[:-1]\n indexes = momentmap[-1]\n index1, index2 = int(indexes[1]), int(indexes[2])\n\n # Getting the desired quantity to create the momentmap:\n dictionary = p.moment0_dict\n\n num = dictionary[quant]\n lumus = np.array(momentmap[:,3])\n lum = []\n mass = []\n metal = []\n for prop in lumus:\n if (quant == 'Z') | (quant == 'G0') | (quant == 'ne_mw') | (quant == 'Te_mw') | (quant == 'Tk_mw'):\n lum.append(prop[num]/prop[0])\n else:\n lum.append(prop[num])\n lum = np.array(lum)\n\n if 'L_' in quant:\n print('Sum over %s image: %.2f Lsun' % (quant,np.sum(lum)*6))\n print('Or: %.2f K km/s pc^2' % (aux.Lsun_to_K_km_s_pc2(np.sum(lum)*6,quant.replace('L_',''))))\n print('Or: %.2f K km/s pc^2' % (aux.Lsun_to_K_km_s_pc2(1.8e8,quant.replace('L_',''))))\n lum = lum / (res**2)\n \n # Converting to Jy*km/s / kpc^2 units:\n if units == 'Jy':\n if 'L_' in quant:\n quant_name = quant.replace('L_','')\n frequencies = p.freq\n \n z = p.zred\n D = 10 # Mpc (Luminosity Distance)\n freq = frequencies[quant_name]\n \n lum = lum*(1+z) / (1.04e-3 * D**2 * freq)\n # Soloman et al. 1997\n \n # Creating momentmaps:\n ax1,ax2 = momentmap[:, 1], momentmap[:, 2]\n \n nrows, ncols = int(n[1]), int(n[2])\n grid = lum.reshape((nrows, ncols))\n #pdb.set_trace()\n # grid = np.flipud(grid)\n # normal = mpl.colors.Normalize(vmin = min(lum), vmax = max(lum))\n\n # Setting 0 values to something very low\n print(len(lum))\n print(len(lum[lum == 0]))\n grid[grid == 0] = 1e-30\n grid[np.isnan(grid)] = 1e-30\n\n # Default min,max values\n if p.log: grid = np.log10(grid)\n if (not p.vmin) : \n p.vmin = np.max(grid)/1e5\n if p.log: p.vmin = np.max(grid) - 5\n if (not p.vmax) : \n p.vmax = 5*np.max(grid)\n if p.log: p.vmax = np.max(grid)\n \n if quant == 'Z':\n p.vmin = 0.05\n p.vmax = 3\n\n if p.add:\n fig,ax = plt.gcf(),p.ax\n else:\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_axes([0.15, 0.15, 0.8, 0.8]) \n ax.axis('equal')\n\n if not p.R_max:\n gal_ob = gal.galaxy(p.gal_index)\n p.R_max = gal_ob.R_max\n grid = np.flipud(grid) \n if p.rotate:\n grid = np.rot90(grid)\n grid = np.rot90(grid)\n gal_ob = gal.galaxy(p.gal_index)\n #cell_data = gal_ob.cell_data.get_dataframe()\n #extent = np.max(np.abs(cell_data[['x','y','z']].values))\n if p.R_max:\n extent = 1*p.R_max\n else:\n extent = 50\n cs = ax.imshow(grid, extent=(-extent, extent, -extent, extent),\\\n vmin=p.vmin, vmax=p.vmax, interpolation='nearest', cmap=p.cmap)\n print(extent)\n # Add half-light radius\n x_axis = np.linspace(-extent,extent,grid.shape[0])\n y_axis = np.linspace(-extent,extent,grid.shape[1])\n x,y = np.meshgrid(x_axis,y_axis)\n r = np.sqrt(x**2 + y**2)\n r_bins = np.linspace(0,r.max(),200)\n L_bins = np.zeros(len(r_bins)-1)\n l0 = 0\n for i in range(len(r_bins)-1):\n L_bins[i] = np.sum(10.**grid[(r < r_bins[i+1])])\n R_half = r_bins[1::][L_bins >= 0.5*L_bins.max()][0]\n print('R_half: ',R_half)\n circle = plt.Circle((0,0),R_half,ec='green',fc=None,fill=False,lw=3,ls='--')\n ax.add_patch(circle)\n\n #if p.R_max: extent = p.R_max\n print(p.R_max,extent)\n ax.set_xlim([-1.1*extent,1.1*extent])\n ax.set_ylim([-1.1*extent,1.1*extent])\n\n\n if num == 0:\n #plt.title('mass density')\n labels = 'log surface density (M$_{\\odot}$ / kpc$^2$)' \n if 'L_' in quant:\n #plt.title(quant + ' density')\n if units == 'Jy':\n labels = 'Jy${\\cdot}$km/s / kpc$^2$'\n else:\n labels = 'log surface brightness density (L$_{\\odot}$ / kpc$^2$)'\n if quant == 'Z': \n labels = 'log Z 
(Z$_{\\odot}$)'\n if quant == 'FUV': \n labels = 'log FUV flux (G$_{0}$)'\n\n if not p.add: plt.xlabel(plane[0]+' [kpc]')\n if not p.add: plt.ylabel(plane[1]+' [kpc]')\n\n formatter = mpl.ticker.LogFormatterExponent(10, labelOnlyBase=False, minor_thresholds=(100,20))\n if p.legend: \n if not p.label: labels = ''\n cbar = fig.colorbar(cs, label=labels, pad=0, shrink=0.85)#0.5)#\n \n if p.savefig:\n plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'moment0/'): os.mkdir(p.d_plot + 'moment0/') \n plt.savefig(p.d_plot + 'moment0/moment0_%i_%s%s' % (p.gal_index,p.sim_name,p.sim_run) + '_' + plane + '_res' + str(res) +'_'+ quant.replace('(','').replace(')','') + '.png',dpi=500)", "def add_snapshot(self):\n\n\t\tself.mu_values = self.cvt_handler.mu_values\n\t\tdim_mu = self.mu_values.shape[1]\n\t\taux_snapshot = self.file_handler.parse(self.namefile_prefix + str(dim_mu-1) + self.file_format, self.output_name)\n\t\tsnapshot = aux_snapshot.reshape(aux_snapshot.shape[0],1)\n\t\tself.snapshots = np.append(self.snapshots, snapshot, 1)\n\t\t\n\t\tself.print_info()", "def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = 
maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map", "def magnetic_field(date: datetime.datetime, lat, lon, alt, output_format='cartesian'):\n g = GeoMag()\n return g.GeoMag(np.array([lat, lon, alt]), date, location_format='geodetic', output_format=output_format)", "def apply_magnitude_offset(conn, ref_phot, refimg_id, delta_mag, delta_mag_err, log):\n\n log.info('Applying the magnitude offset to all photometry calculated using reference image '+str(refimg_id))\n\n query = 'SELECT phot_id, star_id, hjd, calibrated_mag, calibrated_mag_err, calibrated_flux, calibrated_flux_err FROM phot WHERE reference_image=\"'+str(refimg_id)+'\"'\n phot_data = phot_db.query_to_astropy_table(conn, query, args=())\n\n values = []\n for dp in phot_data:\n\n dp['calibrated_mag'] += delta_mag\n dp['calibrated_mag_err'] = np.sqrt(dp['calibrated_mag_err']*dp['calibrated_mag_err'] + delta_mag_err*delta_mag_err)\n\n (cal_flux, cal_flux_error) = photometry.convert_mag_to_flux(dp['calibrated_mag_err'],\n dp['calibrated_mag_err'])\n dp['calibrated_flux'] = cal_flux\n dp['calibrated_flux_err'] = cal_flux_error\n\n values.append( ( str(dp['phot_id']), str(dp['star_id']), str(dp['hjd']),\n str(dp['calibrated_mag']), str(dp['calibrated_mag_err']),\n str(dp['calibrated_flux']), str(dp['calibrated_flux_err']) ) )\n\n command = 'INSERT OR REPLACE INTO phot (phot_id, star_id, hjd, calibrated_mag, calibrated_mag_err, calibrated_flux, calibrated_flux_err) VALUES (?,?,?,?,?,?,?)'\n\n cursor = conn.cursor()\n\n cursor.executemany(command, values)\n\n conn.commit()", "def flux2mag(flux, threshold=None, mode=\"table\") :\n\n\timport parameters as param\n\timport scipy.interpolate\n\n\tif mode == \"compute\" :\n\t\treturn flux2mag_conversion(mag, threshold)\n\telif not mode == \"table\":\n\t\traise ValueError(\"Mode not recognised.\")\n\n\tif threshold is None:\n\t\tthreshold = param.ppm_threshold\n\tthreshold *= 1e-6\n\n\tif type(flux) == list:\n\t\tflux = np.asarray(flux)\n\telif type(flux) in [np.float, np.float32, np.float64]:\n\t\tflux = np.asarray([flux])\n\n\tx = np.log10(param.flux_in_aperture[:,1])\n\ty = param.flux_in_aperture[:,0]\n\tinterp_out = scipy.interpolate.UnivariateSpline(x, y)\n\tinterp_in = 
scipy.interpolate.interp1d(x, y)\n\n\tflux = np.asarray(flux)\n\tflux_of_star = flux / threshold\n\tflux_in_aperture = flux_of_star * (np.pi * param.radius_psf * param.radius_psf)\n\n\tflux_in_aperture = flux_in_aperture.clip(min=1e-40)\n\n\tif np.log10(flux_in_aperture) >= np.amin(x) and np.log10(flux_in_aperture) <= np.amax(x):\n\t\tmag = interp_in(np.log10(flux_in_aperture))\t\n\telse:\n\t\tmag = interp_out(np.log10(flux_in_aperture))\n\n\n\n\tmag[np.where(mag>param.magnitude_max)]=param.magnitude_max\n\n\treturn mag", "def Register(filename,path_r,path_Ms,path_Ls,cont):\r\n\timg_in_stack = 0;\r\n\tprint(os.path.join(path_r,filename))\r\n\tImage = imread(os.path.join(path_r,filename))\r\n\tpixels_h = 24\r\n\tnumber,height,widht = Image.shape\r\n\thalf_image = int(height/2)\r\n\tregister, t1 = translation(Image[0,pixels_h:half_image,:], Image[0,pixels_h+half_image:,:]) #Compare the first frame of the stack and find registration number\r\n\tprint(register)\r\n\tlow_r = np.zeros((number,half_image,widht),dtype=np.uint16)\r\n\tlow = np.zeros((number,half_image-pixels_h,widht),dtype=np.uint16)\r\n\thigh = np.zeros((number,half_image-pixels_h,widht),dtype=np.uint16)\r\n\thigh_r = np.zeros((number,half_image,widht), dtype=np.uint16)\r\n\thigh[:,:,:] = Image[:,pixels_h:half_image,:]\r\n\tlow[:,:,:] = Image[:,half_image+pixels_h:,:]\r\n\tprint(Image[:,half_image:,:].shape)\r\n\tif register == 0:\r\n\t\tlow_r[:,register:,:] = Image[:,half_image:,:]\r\n\t\tlow[:,:,:] = low_r[:,pixels_h:,:]\r\n\telif register < 0:\r\n\t\thigh_r[:,-register:,:] = Image[:,-register:half_image,:]\r\n\t\thigh[:,:,:] = high_r[:,pixels_h:,:]\r\n\telse:\r\n\t\tlow_r[:,register:,:] = Image[:,half_image:-register,:]\r\n\t\tlow[:,:,:] = low_r[:,pixels_h:,:]\r\n\t#imsave(os.path.join(str(path_Ms),\"P_\" + filename), high[:,:,:])\r\n\t#imsave(os.path.join(str(path_Ls),\"P_\" + filename),low[:,:,:])\r\n\tfor i in range(0,number):\r\n\t\t#Save the ith image, upload it with a number in the name and delete it\r\n\t\t#High exposure image\r\n\t\tprint(cont+i+1)\r\n\t\timsave(os.path.join(str(path_Ms),str(cont+i+1)+\".tif\"),high[i,:,:]) #Save it\r\n\t\t#Upload(S,M_id,os.path.join(str(path_Ms),str(cont+i+1)+\".tif\"), str(cont+i+1)+\".tif\") #Upload it\r\n\t\t#os.remove(os.path.join(str(path_Ms),str(cont+i+1)+\".tif\")) #Delete it\r\n\t\t#Low exposure image\r\n\t\timsave(os.path.join(str(path_Ls),str(cont+i+1)+\".tif\"),low[i,:,:]) #Save it\r\n\t\t#Upload(S,L_id,os.path.join(str(path_Ls),str(cont+i+1)+\".tif\"), str(cont+i+1)+\".tif\") #Upload it\r\n\t\t#os.remove(os.path.join(str(path_Ls),str(cont+i+1)+\".tif\")) #Delete it\r\n\t\timg_in_stack += 1\r\n\tcont += img_in_stack\r\n\treturn cont", "def addTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file ',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'/home')\n\t\tif fname:\n\t\t\tfor sha in shas:\n\t\t\t\t#get texture Path\n\t\t\t\tif not sha.a.texture_Occ.exists:\n\t\t\t\t\toccText = sha.a.texture_Occ.add( dt='string' )\n\t\t\t\tsha.a.texture_Occ.v = fname", "def _show_magnitudes(self) -> None:\n\n # create the label text\n if self.magnitude_range is not None:\n mag_min = self.magnitude_range.min_magnitude\n mag_max = self.magnitude_range.max_magnitude\n bandpass = self.magnitude_range.bandpass\n if mag_max - mag_min < 0.1:\n mag_text = bandpass + \" = %.1f\" % mag_max\n else:\n mag_text = bandpass + \" = %.1f - %.1f\" % (mag_min, mag_max)\n else:\n mag_text = \"no magnitude available\"\n\n # add the label\n self.draw_label(\n 
self.ra,\n self.dec - 4.8 * u.arcmin,\n mag_text,\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n color=(0, 0.5, 1),\n )", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = 
np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def register ():\n dsf_geom_import.register ()\n dsf_morph_import.register ()\n dsf_morph_export.register ()\n dsf_uvset_import.register ()\n dsf_arm_import.register ()\n dsf_pose_import.register ()\n dsf_wm_import.register ()\n dsf_geom_export.register ()\n dsf_prop_export.register ()", "def camera(self):\n self.spectrum = self.spectrum", "def loadDataFile(self, filename):\n self.magneticfield = MagneticField(filename)\n self._meqfile = filename\n\n wallR, wallZ = self.magneticfield.getWall()\n\n self.rmin, self.rmax = np.amin(wallR), np.amax(wallR)\n self.zmin, self.zmax = np.amin(wallZ), np.amax(wallZ)", "def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # 
quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax = 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf = kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] 
= 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')", "def export_register_map(self, mapfile_location):\n with open(mapfile_location, 'w') as f:\n f.write(\"# This register map has been generated for the odin-devices SI5324 driver.\\n\")\n\n # The registers that will be read are the ones found in output register\n # maps from DSPLLsim.\n for register in SI5324._regmap_registers:\n\n if register == 136:\n # This register will read 00, but should be written as 0x40 to match\n # the versions generated by DSPLLsim. 
This would trigger an iCAL if\n # written, but is ignored in apply_register_map().\n f.write(\"136, 40h\\n\")\n continue\n\n value = self.readU8(register)\n logger.info(\"Read register {}: {:02X}\".format(register, value))\n f.write(\"{}, {:02X}h\\n\".format(register, value))\n\n logger.info(\"Register map extraction complete, to file: {}\".format(mapfile_location))", "def make_temperature_map(time: u.s, field, instr, **kwargs):\n plot_settings = {'cmap': cm.get_cmap('inferno')}\n plot_settings.update(kwargs.get('plot_settings', {}))\n bins, bin_range = instr.make_detector_array(field)\n visible = is_visible(instr.total_coordinates, instr.observer_coordinate)\n hist_coordinates, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=visible)\n with h5py.File(instr.counts_file, 'r') as hf:\n try:\n i_time = np.where(u.Quantity(hf['time'],\n get_keys(hf['time'].attrs), ('unit', 'units')) == time)[0][0]\n except IndexError:\n raise IndexError(f'{time} is not a valid time in observing time for {instr.name}')\n weights = np.array(hf['electron_temperature'][i_time, :])\n units = u.Unit(get_keys(hf['electron_temperature'].attrs, ('unit', 'units')))\n hist, _, _ = np.histogram2d(instr.total_coordinates.Tx.value,\n instr.total_coordinates.Ty.value,\n bins=(bins.x.value, bins.y.value),\n range=(bin_range.x.value, bin_range.y.value),\n weights=weights * visible)\n hist /= np.where(hist_coordinates == 0, 1, hist_coordinates)\n meta = instr.make_fits_header(field, instr.channels[0])\n del meta['wavelnth']\n del meta['waveunit']\n meta['bunit'] = units.to_string()\n meta['detector'] = 'Electron Temperature'\n meta['comment'] = 'Column-averaged electron temperature calculated by synthesizAR'\n\n return GenericMap(hist.T, meta, plot_settings=plot_settings)", "def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255", "def plot_sm_orbit(smdf, orbit_name, fieldname='Soil_Moisture', vmin=0, vmax=1, save_fig_directory=None):\n\n logging.debug('Plotting {} orbit, field {}...'.format(orbit_name, fieldname))\n\n fig, m, dot_size = setup_sm_plot(smdf['Latitude'].values, smdf['Longitude'].values)\n\n if fieldname == 'Soil_Moisture':\n plt.title('{} : {}'.format(fieldname.replace('_',' '), orbit_name), wrap=True)\n cmap = 'viridis'\n c = smdf[fieldname] # geophysical variable to plot\n m.scatter(smdf['Longitude'].values,\n smdf['Latitude'].values,\n latlon=True,\n c=c,\n s=dot_size,\n zorder=10,\n cmap=cmap,\n vmin=vmin,\n vmax=vmax,\n )\n cbar = m.colorbar()\n cbar.set_label(r'[m$^3$/m$^3$]')\n\n else:\n plt.title('{} : {}'.format(fieldname.replace('_',' '), orbit_name), wrap=True)\n cmap = 'viridis'\n c = smdf[fieldname] # geophysical variable to plot \n m.scatter(smdf['Longitude'].values,\n smdf['Latitude'].values,\n latlon=True,\n c=c,\n s=dot_size,\n zorder=10,\n cmap=cmap,\n ) \n cbar = m.colorbar()\n\n if (save_fig_directory != None):\n # Requested to save the figure\n save_name = 'orbit-({})-field-({})-{}.png'.format(orbit_name, fieldname.replace(' ', ''), datetime.now().strftime('%Y%m%d-%H%M%S'))\n logging.debug('Attempting to save figure with name \"{}\"'.format(save_name))\n plt.savefig(os.path.join(save_fig_directory, save_name))\n plt.close()\n else:\n plt.show()", "def _fractalize(self, f, compMap):\n\n from PIL 
import Image\n\n def toImage(cmObject):\n \"\"\"cmObject is the ComplexMap instance\"\"\"\n size = self.gridsize, self.gridsize\n cm = cmObject()\n master = []\n for item in cm:\n master.extend(item)\n\n #Apply default Mandelbrot Set Function\n master = map(f, master)\n\n col1 = (0,0,102,0)\n col2 = (255,204,51,0)\n\n def select_color(x):\n if x == 1: return col1\n else: return col2\n\n master = map(select_color, master)\n \n image = Image.new(\"RGBA\", size, (0,0,0,0))\n image.putdata(master)\n return image\n\n image_width = 0\n image_height = 0\n image_list = []\n #Unpack row\n for (y, row) in enumerate(compMap):\n image_row = []\n\n #Unpack columns\n for item in row:\n #Unpack the individual\n image_row.append(toImage(item))\n\n width = len(image_row) * self.gridsize\n height = self.gridsize\n row_holder_image = Image.new(\"RGBA\", (width, height), (0,0,0,0)) \n\n for (n, image) in enumerate(image_row):\n row_holder_image.paste(image, ((n*self.gridsize),0))\n\n image_list.append(row_holder_image)\n \n image_width = width\n image_height = len(image_list) * self.gridsize\n\n image_whole = Image.new(\"RGBA\", (image_width, image_height), (0,0,0,0))\n for (n, image) in enumerate(image_list):\n image_whole.paste(image, (0, (n*self.gridsize)))\n image_whole.save(\"fractal.jpg\", \"JPEG\")\n\n return", "def CreateAndSave_l_matrices(self, lmin, filename, theta, phi):\n\t\tif lmin == 0:\n\t\t\tmode = 'w'\n\t\telse:\n\t\t\tmode = 'r+'\n\n\t\tf = tables.openFile(filename, mode)\n\t\troot = f.root\n\t\tindex_iterator = self.Config.AngularRepresentation.index_iterator\n\n\t\tprint \"Legendre ...\"\n\t\tprevl = -1;\n\t\tfor i, lm in enumerate(index_iterator.__iter__()):\n\t\t\tprint i\n\t\t\tif lm.l >= lmin:\n\t\t\t\tif lm.l != prevl:\n\t\t\t\t\tmidx = 0\n\t\t\t\t\tleg = zeros([(2 * lm.l + 1), len(theta), len(phi)], dtype=complex)\n\n\t\t\t\tfor j, my_theta in enumerate(theta):\n\t\t\t\t\tleg[midx,j,:] = sph_harm(lm.m, lm.l, phi, my_theta)\n\t\t\t\t\n\t\t\t\tmidx += 1\n\n\t\t\t\tif midx == 2 * lm.l + 1:\n\t\t\t\t\tf.createArray('/','l_' + str(lm.l),leg)\n\n\t\t\t\tprevl = lm.l\n\t\tf.setNodeAttr(\"/\",\"lmax\",index_iterator.lmax)\n\t\tf.close()", "def fake_image(self, arf, src_flux, exposure,\n pix_scale=0.5, num_pix=[2400,2400],\n lmin=None, lmax=None, save_file=None, **kwargs):\n assert len(src_flux) == len(self.lam)\n\n xlen, ylen = num_pix\n xcen, ycen = xlen//2, ylen//2\n ccdx, ccdy = np.meshgrid(np.arange(xlen), np.arange(ylen))\n radius = np.sqrt((ccdx - xcen)**2 + (ccdy - ycen)**2)\n\n # Typical ARF files have columns 'ENERG_LO', 'ENERG_HI', 'SPECRESP'\n arf_data = fits.open(arf)['SPECRESP'].data\n arf_x = 0.5*(arf_data['ENERG_LO'] + arf_data['ENERG_HI'])\n arf_y = arf_data['SPECRESP']\n arf = InterpolatedUnivariateSpline(arf_x, arf_y, k=1)\n\n # Source counts to use for each energy bin\n if self.lam_unit == 'angs':\n ltemp = self.lam * u.angstrom\n ltemp_kev = ltemp.to(u.keV, equivalencies=u.spectral()).value\n arf_temp = arf(ltemp_kev)[::-1]\n src_counts = src_flux * arf_temp * exposure\n else:\n src_counts = src_flux * arf(self.lam) * exposure\n\n # Decide which energy indexes to use\n if lmin is None:\n imin = 0\n else:\n imin = min(np.arange(len(self.lam))[self.lam >= lmin])\n if lmax is None:\n iend = len(self.lam)\n else:\n iend = max(np.arange(len(self.lam))[self.lam <= lmax])\n\n #iend = imax\n #if imax < 0:\n # iend = np.arange(len(self.lam)+1)[imax]\n\n # Add up randomized image for each energy index value\n r_asec = radius * pix_scale\n result = np.zeros_like(radius)\n for i in 
np.arange(imin, iend):\n # interp object for halo grid (arcsec, counts/arcsec^2)\n h_interp = InterpolatedUnivariateSpline(\n self.theta, self.norm_int[i,:] * src_counts[i], k=1) # counts/arcsec^2\n # Get the corresponding counts at each radial value in the grid\n pix_counts = h_interp(r_asec) * pix_scale**2 # counts per pixel\n # Some of the interpolated values are below zero. This is not okay. Set those to zero.\n pix_counts[pix_counts < 0.0] = 0.0\n # Use poisson statistics to get a random value\n pix_random = np.random.poisson(pix_counts)\n # add it to the final result\n result += pix_random\n\n if save_file is not None:\n hdu = fits.PrimaryHDU(result)\n hdul = fits.HDUList([hdu])\n hdul.writeto(save_file, overwrite=True)\n\n return result", "def __init__(self, ministry, central_only=False, zbins=None, magbins=None,\n catalog_type=['galaxycatalog'], tag=None, CMASS=False,\n lightcone=True, **kwargs):\n\n if zbins is None:\n zbins = np.linspace(ministry.minz, ministry.maxz, 5)\n\n if magbins is None:\n magbins = np.linspace(-25, -16, 50)\n\n self.lightcone = lightcone\n\n MagnitudeMetric.__init__(self, ministry, zbins=zbins, magbins=magbins,\n catalog_type=catalog_type, tag=tag, **kwargs)\n\n self.central_only = central_only\n self.CMASS = CMASS\n \n if central_only:\n self.mapkeys = ['luminosity', 'central']\n else:\n self.mapkeys = ['luminosity']\n\n if self.lightcone:\n self.mapkeys.append('redshift')\n\n if self.CMASS:\n self.mapkeys.append('appmag')\n\n self.aschema = 'galaxyonly'\n\n self.lumcounts = None", "def plot_phot_transform(params, inst_mag, cal_mag, bandpass):\n\n fig = plt.figure(2)\n\n plt.plot(cal_mag, inst_mag,'k.')\n\n plt.xlabel('Catalog magnitude')\n\n plt.ylabel('Instrumental magnitude')\n\n plt.title('Relation between instrumental and catalogue magnitudes in '+\\\n bandpass)\n\n [xmin,xmax,ymin,ymax] = plt.axis()\n\n plt.axis([xmax,xmin,ymax,ymin])\n\n plt.savefig(path.join(params['red_dir'],\n 'phot_transform_'+bandpass+'.eps'))\n\n plt.close(2)", "def build_magmom(self,list_oxidizable_site_indices,number_of_electrons):\n\n MAGMOM = []\n ne = number_of_electrons\n\n dict_oxidizable = {}\n\n for i_s in list_oxidizable_site_indices:\n symbol = self.structure.sites[i_s].specie.symbol\n if ne > 0:\n dict_oxidizable[i_s] = self.variable_magnetization_dict[symbol][1]\n ne -= 1\n elif ne == 0:\n dict_oxidizable[i_s] = self.variable_magnetization_dict[symbol][0]\n\n else:\n print(\"SOMETHING IS WRONG. 
REVIEW CODE!\")\n sys.exit()\n\n for i_s, site in enumerate(self.structure):\n if i_s in dict_oxidizable:\n # add a bit of randomness to not get trapped in metastable solution.\n # It is quite useless to have a random number with 16 decimals, and it \n # makes the INCAR ugly; let's round.\n random_addition = np.round( 0.2*np.random.random(1)[0]-0.1, 6)\n MAGMOM.append(dict_oxidizable[i_s]+random_addition)\n else:\n MAGMOM.append(0.6)\n\n return MAGMOM", "def __mag_file_append(self, file):\n t = Table.read(file, format=\"ascii\")\n LightCurve.__mag_table_append(self, t)", "def magnitude(self, magnitude):\n\n self._magnitude = magnitude", "def get_alms(maps=None,\n mask=None,\n maplabel='353',\n showI=False,\n pol=True,\n intensity=True,\n rewrite=False,\n writemap=False,\n savealms=True,\n masktype='PowerSpectra',#'GalPlane2',\n lmax=100):\n\n\n newname = 'alms_lmax{}_mask_{}__'.format(lmax, masktype) + maplabel + '.npy'\n \n\n \n if not os.path.exists(data_path + newname) or rewrite:\n print 'alms file {} does not exist; calculating alms...'.format(newname)\n if mask is None:\n if masktype == 'PowerSpectra':\n maskname = 'HFI_PowerSpect_Mask_2048_R1.10.fits'\n maskfield = 0\n elif masktype == 'GalPlane60':\n maskname = 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits',\n maskfield = 2\n elif masktype == 'no':\n maskname = 'HFI_PowerSpect_Mask_2048_R1.10.fits'\n maskfield = 0\n mask = hp.read_map(data_path + maskname, field=maskfield)\n if masktype == 'no':\n mask = mask*0. + 1.\n masknside = hp.get_nside(mask)\n if maps is None:\n Imap,Qmap,Umap = hp.read_map( data_path + 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(maplabel),hdu=1, field=(0,1,2) )\n mapnside = hp.get_nside(Imap)\n else:\n if intensity and pol:\n Imap = maps[0]\n Qmap = maps[1]\n Umap = maps[2]\n mapnside = hp.get_nside(Imap)\n elif intensity and not pol:\n Imap = maps[0]\n mapnside = hp.get_nside(Imap)\n elif pol and not intensity:\n Qmap = maps[0]\n Umap = maps[1]\n mapnside = hp.get_nside(Qmap)\n \n if masknside != mapnside:\n print 'adjusting mask to match map resolution...'\n mask = hp.pixelfunc.ud_grade(mask, nside_out=mapnside)\n\n if showI:\n hp.mollview(Imap*mask)\n\n alms = []\n if intensity:\n Imap = Imap*mask\n Tlm = hp.map2alm(Imap, lmax=lmax)\n alms.append(Tlm)\n if pol:\n Qmap *= mask\n Umap *= mask\n Elm,Blm = hp.map2alm_spin( (Qmap,Umap), 2, lmax=lmax )\n alms.append(Elm)\n alms.append(Blm)\n\n #this will only work if get_intensity and get_pol\n if writemap and intensity and pol:\n hp.fitsfunc.write_map( data_path + newname, [Imap, Qmap, Umap])\n \n if savealms and intensity and pol:\n np.save(data_path + newname, alms)\n\n return alms\n\n\n else:\n alms = np.load(data_path + newname, 'r')\n if intensity and pol:\n return alms[0], alms[1], alms[2]\n else:\n if intensity:\n return alms[0]\n if pol:\n return alms[1], alms[2]", "def load_meg_maps(self, osc_file):\n\n # Get the full path for the file name\n osc_maps_file = os.path.join(self.db.maps_path, 'Oscs', osc_file + '.p')\n\n # Load data from pickle file\n dat_in = pickle.load(open(osc_maps_file, 'rb'))\n\n # Get the oscillation bands used in current maps\n self.bands = dat_in['bands']\n\n # Initialize the var to store meg map data\n self.meg_maps = _init_meg_map_dict(self.bands.keys())\n\n # Pull out oscillation band data\n for band in self.bands:\n self.meg_maps[band] = dat_in['osc_dat'][band]\n\n # Update boolean that oscs are loaded\n self.oscs_loaded = True", "def plot_maps(self, mode=0, target=1, gfilter=0):\r\n\r\n mpl.figure(1)\r\n\r\n 
mpl.imshow(self.avgimg, cmap=matplotlib.cm.gray, interpolation=None) # scipy.ndimage.gaussian_filter(ampmap, filter, order=0, mode='reflect'), cmap=matplotlib.cm.gray)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.title('Average image')\r\n\r\n print ('target, mode: ', target, mode)\r\n\r\n max1 = np.amax(self.amplitudeImage1)\r\n\r\n if target > 1:\r\n\r\n max1 = np.amax([max1, np.amax(self.amplitudeImage2)])\r\n\r\n max1 = 10.0*int(max1/10.0)\r\n\r\n mpl.figure(2)\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n ipy0, posl, coll = self.plot_averaged_amplitude()\r\n\r\n\r\n\r\n mpl.subplot(2,2,1)\r\n\r\n self.plot_amplitude_map(self.amplitudeImage1, max1, 'Amplitude Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,3)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n for i, px in enumerate(posl):\r\n\r\n mpl.plot(px, self.ipy+ipy0, 'o-', markersize=5.0, markerfacecolor = coll[i], markeredgecolor='w')\r\n\r\n if target > 1:\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,2)\r\n\r\n self.plot_fft()\r\n\r\n \r\n\r\n mpl.figure(3)\r\n\r\n mpl.title('Phase across center horizontally')\r\n\r\n # extract middle line\r\n\r\n sh = self.phaseImage1.shape\r\n\r\n iy0 = int(sh[1]/2)\r\n\r\n mpl.plot(self.phaseImage1[iy0, :], 'ko-')\r\n\r\n return\r\n\r\n \r\n\r\n if mode == 0:\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.n_times, D[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.DF[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')\r\n\r\n\r\n\r\n if mode == 1 and target > 1:\r\n\r\n \r\n\r\n mpl.subplot(2,3,2)\r\n\r\n mpl.title('Amplitude Map2')\r\n\r\n #scipy.ndimage.gaussian_filter(self.amplitudeImage2, 2, order=0, output=self.amplitudeImage2, mode='reflect')\r\n\r\n imga2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.amplitudeImage2, gfilter, order=0, mode='reflect'))\r\n\r\n imga2.set_clim = (0.0, max1)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.subplot(2,3,5)\r\n\r\n imgp2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect'), cmap=matplotlib.cm.hsv)\r\n\r\n mpl.colorbar()\r\n\r\n imgp2.set_clim=(-np.pi/2.0, np.pi/2.0)\r\n\r\n mpl.title('Phase Map2')\r\n\r\n # doubled phase map\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n #scipy.ndimage.gaussian_filter(self.phaseImage2, 2, order=0, output=self.phaseImage2, mode='reflect')\r\n\r\n np1 = scipy.ndimage.gaussian_filter(self.phaseImage1, gfilter, order=0, mode='reflect')\r\n\r\n np2 = scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect')\r\n\r\n dphase = np1 + np2\r\n\r\n #dphase = self.phaseImage1 - self.phaseImage2\r\n\r\n \r\n\r\n #scipy.ndimage.gaussian_filter(dphase, 2, order=0, output=dphase, mode='reflect')\r\n\r\n imgpdouble = mpl.imshow(dphase, cmap=matplotlib.cm.hsv)\r\n\r\n mpl.title('2x Phi map')\r\n\r\n mpl.colorbar()\r\n\r\n imgpdouble.set_clim=(-np.pi, np.pi)\r\n\r\n\r\n\r\n if mode == 2 or mode == 1:\r\n\r\n if self.phasex == []:\r\n\r\n self.phasex = np.random.randint(0, high=self.DF.shape[1], size=self.DF.shape[1])\r\n\r\n self.phasey = np.random.randint(0, high=self.DF.shape[2], size=self.DF.shape[2])\r\n\r\n\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n sh = self.DF.shape\r\n\r\n spr = sh[2]/self.nPhases\r\n\r\n 
for i in range(0, self.nPhases):\r\n\r\n Dm = self.avgimg[i*spr,i*spr] # diagonal run\r\n\r\n mpl.plot(self.n_times, 100.0*(self.DF[:,self.phasex[i], self.phasey[i]]/Dm))\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n if mode == 2:\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n sh = self.DF.shape\r\n\r\n x0 = int(sh[1]/2)\r\n\r\n y0 = int(sh[2]/2)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(self.DF[1:,x0,y0])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')", "def add_field(self, img_dict):\n for k in img_dict.keys():\n assert k in self.bands, \"Celeste model doesn't support band %s\"%k\n self.field_list.append(Field(img_dict))", "def convert_F_vs_mag(value, F_0=None, band='H', system='Johnson', \n conversion='to_mag'): \n \n dico_zero_pts_Jo = {'U': [0.36,1823.],\n 'B': [0.44,4130.],\n 'V': [0.55,3781.],\n 'R': [0.71,2941.],\n 'I': [0.97,2635.],\n 'J': [1.25,1603.],\n 'H': [1.60,1075.],\n 'K': [2.22,667.],\n 'L': [3.54,288.],\n 'M': [4.80,170.],\n 'N': [10.6,36.],\n 'O': [21.0,9.4]}\n dico_zero_pts_2M = {'J': [1.235,1594.],\n 'H': [1.662,1024.],\n 'K': [2.159,666.7]}\n dico_zero_pts_UK = {'V': [0.5556,3540.], # TOKUNAGA (from Cohen 1992)\n 'I': [0.9,2250.], # UKIRT webpage\n 'J': [1.215,1630.], # TOKUNAGA (from Cohen 1992)\n 'H': [1.654,1050.], # TOKUNAGA (from Cohen 1992)\n 'Ks': [2.157,667.], # TOKUNAGA (from Cohen 1992)\n 'K': [2.179,655.], # TOKUNAGA (from Cohen 1992) \n 'L': [3.547,276.], # TOKUNAGA (from Cohen 1992) \n \"L'\": [3.761,248.], # TOKUNAGA (from Cohen 1992) \n 'M': [4.769,160.], # TOKUNAGA (from Cohen 1992) \n '8.7': [8.756,50.], # TOKUNAGA (from Cohen 1992) \n 'N': [10.472,35.3], # TOKUNAGA (from Cohen 1992) \n '11.7': [11.653,28.6], # TOKUNAGA (from Cohen 1992) \n 'Q': [20.13,9.7]} # TOKUNAGA (from Cohen 1992)\n dico_zero_pts_ESO = {'J': [1.228,3.44e-9], # van der Bliek 1996\n 'H': [1.651,1.21e-9], # van der Bliek 1996\n 'K': [2.216,4.12e-10], # van der Bliek 1996\n \"L'\": [3.771,5.58e-11], # van der Bliek 1996\n \"M\": [4.772,2.21e-11]} # van der Bliek 1996 \n \n if F_0 is None:\n if system == 'Johnson' and band in dico_zero_pts_Jo:\n dico_F_0 = dico_zero_pts_Jo\n elif system == '2MASS' and band in dico_zero_pts_2M:\n dico_F_0 = dico_zero_pts_2M\n elif system == 'UKIRT' and band in dico_zero_pts_UK:\n dico_F_0 = dico_zero_pts_UK\n elif system == 'ESO' and band in dico_zero_pts_UK:\n dico_F_0 = dico_zero_pts_ESO \n else:\n msg = 'Combination of band name and band system not recognized.'\n raise TypeError(msg)\n F_0 = dico_F_0[band][1]\n if system == 'ESO':\n # convert from W m-2 mu-1 to Jy\n F_0 = convert_F_units(F_0, dico_F_0[band][0], in_unit='si', \n out_unit='jy')\n \n if conversion == 'to_mag':\n return -2.5*np.log10(value/F_0)\n elif conversion == 'to_flux':\n return F_0*np.power(10.,-value/2.5)\n else:\n msg = \"conversion not recognized, must be 'to_mag' or 'to_flux'.\"\n raise TypeError(msg)", "def waveform_2_magnitude(waveform, frame_length=512, frame_step=128, log_magnitude=True,\n n_mel_bins=None, mel_lower_hertz_edge=0.0,\n mel_upper_hertz_edge=8000.0):\n\n spectogram = waveform_2_spectogram(\n waveform, frame_length=frame_length, frame_step=frame_step,\n log_magnitude=log_magnitude, n_mel_bins=n_mel_bins,\n mel_lower_hertz_edge=mel_lower_hertz_edge,\n mel_upper_hertz_edge=mel_upper_hertz_edge\n )\n\n magnitude = spectogram[:, :, :, 0]\n return magnitude", "def uturuncu_map(surfaceFile,dem,comp=2):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n #print(datadir)\n #print(dem)\n geosurface = 
pu.surface2geotiff(dem,surfaceFile,outname=None,comp=comp,nanval=-9999)\n\n #load georeferenced fem output from pu.surface2geotiff\n #geosurface = '/home/scott/research/models/pylith/scripts/geo_fem_Uz.tif'\n data,geotrans,proj = pu.load_gdal(geosurface)\n data = data*100 # N-up, units=m\n nLat, nLon = data.shape\n\n\n #NOTE: are basmap ll and ur corner or center pixel locations??\n bmap = bm.Basemap(projection='tmerc', #NOTE: if changing to 'merc' have to use latlon=True\n resolution='i',\n lon_0=-67.18,\n lat_0=-22.27,\n width=200000.0,\n height=200000.0,\n suppress_ticks=True, #set to true if using drawmeridians\n ax=ax)\n\n # Set map background\n #dem = '/home/scott/data/dems/cgiar/uturuncu_1000_1000.tif'\n # full res\n dem = os.path.join(datadir,'dems/cgiar/srtm_23_17.tif')\n bmap.background(style='srtm', file=dem, zscale=1.5)\n\n # Annotate\n bmap.drawcountries(linewidth=1,color='k')\n bmap.drawcoastlines(linewidth=1,color='k')\n meridians = np.arange(-68,-65,1)\n md = bmap.drawmeridians(meridians, labels=[0,0,0,1])\n parallels = np.arange(-24,-20,1)\n pl = bmap.drawparallels(parallels, labels=[1,0,0,0])\n\n # Overlay FEM result\n compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}\n im = bmap.imshow(data, origin='upper', alpha=0.7) #alternatively flipud(data)\n cb = bmap.colorbar(im)\n cb.set_label('{} [cm]'.format(compdict[comp]))\n\n # Uturunu Summit\n bmap.plot(-67.18, -22.27, 'r^', latlon=True,label='Uturuncu')\n\n # Location of maximum uplift\n # NOTE: multiple coordinate transforms needed here\n maxval = np.nanmax(data)\n indflat = np.nanargmax(data)\n ind = np.unravel_index(indflat, data.shape) #NOTE row,col --> (y,x)\n lon,lat = ind2latlon(ind, geosurface)\n bmap.plot(lon,lat,'y*',latlon=True,label='Uz_max')\n print('Maximum={} at ({:.2f},{:.2f})\\n'.format(maxval, lon, lat))\n\n # PLUTONS seismometers\n path = os.path.join(datadir,'vector/uturuncu_plutons_seis')\n sm = bmap.readshapefile(path,'seis',drawbounds=False)\n x,y = np.hsplit(np.array(bmap.seis),2)\n bmap.plot(x,y,'wv', mec='k', markersize=10, mew=2, label='3T')\n\n # Continuous GPS\n path = os.path.join(datadir,'vector/uturuncu_contGPS')\n bmap.readshapefile(path,'cGPS',drawbounds=False)\n x,y = np.hsplit(np.array(bmap.cGPS),2)\n bmap.plot(x,y,'go', mec='k', markersize=10, mew=2, label='cGPS')\n\n # Scalebar\n length = 50 #km\n # Scale in lower left\n lon = bmap.llcrnrlon + (length/2.0/100) + (bmap.lonmax - bmap.lonmin)*0.05 #pad by 5% of length, also add 1/2 length of scale length\n lat = bmap.llcrnrlat + (bmap.latmax - bmap.latmin)*0.05\n # Upper Right (todo)\n scale = bmap.drawmapscale(lon, lat, bmap.projparams['lon_0'],bmap.projparams['lon_0'],\n length, #50km\n barstyle='fancy',\n #barstyle='simple',\n fontsize=14)\n\n # More Annotations\n plt.legend(loc='upper right',numpoints=1)\n plt.title('FEM Model Output')\n #plt.savefig('map_fem.png',bbox_inches='tight')\n plt.show()", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n 
image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def get_mag(self):\n raise NotImplementedError", "def assemblePlot(self):\n self.clearPlot()\n self.axes = self.figure.add_subplot(111)\n\n # Reset handles\n self._fluxOverlayHandles = []\n self._magneticAxisHandle = None\n self._orbitHandles = []\n self._separatrixOverlayHandle = None\n self._wallCrossSectionOverlayHandle = None\n\n # Plot image\n self.plotEq()\n\n # Plot overlays\n self.plotOverlays()\n\n self.adjustAxes()", "def build_map(self):\n # Initialize the world map\n self.world_map = np.zeros((self.map_size, self.map_size))\n \n # Subscribe data and process them in the callback func\n sonar_sub = message_filters.Subscriber('/RosAria/sonar', PointCloud)\n pose_sub = message_filters.Subscriber('/RosAria/pose', Odometry)\n\n time_sync = message_filters.TimeSynchronizer([sonar_sub, pose_sub], queue_size=10)\n time_sync.registerCallback(self.callback_map)\n \n # show map interactively\n rospy.sleep(1)\n while not rospy.is_shutdown():\n cv2.imshow('world_map', self.world_prob)\n cv2.waitKey(100)\n\n if self.save_map and self.count%1000==0:\n with open(self.map_file, 'w') as f:\n pickle.dump(self.world_prob, f)\n print(\"=== Save map to {} ===\".format(self.map_file))", "def __mag_table_append(self, table_new):\n for r in table_new[self.__mag_colnames]:\n self.__mags.add_row(r)\n self.__mags.sort(['ra','dec','MJD'])", "def __refmag_table_append(self, table_new): \n if not \"mag_calib_unc\" in table_new.colnames:\n table_new[\"mag_calib_unc\"] = [None for i in range(len(table_new))]\n \n for r in table_new[self.__mag_colnames]:\n self.__ref_mags.add_row(r)\n self.__ref_mags.sort(['ra','dec','MJD'])", "def write_mat_file(self):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)", "def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = 
oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main", "def make_quad_frs_imag(dims,numlevels,numorientations,bandwidth):\n \n freq_resps_imag = make_steer_frs(dims,numlevels,numorientations,bandwidth)\n freq_resps_imag[0] = np.zeros(dims)\n freq_resps_imag[2] = np.zeros(dims)\n return freq_resps_imag", "def register_magics(*args, **kwargs):\n ipy = None\n magics = None\n\n try:\n ipy = get_ipython()\n except NameError:\n logger.error(\"no running notebook kernel found\")\n\n # create the magics\n if ipy:\n magics = create_magics(*args, **kwargs)\n\n # register it\n if ipy and magics:\n ipy.register_magics(magics)\n\n names = list(magics.magics[\"cell\"].keys()) + list(magics.magics[\"line\"].keys())\n names = \", \".join(\"%{}\".format(name) for name in names)\n logger.info(\"magics successfully registered: {}\".format(names))\n else:\n logger.error(\"no magics registered\")", "def add_altitude(chunk, flightHeightFile): \n # Get the flight height\n try:\n # flightHeightFile = \"/SNOWDATA/SnowDrones-Processing/LDP/01-31-2020/RGB/100MEDIA/FlightHeight.txt\"\n with open(flightHeightFile , 'r') as myfile:\n data = myfile.read()\n alt = int(data)\n except:\n alt = int(55)\n\n # Update flight altitudes\n for camera in chunk.cameras:\n if camera.reference.location:\n coord = camera.reference.location\n camera.reference.location = PhotoScan.Vector([coord.x, coord.y, alt])", "def build_magmom(self, list_oxidized_site_indices, list_reduced_site_indices):\n\n MAGMOM = []\n # tabulate how many sites must be reduced from every species in the variable_magnetization_dict.\n\n for i_s, site in enumerate(self.structure):\n\n random_addition = np.round( 0.02*np.random.random(1)[0]-0.01, 6)\n\n if i_s in list_oxidized_site_indices:\n m0 = self.variable_magnetization_dict['Fe']['m_reduced']\n elif i_s in list_reduced_site_indices:\n m0 = self.variable_magnetization_dict['Fe']['m_oxidized']\n else:\n m0 = 0.3\n random_addition = 0.\n\n MAGMOM.append(m0+random_addition)\n\n return MAGMOM", "def __init__(self, mapfile, camera=None, light=None,\r\n width=100.0, depth=100.0, height=10.0,\r\n divx=0, divy=0, ntiles=1.0, name=\"\",\r\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\r\n 
super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n if divx > 200 or divy > 200:\r\n print(\"... Map size can't be bigger than 200x200 divisions\")\r\n divx = 200\r\n divy = 200\r\n if issubclass(type(mapfile), type(\"\")): #HORRIBLE. Only way to cope with python2v3\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n if VERBOSE:\r\n print(\"Loading height map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n else:\r\n im = mapfile #allow image files to be passed as mapfile\r\n ix, iy = im.size\r\n if (ix > 200 and divx == 0) or (divx > 0):\r\n if divx == 0:\r\n divx = 200\r\n divy = 200\r\n im = im.resize((divx, divy), Image.ANTIALIAS)\r\n ix, iy = im.size\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n self.pixels = im.load()\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.ix = ix\r\n self.iy = iy\r\n self.ttype = GL_TRIANGLE_STRIP\r\n\r\n if VERBOSE:\r\n print(\"Creating Elevation Map ...\", ix, iy)\r\n\r\n wh = width * 0.5\r\n hh = depth * 0.5\r\n ws = width / ix\r\n hs = depth / iy\r\n ht = height / 255.0\r\n tx = 1.0*ntiles / ix\r\n ty = 1.0*ntiles / iy\r\n\r\n verts = []\r\n norms = []\r\n tex_coords = []\r\n idx = []\r\n\r\n for y in xrange(0, iy):\r\n for x in xrange(0, ix):\r\n hgt = (self.pixels[x, y])*ht\r\n this_x = -wh + x*ws\r\n this_z = -hh + y*hs\r\n if cubic:\r\n \"\"\" this is a bit experimental. It tries to make the map either zero\r\n or height high. Vertices are moved 'under' adjacent ones if there is\r\n a step to make vertical walls. Goes wrong in places - mainly because\r\n it doesn't check diagonals\r\n \"\"\"\r\n if hgt > height / 2:\r\n hgt = height\r\n else:\r\n hgt = 0.0\r\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\r\n if self.pixels[x-1, y] > 127:\r\n this_x = -wh + (x-1)*ws\r\n elif self.pixels[x+1, y] > 127:\r\n this_x = -wh + (x+1)*ws\r\n elif self.pixels[x, y-1] > 127:\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x, y+1] > 127:\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x-1, y-1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x-1, y+1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x+1, y-1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x+1, y+1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y+1)*hs\r\n verts.append((this_x, hgt, this_z))\r\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\r\n\r\n s = 0\r\n #create one long triangle_strip by alternating X directions\r\n for y in range(0, iy-1):\r\n for x in range(0, ix-1):\r\n i = (y * ix)+x\r\n idx.append((i, i+ix, i+ix+1))\r\n idx.append((i+ix+1, i+1, i))\r\n s += 2\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))", "def map_sim_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n\n for gal_index in GR.N_gal - np.arange(GR.N_gal) - 1:\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = 
aux.load_temp_file(gal_ob=gal_ob,data_type='simgas')\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n\n # Plot\n Rmax = max_scale/2\n ax1 = axs[9 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.99*Rmax,0.99*Rmax])\n ax1.set_ylim([-0.99*Rmax,0.99*Rmax])\n if (p.prop == 'm') & (p.text == True):\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n #if counter == 0:\n # ax1 = plt.subplots(1, 1)\n #cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/')\n plt.savefig(p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format), format=p.format, dpi=250, facecolor='w')\n fignum += 1\n\n else:\n if p.add:\n fig, ax1 = plt.gcf(), p.ax\n if not p.add:\n fig = plt.figure(figsize=(8,6))\n ax1 = fig.add_axes([0.1, 0.01, 0.8, 0.8]) \n ax1.axis('equal')\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n if p.R_max:\n # Cut out square\n simgas = simgas[(np.abs(simgas.x) < p.R_max) & (np.abs(simgas.y) < p.R_max)]\n # Add bottom left corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = -p.R_max,-p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n # Add top right corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = p.R_max,p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n else:\n pass\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n print('Min and max of map: ',map2D.min(),map2D.max())\n #map2D[map2D < 1e4] = 1e6\n # Plot map\n if not p.R_max:\n p.R_max = max_scale/2\n if p.log: \n if not p.vmax: p.vmax = np.log10(map2D).max()\n if not p.vmin: p.vmin = np.log10(map2D).max() - 4\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n else:\n if not p.vmax: p.vmax = np.max(map2D)\n if not p.vmin: p.vmin = np.min(map2D) / 1e3\n map2D[map2D < p.vmin] = p.vmin #np.min(map2D[map2D > 0])\n map2D = np.flipud(map2D)\n\n im = ax1.imshow(map2D,\\\n extent=[-max_scale/2,max_scale/2,-max_scale/2,max_scale/2],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n zoom = 1#/1.5\n ax1.set_xlim([-1/zoom * p.R_max,1/zoom * p.R_max])\n ax1.set_ylim([-1/zoom * p.R_max,1/zoom * p.R_max])\n if p.colorbar: \n divider 
= make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im,cax=cax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n if (p.prop == 'm') & (p.text == True):\n simstar = aux.load_temp_file(gal_ob=gal_ob,data_type='simstar')\n ax1.text(0.05,0.92,'M$_{star}$=%.1e M$_{\\odot}$' % np.sum(simstar.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.86,'M$_{gas}$=%.1e M$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.80,'SFR=%.2f M$_{\\odot}$/yr' % GR.SFR[p.gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index), format=p.format, dpi=250, facecolor='w')\n\n if not p.colorbar: return(im)", "def __init__(self, ima_path):\n\n self.ima_path = ima_path\n\n hdu_ideal = fits.open(ima_path) # read in fits file\n header = hdu_ideal[1].header\n\n self.hdu_ideal = hdu_ideal\n self.data = hdu_ideal[1].data # image to be altered\n\n self.nrows = header['NAXIS1']\n self.ncols = header['NAXIS2']\n self.ngroups = header['NAXIS3'] # number of groups per integration\n self.nintegs = header['NAXIS4'] # number of integrations in time series observations\n self.subarray = hdu_ideal[0].header['SUBARRAY']\n self.tgroup = hdu_ideal[0].header['TGROUP'] # TODO Used as exposure time per frame.\n\n self.modif_str = '_mod' # string encoding the modifications", "def j_band_abs_mag(self):\n\n # Load in the IRAC 3.6 um filter as the observed filter\n irac_36 = SpectralElement.from_file(self._irac_filter, wave_unit=u.um)\n flamingos_j = SpectralElement.from_file(self._j_band_filter, wave_unit=u.nm)\n\n # We will use the official IRAC 3.6 um zero-point flux\n irac_36_zp = 280.9 * u.Jy\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Get the 3.6 um apparent magnitudes and photometric redshifts from the catalog\n se_catalog = cluster_info['catalog']\n irac_36_mag = se_catalog['I1_MAG_APER4']\n galaxy_z = se_catalog['REDSHIFT']\n\n # Given the observed IRAC 3.6 um photometry, compute the rest-frame J-band absolute (Vega) magnitude.\n j_abs_mag = k_corr_abs_mag(apparent_mag=irac_36_mag, z=galaxy_z, f_lambda_sed=self._sed,\n zero_pt_obs_band=irac_36_zp, zero_pt_em_band='vega', obs_filter=irac_36,\n em_filter=flamingos_j, cosmo=self._cosmo)\n\n # Store the J-band absolute magnitude in the catalog and update the data structure\n se_catalog['J_ABS_MAG'] = j_abs_mag\n cluster_info['catalog'] = se_catalog", "def write_map(self, file_name):\n\n if self.pixel == \"HEALPIX\":\n hp.fitsfunc.write_map(file_name, self.data, overwrite=True)\n if self.pixel == \"CAR\":\n enmap.write_map(file_name, self.data)", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def calculate_mags(self):\n res = numpy.fft.rfft(self.cur_input)\n self.mags = []\n for num in res[1:]:\n real = float(numpy.real(num))\n imag = float(numpy.imag(num))\n mag = math.sqrt((real**2)+(imag**2))\n self.mags.append(mag)", "def __init__(self, filename):\n\t\tself.im_raw = sunpy.map.Map(filename)\n\t\ttry:\n\t\t\tself.B0 = self.im_raw.meta['B0']\n\t\texcept KeyError:\n\t\t\tself.B0 = self.im_raw.meta['OBS_B0']\n\t\ttry:\n\t\t\tself.L0 = 
self.im_raw.meta['L0']\n\t\texcept KeyError:\n\t\t\tself.L0 = self.im_raw.meta['OBS_L0']\n\t\ttry:\n\t\t\tself.X0 = self.im_raw.meta['X0']\n\t\texcept KeyError:\n\t\t\tself.X0 = self.im_raw.meta['IMG_X0']\n\t\ttry:\n\t\t\tself.Y0 = self.im_raw.meta['Y0']\n\t\texcept KeyError:\n\t\t\tself.Y0 = self.im_raw.meta['IMG_Y0']\n\t\tif self.im_raw.detector == 'SPMG':\n\t\t\tself.rsun = self.im_raw.rsun_obs.value / self.im_raw.meta['SCALE']\t\n\t\telse:\n\t\t\tself.rsun = self.im_raw.rsun_obs.value", "def test_mag_form_fac():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(q=1.)[0], ion.calc_mag_form_fac(q=1.)[1:]\n del _temp\n assert (abs(formfac - 0.932565) < 1e-6)", "def build_magmom(self,list_oxidizable_site_indices):\n\n MAGMOM = []\n # tabulate how many sites must be reduced from every species in the variable_magnetization_dict.\n reduction_counter = {}\n for key in self.variable_magnetization_dict:\n reduction_counter[key] = self.variable_magnetization_dict[key]['n_reduced']\n\n dict_reduction = {}\n #reduce according to proximity\n for i_s in list_oxidizable_site_indices:\n symbol = self.structure.sites[i_s].specie.symbol\n \n if reduction_counter[symbol] > 0:\n dict_reduction[i_s] = self.variable_magnetization_dict[symbol]['m_reduced']\n reduction_counter[symbol] -= 1\n elif reduction_counter[symbol] == 0:\n dict_reduction[i_s] = self.variable_magnetization_dict[symbol]['m_oxidized']\n\n else:\n print(\"SOMETHING IS WRONG. REVIEW CODE!\")\n sys.exit()\n\n for i_s, site in enumerate(self.structure):\n if i_s in dict_reduction:\n # add a bit of randomness to not get trapped in metastable solution.\n # It is quite useless to have a random number with 16 decimals, and it \n # makes the INCAR ugly; let's round.\n random_addition = np.round( 0.2*np.random.random(1)[0]-0.1, 6)\n MAGMOM.append(dict_reduction[i_s]+random_addition)\n else:\n MAGMOM.append(0.6)\n\n return MAGMOM", "def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)", "def __refmag_file_append(self, file):\n t = Table.read(file, format=\"ascii\")\n LightCurve.__refmag_table_append(self, t)", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def apply_register_map(self, mapfile_location, verify=True):\n with open(mapfile_location, 'r') as f:\n for line in f.readlines():\n # The register map starts after general information is printed preceded by '#'\n if line[0] != '#':\n # Extract register-value pairing from register map\n register, value = line.split(',')\n register = int(register)\n value = int(value[1:3],16) # Value is in hex\n\n if register == 136 and (value & 0x40):\n logger.info(\"Ignoring write to iCAL, will be applied next\")\n continue\n\n # Write register value\n logger.info(\"Writing register {} with value {:02X}\".format(register,value))\n self.write8(register, value)\n\n if verify:\n verify_value = self.readU8(register)\n logger.debug(\"Verifying value written ({:b}) against re-read: {:b}\".format(\n value,verify_value))\n if 
verify_value != value:\n raise I2CException(\n \"Write of byte to register {} failed.\".format(register))\n\n # ICAL-sensitive registers will have been modified during this process\n self.iCAL_required = True\n self.calibrate()", "def homogeneize_spectra_resolution(res=2.95, dataset=\"MUSE\"):\n for field in context.fields:\n print(field)\n input_dir = os.path.join(context.data_dir, dataset, \"combined\", field,\n \"spec1d_ellipv0\")\n if not os.path.exists(input_dir):\n continue\n output_dir = os.path.join(context.data_dir, dataset, \"combined\", field,\n \"spec1d_ellipv0_fwhm{}\".format(res))\n if not(os.path.exists(output_dir)):\n os.mkdir(output_dir)\n specs = sorted([_ for _ in os.listdir(input_dir) if _.endswith(\n \".fits\")])\n for i, filename in enumerate(specs):\n print(\"Convolving file {} ({} / {})\".format(filename, i+1,\n len(specs)))\n filepath = os.path.join(input_dir, filename)\n output = os.path.join(output_dir, filename)\n data = Table.read(filepath, format=\"fits\")\n wave = data[\"wave\"]\n flux = data[\"flux\"]\n fluxerr = data[\"fluxerr\"]\n muse_fwhm = get_muse_fwhm()\n obsres = muse_fwhm(wave)\n newflux, newfluxerr = broad2res(wave, flux, obsres,\n res, fluxerr=fluxerr)\n newtable = Table([wave, newflux, newfluxerr],\n names=[\"wave\", \"flux\", \"fluxerr\"])\n newtable.write(output, overwrite=True)", "def radiance_map(file, config, vmax=4200, levels=20, typ=''):\n \n # Select data from configuration \n azimuths = config['skymap'][:, 0] # +180 # azimuths\n zeniths = config['skymap'][:, 1] # zeniths\n\n if typ == 'sim':\n # look for wavelength index in array\n waves_sim = dataset.attrs['simulated_Columns'].split('nm')[0].split('[')[1].split(\n ']')[0].split(',')\n waves = np.asarray(list(map(int, waves_sim)))\n wave_indx = np.where(waves == wave)\n try:\n wave_indx = np.int(wave_indx[0][0])\n except:\n print(\"Wavelength is not in dataset\")\n z = dataset.simulated[:, wave_indx, time_indx]\n\n elif typ == 'meas':\n wave_indx = int((config['wavelength'] - 250 - config['wave_correction']) / 0.446)\n with h5py.File(file, 'r') as data:\n z = data['data'][:, wave_indx]\n else:\n print('Select a input data type(sim or meas)')\n\n # Add values in the origin to close the surface interpolation\n azimuths = np.append(azimuths, [270, 0, 0, 0, 0, 0, 0, 0])\n zeniths = np.append(zeniths, [0, 12, 24, 36, 48, 60, 72, 84])\n z = np.append(z, [z[0], z[3], z[9], z[19], z[33], z[51], z[73], z[99]])\n\n # Convert x to radians\n azimuths = np.radians(azimuths)\n zeniths = np.radians(zeniths)\n\n # Remove dead channels of the dataset\n azimuths = np.delete(azimuths, config['dead_fibre'])\n zeniths = np.delete(zeniths, config['dead_fibre'])\n z = np.delete(z, config['dead_fibre'])\n\n # Set up a regular grid of interpolation point\n thetai, ri = np.linspace(azimuths.min(), azimuths.max(),\n num=len(azimuths)), \\\n np.linspace(zeniths.min(), zeniths.max(), num=len(zeniths))\n\n ri, thetai = np.meshgrid(ri, thetai, indexing='ij')\n\n # zi = scipy.interpolate.griddata((azimuths, zeniths), z, (thetai, ri),\n # method='linear')\n\n rbf = scipy.interpolate.Rbf(azimuths, zeniths, z, fucntion='gaussian',\n epsilon=0.05)\n\n ZI = rbf(thetai, ri)\n\n if typ == 'sim':\n name = str(dataset.time[time_indx].values) # ''\n else:\n name = 'testing' #str(dataset.time[time_indx].values)\n\n # Create the directory to save the results\n # os.makedirs(os.path.dirname(config['path_note'] + '/figures/'),\n # exist_ok=True)\n if vmax == 'default':\n vmax = 4200\n else:\n vmax = vmax\n\n # Plot the dataset\n 
fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n cmap = 'Spectral_r' # 'rainbow'\n a = plt.contourf(thetai, ri, ZI, levels, cmap=cmap, vmin=0,\n vmax=vmax) # , vmax=4932)\n plt.title('{} UTC {}nm'.format(name, config['wavelength']))\n plt.axis([0, 2*np.pi, 0, 1.48])\n\n plt.scatter(azimuths, zeniths, cmap=cmap, s=1)\n ax.grid(False)\n ax.set_theta_zero_location(\"N\") # Set the direction of polar plot\n ax.set_theta_direction(1) # Set the increase direction on azimuth angles\n # (-1 to clockwise, 1 counterclockwise)\n cbar = plt.colorbar(a)\n cbar.set_label(\"counts\", rotation=90)\n\n # if typ == 'sim':\n # plt.savefig(\n # 'figures/skymap/simulated/skymap{}nm_{}UTC_sim.jpeg'.format(wave,\n # name),\n # dpi=300)\n # plt.show();\n # else:\n # plt.savefig(\n # 'figures/skymap/measured/skymap{}nm_{}UTC_meas.jpeg'.format(wave,\n # name),\n # dpi=300)", "def write_mat_file(self, geom_filename):\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat(geom_filename,mat_dict)", "def F_save_l2g_to_mat(self,file_path,data_fields=[],data_fields_l2g=[]):\n if not self.l2g_data:\n self.logger.warning('l2g_data is empty. Nothing to save.')\n return\n \n import scipy.io\n l2g_data = self.l2g_data.copy()\n for i in range(len(data_fields)):\n if data_fields[i] in l2g_data.keys():\n l2g_data[data_fields_l2g[i]] = l2g_data.pop(data_fields[i])\n # reshape 1d arrays to (nl2, 1)\n for key in l2g_data.keys():\n if key not in {'latr','lonr'}:\n l2g_data[key] = l2g_data[key].reshape(len(l2g_data[key]),1)\n else:# otherwise, the order of 2d array is COMPLETELY screwed\n l2g_data[key] = np.asfortranarray(l2g_data[key])\n scipy.io.savemat(file_path,{'output_subset':l2g_data})", "def flux2Mag(flux, zeropoint=27.0):\n return -2.5 * np.log10(flux) + zeropoint", "def jam_axi_rms(surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, ml=None, normpsf=1., pixang=0.,\n pixsize=0., plot=True, rms=None, erms=None, sigmapsf=0.,\n goodbins=None, quiet=False, beta=None, step=0., nrad=20,\n nang=10, rbh=0.01, tensor='zz', vmin=None, vmax=None, **kwargs):\n if beta is None:\n beta = np.zeros_like(surf_lum) # Anisotropy parameter beta = 1 - (sig_z/sig_R)**2\n if not (surf_lum.size == sigma_lum.size == qobs_lum.size == beta.size):\n raise ValueError(\"The luminous MGE components do not match\")\n if not (surf_pot.size == sigma_pot.size == qobs_pot.size):\n raise ValueError(\"The total mass MGE components do not match\")\n if xbin.size != ybin.size:\n raise ValueError(\"xbin and ybin do not match\")\n if rms is not None:\n if erms is None:\n erms = np.full_like(rms, np.median(rms)*0.05) # Constant ~5% errors\n if goodbins is None:\n goodbins = np.ones_like(rms, dtype=bool)\n elif goodbins.dtype != bool:\n raise ValueError(\"goodbins must be a boolean vector\")\n if not (xbin.size == rms.size == erms.size == goodbins.size):\n raise ValueError(\"(rms, erms, goodbins) and (xbin, ybin) do not match\")\n\n sigmapsf = np.atleast_1d(sigmapsf)\n normpsf = np.atleast_1d(normpsf)\n if sigmapsf.size != normpsf.size:\n raise ValueError(\"sigmaPSF and normPSF do not match\")\n\n pc = distance*np.pi/0.648 # Constant factor 
to convert arcsec --> pc\n\n surf_lum_pc = surf_lum\n surf_pot_pc = surf_pot\n sigma_lum_pc = sigma_lum*pc # Convert from arcsec to pc\n sigma_pot_pc = sigma_pot*pc # Convert from arcsec to pc\n xbin_pc = xbin*pc # Convert all distances to pc\n ybin_pc = ybin*pc\n pixSize_pc = pixsize*pc\n sigmaPsf_pc = sigmapsf*pc\n step_pc = step*pc\n\n # Add a Gaussian with small sigma and the same total mass as the BH.\n # The Gaussian provides an excellent representation of the second moments\n # of a point-like mass, to 1% accuracy out to a radius 2*sigmaBH.\n # The error increses to 14% at 1*sigmaBH, independently of the BH mass.\n #\n if mbh > 0:\n sigmaBH_pc = rbh*pc # Adopt for the BH just a very small size\n surfBH_pc = mbh/(2*np.pi*sigmaBH_pc**2)\n surf_pot_pc = np.append(surfBH_pc, surf_pot_pc) # Add Gaussian to potential only!\n sigma_pot_pc = np.append(sigmaBH_pc, sigma_pot_pc)\n qobs_pot = np.append(1., qobs_pot) # Make sure vectors do not have extra dimensions\n\n qobs_lum = qobs_lum.clip(0, 0.999)\n qobs_pot = qobs_pot.clip(0, 0.999)\n\n t = clock()\n rmsModel = _vrms2(xbin_pc, ybin_pc, inc, surf_lum_pc, sigma_lum_pc,\n qobs_lum, surf_pot_pc, sigma_pot_pc, qobs_pot, beta,\n tensor, sigmaPsf_pc, normpsf, pixSize_pc, pixang,\n step_pc, nrad, nang)\n if not quiet:\n print('jam_axi_rms elapsed time sec: %.2f' % (clock() - t))\n\n if tensor in ('xx', 'yy', 'zz'):\n rmsModel = np.sqrt(rmsModel.clip(0)) # Return SQRT and fix possible rounding errors\n if tensor in ('xy', 'xz'):\n rmsModel *= np.sign(xbin*ybin) # Calculation was done in positive quadrant\n\n # Analytic convolution of the MGE model with an MGE circular PSF\n # using Equations (4,5) of Cappellari (2002, MNRAS, 333, 400)\n #\n lum = surf_lum_pc*qobs_lum*sigma_lum**2 # Luminosity/(2np.pi) of each Gaussian\n flux = np.zeros_like(xbin) # Total MGE surface brightness for plotting\n for sigp, norp in zip(sigmapsf, normpsf): # loop over the PSF Gaussians\n sigmaX = np.sqrt(sigma_lum**2 + sigp**2)\n sigmaY = np.sqrt((sigma_lum*qobs_lum)**2 + sigp**2)\n surfConv = lum / (sigmaX*sigmaY) # PSF-convolved in Lsun/pc**2\n for srf, sx, sy in zip(surfConv, sigmaX, sigmaY): # loop over the galaxy MGE Gaussians\n flux += norp*srf*np.exp(-0.5*((xbin/sx)**2 + (ybin/sy)**2))\n\n if rms is None:\n\n chi2 = None\n if ml is None:\n ml = 1.\n else:\n rmsModel *= np.sqrt(ml)\n\n else:\n\n if (ml is None) or (ml <= 0):\n\n # y1, dy1 = rms, erms # (y1 are the data, y2 the model)\n # scale = sum(y1*y2/dy1**2)/sum(y2**2/dy1**2) # (equation 51)\n #\n ml = (np.sum(rms[goodbins]*rmsModel[goodbins]/erms[goodbins]**2)\n / np.sum((rmsModel[goodbins]/erms[goodbins])**2))**2\n\n rmsModel *= np.sqrt(ml)\n chi2 = np.sum(((rms[goodbins]-rmsModel[goodbins])/erms[goodbins])**2) / goodbins.sum()\n\n if not quiet:\n print('inc=%.1f beta_z=%.2f M/L=%.3g BH=%.2e chi2/DOF=%.3g' % (inc, beta[0], ml, mbh*ml, chi2))\n mass = 2*np.pi*surf_pot_pc*qobs_pot*sigma_pot_pc**2\n print('Total mass MGE: %.4g' % np.sum(mass*ml))\n\n if plot:\n\n rms1 = rms.copy() # Only symmetrize good bins\n rms1[goodbins] = symmetrize_velfield(xbin[goodbins], ybin[goodbins], rms[goodbins])\n\n if (vmin is None) or (vmax is None):\n vmin, vmax = stats.scoreatpercentile(rms1[goodbins], [0.5, 99.5]) # Could use np.percentile in Numpy 1.10\n\n plt.clf()\n plt.subplot(121)\n plot_velfield(xbin, ybin, rms1, vmin=vmin, vmax=vmax, flux=flux, **kwargs)\n plt.title(r\"Input $V_{\\rm rms}$\")\n\n plt.subplot(122)\n plot_velfield(xbin, ybin, rmsModel, vmin=vmin, vmax=vmax, flux=flux, **kwargs)\n 
plt.plot(xbin[~goodbins], ybin[~goodbins], 'ok', mec='white')\n plt.title(r\"Model $V_{\\rm rms}$\")\n plt.tick_params(labelleft='off')\n plt.subplots_adjust(wspace=0.03)\n\n return rmsModel, ml, chi2, flux", "def _add_dimensions_to_file(locus_f):\n ld_lines = []\n i = 0\n with open(locus_f) as ld_file:\n for i, line in enumerate(ld_file):\n ld_lines.append(line)\n no_lines = i + 1\n file_out = locus_f.split('.matrix')[0] + '.LD'\n with open(file_out, 'w' ) as paintor_ld:\n paintor_ld.write(str(no_lines) + ' ' + str(no_lines) + '\\n')\n for line in ld_lines:\n paintor_ld.write(line)", "def magnitude(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n\n # Magnitude matrix:\n dft = np.zeros([x, y], float)\n\n for i in range(0, x):\n for j in range(0, y):\n dft[i, j] = np.sqrt(np.square(np.real(matrix[i, j])) + np.square(np.imag(matrix[i, j])))\n\n\n return dft", "def generate_data_mlab(self):\n # Create some data\n X, Y = mgrid[-2:2:200j, -2:2:200j]\n R = 10*sqrt(X**2 + Y**2)\n Z = sin(R)/R\n\n # Here we are using mlab to generate data. We could just as well have\n # used the mayavi API.\n self.scene.mlab.surf(X, Y, Z, colormap='gist_earth')", "def register_hemispheres(self):\n try:\n safe_execute_command(self._prepare_segmentation_cmd(self.hemispheres_img_path,\n self.registered_hemispheres_img_path),\n self.segmentation_log_file, self.segmentation_error_file)\n except SafeExecuteCommandError as err:\n SegmentationError('Segmentation failed; {}'.format(err))", "def generic_magpha_to_reim(mag, pha):\n complex_nr = to_complex(mag, pha)\n real_part = np.real(complex_nr)\n imag_part = np.imag(complex_nr)\n return real_part, imag_part", "def j_band_abs_mag(self):\n\n # Load in the IRAC 3.6 um filter as the observed filter\n irac_36 = SpectralElement.from_file(self._irac_filter, wave_unit=u.um)\n flamingos_j = SpectralElement.from_file(self._j_band_filter, wave_unit=u.nm)\n\n # We will use the official IRAC 3.6 um zero-point flux\n irac_36_zp = 280.9 * u.Jy\n\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Retrieve the cluster redshift from the SPT catalog\n catalog_idx = cluster_info['SPT_cat_idx']\n cluster_z = self._spt_catalog['REDSHIFT'][catalog_idx]\n\n # Get the 3.6 um apparent magnitudes from the catalog\n se_catalog = cluster_info['catalog']\n irac_36_mag = se_catalog['I1_MAG_APER4']\n\n # Given the observed IRAC 3.6 um photometry, compute the rest-frame J-band absolute (Vega) magnitude.\n j_abs_mag = k_corr_abs_mag(apparent_mag=irac_36_mag, z=cluster_z, f_lambda_sed=self._sed,\n zero_pt_obs_band=irac_36_zp, zero_pt_em_band='vega', obs_filter=irac_36,\n em_filter=flamingos_j, cosmo=self._cosmo)\n\n # Store the J-band absolute magnitude in the catalog and update the data structure\n se_catalog['J_ABS_MAG'] = j_abs_mag\n cluster_info['catalog'] = se_catalog", "def test_mag_form_fac_case1():\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac()[0], ion.calc_mag_form_fac()[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)", "def app_mag(abs_mag, phase_angle, slope_g, d_ast_sun, d_ast_earth):\n\n # Compute the apparent / visual magnitude\n mag = red_mag(abs_mag, phase_angle, slope_g) \\\n + 5.0 * np.log10(d_ast_sun * d_ast_earth)\n\n # Return the apparent magnitude\n return mag", "def reconstruct(self, lam = 0.0001, display=False):\n \n self.history = {}\n \n x = utils.initImage(self.data, self.fov, self.scales[0], self.pulse_ft)\n \n if display:\n plt.ion()\n plt.show()\n self.__fig, self.__axs = 
plt.subplots(1, 2)\n plt.subplots_adjust(wspace=-0.1)\n self.__display(x, np.zeros_like(x), 'Initialization')\n \n self.history['init'] = x\n \n for scale in self.scales:\n\n x = utils.upscaleImage(x, self.fov, scale, self.pulse)\n \n gammas = (utils.ftVectors(self.data['bi_uvcoord1'], self.fov, \n scale, self.pulse_ft),\n utils.ftVectors(self.data['bi_uvcoord2'], self.fov, \n scale, self.pulse_ft),\n utils.ftVectors(self.data['bi_uvcoord3'], self.fov, \n scale, self.pulse_ft))\n \n for beta in self.betas:\n \n # (a) solve for Z while keeping x constant\n Z = utils.mostLikelyPatches(x, beta, self.data, \n self.patch_size, self.gmm)\n \n # (b) solve for x while keeping Z constant\n x = utils.taylorExpansion(x, Z, beta, self.data, gammas,\n self.patch_size, lam=lam)\n \n if display:\n self.__axs[0].clear()\n self.__axs[1].clear()\n self.__display(x, Z, 'Scale: ' + str(scale) + '\\n' +\\\n r'$\\beta$: ' + str(beta))\n self.history[scale] = x\n \n if display:\n plt.ioff()\n \n self.res = np.rot90(utils.upscaleImage(x, self.fov, \n self.naxis, self.pulse),2)\n return self.res", "def local_composition(self, outfile):\n # TODO Rewrite if I ever need this again\n radius = 3.6 * 2\n npix = 64\n #mat = np.zeros((npix,npix,npix),dtype=np.float)\n #mat = np.zeros((npix,npix,npix),dtype={'names':['col1', 'col2', 'col3'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['40', '13', '29'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['id','data'], 'formats':['f4','f4']})\n #names = ['id','data']\n #formats = ['i4',('f4','f4','f4')]\n #mat = np.zeros((npix,npix,npix),dtype=dict(names = names, formats=formats))\n #mat = np.zeros((npix,npix,npix),dtype={'40':('i4',0), '29':('f4',0), '13':('f4',0)})\n print(\"Creating matrix...\")\n mat = [[[{} for i in range(npix)] for j in range(npix)] for k in range(npix)]\n print(\"Finished creating matrix.\")\n #print(repr(mat))\n dx = self.xsize/npix\n dy = self.ysize/npix\n dz = self.zsize/npix\n for ii,i in enumerate(drange(-npix/2*dx,npix/2*dx-dx,dx)):\n print(\"On ii = {0}\".format(ii))\n for jj,j in enumerate(drange(-npix/2*dy,npix/2*dy-dy,dy)):\n for kk,k in enumerate(drange(-npix/2*dz,npix/2*dz-dz,dz)):\n atoms = self.get_atoms_in_cutoff( (i,j,k), radius )\n comp = {}\n for atom in atoms:\n comp[str(atom.z)] = comp.get(str(atom.z),0) + 1.0\n for key in comp:\n comp[key] /= len(atoms)\n #print(comp)\n #mat[ii][jj][kk] = copy.copy(comp)\n mat[ii][jj][kk] = comp\n of = open(outfile,'w')\n of.write('IGOR\\n')\n for atomtype in self.atomtypes:\n of.write('\\nWAVES/N=({0},{1},{2})\\t {3}\\nBEGIN\\n'.format(npix,npix,npix,'partial_comp_'+znum2sym.z2sym(atomtype)))\n for layer in mat:\n for column in layer:\n for value in column:\n try:\n of.write(\"{0} \".format(value[str(atomtype)]))\n except KeyError:\n of.write(\"{0} \".format(0.0))\n of.write(\"\\n\")\n of.write('END\\n')\n of.write('X SetScale/P x 0,1,\"\", {0}; SetScale/P y 0,1,\"\", {0}; SetScale/P z 0,1,\"\", {0}; SetScale d 0,0,\"\", {0}\\n'.format('partial_comp_'+znum2sym.z2sym(atomtype)))\n of.close()\n return mat", "def calc_fft_mag(self, ch_id: int, func_id: int) -> None:\n self.write(':function{0}:fftmagnitude channel{1}'.format(func_id, ch_id))", "def load_registers_in_memory(self):\n register = (self.opcode & 0xF00) >> 8\n for x in range(register+1):\n self.memory[self.I + x] = self.registers[x]\n logger.info(\"Loaded registers from V0 to V{} into {}\".format(\n register,\n hex(self.I)))", "def replace_map_image(self, mapframe, col, 
**kwargs):\n \n #Destroy the existing mapslider if present\n if self.mapslider_list[col]:\n self.mapslider_list[col].destroy()\n self.mapslider_label_list[col].destroy()\n \n #Pull the color fill settings and format them\n fill_color_title = self.translate(self.color_setting_name_list[col].get(),\n input_language=self.language,\n output_language='english')\n if fill_color_title == 'None':\n fill_color = []\n fill_color_title_input = []\n self.mapslider_list[col] = []\n self.datenumlist[col] = []\n self.datefieldlist[col] = []\n self.map_temporalflag[col] = 0\n else:\n fill_color = [self.color_field_modes[self.color_longname_modes_inverted[fill_color_title]]]\n fill_color_title_input = fill_color_title\n \n if fill_color:\n testfield = self.fieldnamelookup(fill_color[0], self.shp_fields)\n vis_params = testfield.vis_params\n \n #Generate a new mapslider if the field is temporal\n if testfield.temporal_flag:\n self.map_temporalflag[col] = testfield.temporal_flag\n if 'slideval' in kwargs:\n slideval = kwargs.pop('slideval')\n else:\n slideval = 0\n fill_color = [self.make_map_slider(self.frame_map_list[col], col, fill_color[0], slideval=slideval)]\n self.date_setting_list[col] = fill_color\n else:\n self.mapslider_list[col] = []\n self.datenumlist[col] = []\n self.datefieldlist[col] = []\n self.map_temporalflag[col] = 0\n else:\n vis_params = [[],[],[]]\n \n #Destroy the existing imgslider if present\n if self.imgslider_list[col]:\n self.imgslider_list[col].destroy()\n self.imgslider_label_list[col].destroy()\n \n #Pull the image visualization settings and format them\n image_title = self.translate(self.image_setting_name_list[col].get(),\n input_language=self.language,\n output_language='english')\n image_path = self.image_dict[image_title]\n \n if image_title == 'None':\n img_params = [[],[],[], [], []]\n self.imgslider_list[col] = [] \n self.img_temporalflag[col] = 0 \n self.img_datenumlist[col] = [] \n self.img_date_setting_list[col] = [] \n new_map_loc = self.map_loc\n else:\n testimg = self.imagenamelookup(image_title, self.image_filepath)\n img_params = testimg.vis_params\n if testimg.lat:\n lat = testimg.lat\n else:\n lat = self.map_loc[0]\n if testimg.lon:\n lon = testimg.lon\n else:\n lon = self.map_loc[1]\n if testimg.zoom:\n zoom = testimg.zoom\n else:\n zoom = self.map_loc[2]\n \n new_map_loc = [lat,lon,zoom]\n \n #Generate a new imgslider if the image is temporal\n if testimg.temporal_flag:\n self.img_temporalflag[col] = testimg.temporal_flag\n if 'img_slideval' in kwargs:\n img_slideval = kwargs.pop('img_slideval')\n else:\n img_slideval = 0\n img_band = self.make_image_slider(self.frame_map_list[col], col, testimg, img_slideval=img_slideval)\n self.img_date_setting_list[col] = img_band\n img_params.append(self.img_date_setting_list[col])\n else:\n self.imgslider_list[col] = []\n self.img_temporalflag[col] = 0 \n self.img_datenumlist[col] = [] \n self.img_date_setting_list[col] = [] \n img_params.append([])\n \n #Delete exisiting map\n self.MAP_list[col].delete(\"all\")\n slaveitems = mapframe.slaves()\n for item in slaveitems:\n item.destroy() \n griditems = mapframe.grid_slaves()\n for item in griditems:\n item.destroy()\n \n #Generate the new map\n self.MAP_list[col] = MapWindow.Map(mapframe,\n self.shps,\n background_image = image_path, \n color_range = fill_color,\n color_title = fill_color_title_input,\n color_params = vis_params,\n image_params = img_params,\n lat_lon_zoom = new_map_loc,\n null_zeros=1,\n window_dimensions = [self.screenwidth,self.screenheight])\n 
self.MAP_list[col].configure(bg='white')", "def registration_resolution_changed(self):\n self._write_image('res' + str(self.resolution))\n self.resolution = self.resolution + 1" ]
[ "0.61536443", "0.5817665", "0.5757287", "0.5740727", "0.5657208", "0.56416994", "0.5542085", "0.54642224", "0.54503155", "0.54379785", "0.5347235", "0.52391076", "0.5095052", "0.5094995", "0.5089604", "0.50434417", "0.5038968", "0.5036496", "0.5012753", "0.497948", "0.49721107", "0.49167353", "0.4907614", "0.4892537", "0.48865864", "0.4882742", "0.4861333", "0.48607603", "0.48603606", "0.48554426", "0.48500907", "0.48457012", "0.48430458", "0.48380926", "0.48282528", "0.48167455", "0.48111597", "0.4783083", "0.47812667", "0.47765416", "0.47761685", "0.4772451", "0.47693542", "0.47635788", "0.47612992", "0.4760463", "0.475854", "0.4758343", "0.47484306", "0.47466543", "0.47433168", "0.47339594", "0.4732992", "0.47288263", "0.47107112", "0.47031823", "0.47031182", "0.4701334", "0.47001877", "0.46955752", "0.46890327", "0.46882758", "0.46720862", "0.46707004", "0.46680126", "0.4664363", "0.46585584", "0.46562237", "0.4650533", "0.46498585", "0.46371552", "0.46240884", "0.4621005", "0.4619768", "0.46146086", "0.46145794", "0.4612129", "0.46109268", "0.4609061", "0.4600262", "0.45975155", "0.45894137", "0.4587163", "0.45849237", "0.45761007", "0.4575253", "0.45742095", "0.45736846", "0.45711917", "0.456913", "0.45690268", "0.4561532", "0.45608822", "0.45579854", "0.45547962", "0.4552405", "0.455239", "0.4548946", "0.4547051", "0.45468912" ]
0.49835664
19
Convert anatomical images to briks.
def ProcessDTI(self):
        for entry in self.info:
            if self.info[entry]['type'] == 'dti':
                if self.verbose:
                    print 'Processing DTI data in %s' % os.path.basename(entry)
#                dtiname = '%s/s%s_dti' % \
#                (self.info[entry]['outdir'],self.info[entry]['series'])
                cmd = 'convert_file %s %s %s' % (entry, \
                    self.info[entry]['imgfile'], self.info[entry]['filetype'])
                fname = '%s%s' % \
                    (self.info[entry]['imgfile'], self.info[entry]['suffix'])
                self.CheckExec(cmd, [fname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bayer_images(self) -> typing.List[np.ndarray]:\n return [rbg_to_bayer_bg(c.get_image()) for c in self.cameras]", "def ConvertAnat(self):\n if self.verbose:\n print 'Convert T1 and T2 images...'\n for entry in self.info:\n info = self.info[entry]\n if self.info[entry]['imgfile'] is None:\n continue\n if self.info[entry]['type'] in self.anat_types:\n key = self.info[entry]['type']\n imgfile = self.info[entry]['imgfile']\n cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \\\n imgfile, self.info[entry]['filetype'])\n checkfile = '%s%s' % (imgfile, self.info[entry]['suffix'])\n self.CheckExec(cmd, [checkfile])\n if self.info[entry]['norm_src'] and self.skull_strip:\n cmd = \"3dSkullStrip -input %s -prefix %s\" % \\\n (checkfile, self.info[entry]['imgfile_skstrip'])\n checkfile = '%s+orig.BRIK' % \\\n (self.info[entry]['imgfile_skstrip'])\n self.CheckExec(cmd, [checkfile])", "def _get_image_blob(roidb):\n num_images = len(roidb)\n\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = io.imread(roidb[i]['image'], plugin='tifffile')\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n im, im_scale = blob_utils.prep_im_for_blob(im, roidb[i], 'train')\n im_scales.append(im_scale[0])\n processed_ims.append(im[0])\n\n # Create a blob to hold the input images [n, c, s, h, w]\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales", "def to_imgaug(self, image_shape):\n image_height, image_width, _ = image_shape\n\n # Create ia bounding boxes from json\n regions = []\n for region in self.regions:\n regions.append(region.to_imgaug(image_width, image_height))\n bbs = BoundingBoxesOnImage(regions, shape=image_shape)\n\n return bbs", "def _get_image_blobs(self, roidb, scale_inds):\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n im, im_scale = self._get_image_blob(im, scale_inds[i], False)\n im_scales.append(im_scale)\n processed_ims.append(im)\n \n # Create a blob to hold the input images\n blob = self.im_list_to_blob(processed_ims)\n \n return blob, im_scales", "def process(bayer_images, red_gains, blue_gains, cam2rgbs):\n bayer_images.shape.assert_is_compatible_with((None, None, None, 4))\n with tf.name_scope(None, 'process'):\n # White balance.\n bayer_images = apply_gains(bayer_images, red_gains, blue_gains)\n # Demosaic.\n bayer_images = tf.clip_by_value(bayer_images, 0.0, 1.0)\n images = demosaic(bayer_images)\n # Color correction.\n images = apply_ccms(images, cam2rgbs)\n # Gamma compression.\n images = tf.clip_by_value(images, 0.0, 1.0)\n images = gamma_compression(images)\n return images", "def birch(args):\n p = OptionParser(birch.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"8x6\")\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n seqids, layout = args\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n K = Karyotype(fig, root, seqids, layout)\n L = K.layout\n\n xs = 0.79\n dt = dict(rectangle=False, circle=False)\n # Embed a phylogenetic tree to the right\n coords = {}\n coords[\"Amborella\"] = (xs, L[0].y)\n coords[\"Vitis\"] = (xs, L[1].y)\n coords[\"Prunus\"] = (xs, L[2].y)\n coords[\"Betula\"] = (xs, L[3].y)\n coords[\"Populus\"] = (xs, L[4].y)\n coords[\"Arabidopsis\"] = (xs, L[5].y)\n coords[\"fabids\"] = join_nodes(root, coords, \"Prunus\", \"Betula\", xs, **dt)\n coords[\"malvids\"] = join_nodes(root, 
coords, \"Populus\", \"Arabidopsis\", xs, **dt)\n coords[\"rosids\"] = join_nodes(root, coords, \"fabids\", \"malvids\", xs, **dt)\n coords[\"eudicots\"] = join_nodes(root, coords, \"rosids\", \"Vitis\", xs, **dt)\n coords[\"angiosperm\"] = join_nodes(root, coords, \"eudicots\", \"Amborella\", xs, **dt)\n\n # Show branch length\n branch_length(root, coords[\"Amborella\"], coords[\"angiosperm\"], \">160.0\")\n branch_length(root, coords[\"eudicots\"], coords[\"angiosperm\"], \">78.2\", va=\"top\")\n branch_length(root, coords[\"Vitis\"], coords[\"eudicots\"], \"138.5\")\n branch_length(root, coords[\"rosids\"], coords[\"eudicots\"], \"19.8\", va=\"top\")\n branch_length(\n root, coords[\"Prunus\"], coords[\"fabids\"], \"104.2\", ha=\"right\", va=\"top\"\n )\n branch_length(root, coords[\"Arabidopsis\"], coords[\"malvids\"], \"110.2\", va=\"top\")\n branch_length(\n root, coords[\"fabids\"], coords[\"rosids\"], \"19.8\", ha=\"right\", va=\"top\"\n )\n branch_length(root, coords[\"malvids\"], coords[\"rosids\"], \"8.5\", va=\"top\")\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"birch\"\n image_name = pf + \".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def part_1b():\n shift_0 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'Shift0.png'), 0) / 255.\n shift_r10 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR10.png'), 0) / 255.\n shift_r20 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR20.png'), 0) / 255.\n shift_r40 = cv2.imread(os.path.join(input_dir, 'TestSeq',\n 'ShiftR40.png'), 0) / 255.\n\n raise NotImplementedError", "def encode_bboxes(ann, bboxes, img_name):\n\n ann_root = ann.getroot()\n\n folder = ET.Element(\"folder\")\n folder.text = ann_root.find('folder').text\n filename = ET.Element(\"filename\")\n filename.text = img_name\n path = ET.Element(\"path\")\n path.text = ann_root.find('folder').text + '/' + img_name\n source = ET.Element(\"source\")\n database = ET.Element(\"database\")\n database.text = ann_root.find(\"source\").find('database').text\n source.append(database)\n size = ET.Element(\"size\")\n width = ET.Element(\"width\")\n width.text = ann_root.find(\"size\").find('width').text\n height = ET.Element(\"height\")\n height.text = ann_root.find(\"size\").find('height').text\n depth = ET.Element(\"depth\")\n depth.text = ann_root.find(\"size\").find('depth').text\n size.append(width)\n size.append(height)\n size.append(depth)\n segmented = ET.Element(\"segmented\")\n segmented.text = ann_root.find('segmented').text\n\n new_root = ET.Element(\"annotation\")\n new_root.append(folder)\n new_root.append(filename)\n new_root.append(path)\n new_root.append(source)\n new_root.append(size)\n new_root.append(segmented)\n\n for b in bboxes:\n xmin = ET.Element(\"xmin\")\n xmin.text = str(int(b[0]))\n ymin = ET.Element(\"ymin\")\n ymin.text = str(int(b[1]))\n xmax = ET.Element(\"xmax\")\n xmax.text = str(int(b[2]))\n ymax = ET.Element(\"ymax\")\n ymax.text = str(int(b[3]))\n name = ET.Element(\"name\")\n name.text = self.classes[int(b[4])]\n bndbox = ET.Element(\"bndbox\")\n bndbox.append(xmin)\n bndbox.append(ymin)\n bndbox.append(xmax)\n bndbox.append(ymax)\n pose = ET.Element(\"pose\")\n truncated = ET.Element(\"truncated\")\n difficult = ET.Element(\"difficult\")\n pose.text = \"Unspecified\"\n truncated.text = \"0\"\n difficult.text = \"0\"\n obj = ET.Element(\"object\")\n obj.append(name)\n obj.append(pose)\n obj.append(truncated)\n obj.append(difficult)\n obj.append(bndbox)\n\n 
new_root.append(obj)\n\n new_tree = ET.ElementTree(new_root)\n\n return new_tree", "def get_all_labels(input_yaml, riib=False):\n images = yaml.load(open(input_yaml, 'rb').read())\n\n count = 0\n for i in range(len(images)):\n images[i]['path'] = os.path.abspath(os.path.join(os.path.dirname(input_yaml), images[i]['path']))\n if riib:\n images[i]['path'] = images[i]['path'].replace('.png', '.pgm')\n images[i]['path'] = images[i]['path'].replace('rgb/train', 'riib/train')\n images[i]['path'] = images[i]['path'].replace('rgb/test', 'riib/test')\n for box in images[i]['boxes']:\n box['y_max'] = box['y_max'] + 8\n box['y_min'] = box['y_min'] + 8\n\n #for box in images[i]['boxes']:\n # box['y_max'] = -1 * box['y_max']\n\n #print(images[i])\n #print(images[i]['path'])\n # change the path to be just the filename (e.g. \"1234.jpg\")\n count += 1\n fname = images[i]['path'][::-1] # reverse the string\n index = fname.find(\"/\")\n fname = fname[:index][::-1] # isolate just the file name and reverse back \n images[i]['path'] = fname\n return images", "def prepare_roidb(self):\n # for pascal_voc dataset\n roidb = self.gt_roidb()\n # data argument\n if self.cfg.if_flipped is True:\n print('append flipped images to training')\n roidb = self.append_flipped_images(roidb)\n\n sizes = [PIL.Image.open(self.image_path_at(i)).size\n for i in range(self.num_images)]\n\n for i in range(len(self.image_index)):\n roidb[i]['image'] = self.image_path_at(i)\n roidb[i]['width'] = sizes[i][0]\n roidb[i]['height'] = sizes[i][1]\n # need gt_overlaps as a dense array for argmax\n gt_overlaps = roidb[i]['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb[i]['max_classes'] = max_classes\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n\n self.roi_data = ROIGenerator(roidb, self.num_classes, self.cfg)\n return self.roi_data", "def getimgs():", "def generate_images_with_bboxes(file_path):\n output_folder = \"../verification/\"\n\n with open(file_path) as f:\n data = pickle.load(f, encoding='latin1')\n\n for record in data:\n image = record[\"image\"]\n image_name = record[\"image_name\"].split(\"/\")[-1]\n rois_bboxes = [roi[\"bbox\"] for roi in record[\"rois\"]]\n gt_bboxes = [gt[\"bbox\"] for gt in record[\"gt_bboxes\"]]\n\n print(\"Generating image {}\".format(image_name))\n\n image_tools.generate_image_with_bboxes(\n image, image_name, rois_bboxes, gt_bboxes, output_folder)", "def split_two_canals(image):\n # get the average of each pixel\n grayImage = np.mean(image, axis=2)\n # get the R-B/R+B ratio of each pixel\n eps = 0.00001\n r_bImage = (image[:,:,0] - image[:,:,2])/(image[:,:,0] + image[:,:,2] + eps)\n\n return grayImage, r_bImage", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def get_images(algorithm=None):\n if algorithm == \"RMA\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_RMA.npy\").item()\n #dates = np.load(\"Dates_RMA.npy\")\n for i in range(n_im):\n i += 
i_o # Empieza en la posicion 10\n data = RMA.main(\"dset_\"+str(i)+\".hdf5\")\n #Ims[10+i] = data['Sf_n']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_RMA/Im_\"+str(i)+\".npy\",data['Sf_n'])\n #np.save(\"Set_images_RMA\",Ims) # Para guardar el set de imagenes\n np.save(\"Parameters_RMA\",data)\n np.save(\"Dates_RMA\",np.array(dates))\n\n elif algorithm == \"BP\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_BP.npy\").item()\n #dates = np.load(\"Dates_BP.npy\")\n for i in range(n_im): #(4991):\n i += i_o # Empieza en la posicion 10\n data = BP.main(\"dset_\"+str(i)+\".hdf5\") \n #Ims[i] = data['Im']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_BP/Im_\"+str(i)+\".npy\",data['Im']) # Imagenes de todo el dataset\n np.save(\"Parameters_BP\",data) # Parametros geometricos como dimensiones y grilla de la imagen\n np.save(\"Dates_BP\",np.array(dates)) # Fechas de las iamgenes tomadas de todo el dset\n\n return 'Ok'", "def transform_images(img1,img2):", "def prepare_roidb(self, imdb):\n sizes = imdb.get_sizes()\n roidb = imdb.roidb\n for i in range(imdb.nrof_images):\n roidb[i]['image'] = imdb.image_path_at(i)\n roidb[i]['width'] = sizes[i][0]\n roidb[i]['height'] = sizes[i][1]\n gt_overlaps = roidb[i]['gt_overlaps'].toarray() # need gt_overlaps as a dense array for argmax\n \n max_classes = gt_overlaps.argmax(axis=1) # gt class that had the max overlap\n roidb[i]['max_classes'] = max_classes\n \n max_overlaps = gt_overlaps.max(axis=1) # max overlap with gt over classes (columns)\n roidb[i]['max_overlaps'] = max_overlaps\n # sanity checks\n # max overlap of 0 => class should be zero (background)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # max overlap > 0 => class should not be zero (must be a fg class)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n roidb[i]['bbox_targets'] = self._compute_targets(roidb[i]['boxes'], max_overlaps, max_classes)", "def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... 
done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... \\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! 
ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output", "def rescale_bbs(img, bbs):\n width = img.shape[1]\n height = img.shape[0]\n for box in bbs:\n box[0] = int(box[0] * width)\n box[1] = int(box[1] * height)\n box[2] = int(box[2] * width)\n box[3] = int(box[3] * height)\n return bbs", "def masterbias(input_file):\n #Set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all bias images\n bias = glob.glob('bias*.fits')\n print 'Loading bias images \\nTotal of bias files = ',len(bias),'\\nFiles = \\n'\n print bias\n print '\\nCreating superbias \\n'\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #copy bias images to save_path\n os.system('cp bias*.fits '+save_path)\n #change to sabe_path\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superbias.fits') == True:\n os.system('rm superbias.fits')\n # --------------------------------------------------------------------------\n # --- Using only with a few bias images\n #create the list of bias images\n #bias_list = string.join(bias,',')\n #combine the bias image and create the superbias\n #iraf.imcombine(bias_list,'superbias.fits')\n #iraf.imstat('superbias.fits')\n # --------------------------------------------------------------------------\n\n #Using numpy package to take the mean value of bias images\n #Problem: does not include the superbias header in this version\n bias_array = []\n for i in range(len(bias)):\n image = fits.getdata(bias[i])\n bias_array.append(np.array(image,dtype='Float64'))\n superbias_array = np.median(bias_array,axis=0)\n hdu_superbias = fits.PrimaryHDU(superbias_array)\n hdulist_superbias = fits.HDUList([hdu_superbias])\n hdulist_superbias.writeto('superbias.fits')\n\n #clean previos bias files\n print '\\n Cleaning bias*.fits images ....\\n'\n os.system('rm bias*.fits')\n print '\\n.... done.'\n #print output\n #test of outpu value\n #os.remove('superbias.fits')\n #Verify if the image was created:\n output = glob.glob('superbias*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #END of the masterbias reduction messsage\n print '\\nsuperbias.fits created!\\n'\n print '\\nEND of superbias reduction!\\n'\n #obtain the value of return\n if output == 1:\n print '!!! 
ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output", "def buildAnat(self, parFiles):\n # should only be a single parFile in the list\n anatImage = nib.load(join(self.seriesDir, parFiles[0]), strict_sort=True)\n\n # convert to RAS+\n anatImage_RAS = nib.as_closest_canonical(anatImage)\n\n print('Nifti image dims: {}'.format(anatImage_RAS.shape))\n\n return anatImage_RAS", "def convert_all_in_bmp(self, path, new_path):\n DbWorker.mkdir(new_path)\n for i in os.listdir(path):\n self.convert_and_save_image(path+'/'+i, new_path)", "def _add_roidb_from_annotations(self, entry):\n ann_ids = self._COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = self._COCO.loadAnns(ann_ids)\n width = entry['width']\n height = entry['height']\n # valid objs\n # change the annotation boxes from 'xywh' to 'xyxy'\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width, x1 + np.max((0, obj['bbox'][2]))))\n y2 = np.min((height, y1 + np.max((0, obj['bbox'][3]))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_box'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n bboxes = np.zeros((num_objs, 4), dtype=entry['bboxes'].dtype)\n gt_classes = np.zeros((num_objs), dtype=entry['gt_classes'].dtype)\n\n coco_cat_id_to_class_ind = dict(\n [(self._class_to_coco_cat_id[cls], self._class_to_ind[cls]) for cls in self._classes[1:]])\n for ix, obj in enumerate(objs):\n bboxes[ix, :] = obj['clean_box']\n gt_classes[ix] = coco_cat_id_to_class_ind[obj['category_id']]\n entry['bboxes'] = np.append(entry['bboxes'], bboxes, axis=0)\n entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)", "def bayer2rgb(bayer):\n assert bayer.ndim == 3 and bayer.shape[-1] == 3\n\n image = bayer.copy()\n rb_k = np.empty((3, 3))\n g_k = np.empty((3, 3))\n\n rb_k = np.array([[1, 0, 1], [0, 0, 0], [1, 0, 1]]) # use this appropriate kernel\n g_k = np.array([[1, 0, 1], [0, 0, 0], [1, 0, 1]])\n\n image[:, :, 0] = signal.convolve2d(image[:, :, 0], rb_k, mode='same', boundary='wrap')\n image[:, :, 1] = signal.convolve2d(image[:, :, 1], g_k, mode='same', boundary='wrap')/2\n image[:, :, 2] = signal.convolve2d(image[:, :, 2], rb_k, mode='same', boundary='wrap')\n\n assert image.ndim == 3 and image.shape[-1] == 3 and \\\n g_k.shape == (3, 3) and rb_k.shape == (3, 3)\n return image, g_k, rb_k", "def __call__(self, results):\n # Image is bgr\n img = results['img'][..., ::-1]\n img = Image.fromarray(img)\n img = self.transform(img)\n img = np.asarray(img)\n img = img[..., ::-1]\n results['img'] = img\n return results", "def transform_images(symbol_dict,\n gray=True,\n gauss_filter=-1,\n bilat_filter=-1,\n global_thresh=-1,\n adapt_thresh_mean=-1,\n adapt_thresh_gauss=-1,\n otsus=-1,\n laplacian=False,\n canny=-1,\n rescale_global_mean=False,\n resize=-1):\n \n for s in symbol_dict.values():\n for symb_img in s:\n if gray:\n gray_img = cv2.cvtColor(symb_img.img, cv2.COLOR_BGR2GRAY)\n symb_img.img = gray_img\n if gauss_filter != -1:\n blur_img = cv2.GaussianBlur(symb_img.img,\n (gauss_filter, gauss_filter),\n 0)\n symb_img.img = blur_img\n if bilat_filter != -1:\n bilat_img = cv2.bilateralFilter(symb_img.img,\n bilat_filter[0],\n bilat_filter[1],\n bilat_filter[2])\n symb_img.img - bilat_img\n if global_thresh != -1:\n ret, thresh_img = cv2.threshold(symb_img.img,\n global_thresh, 255,\n cv2.THRESH_BINARY)\n symb_img.img = thresh_img\n if adapt_thresh_mean 
!= -1:\n thresh_img = cv2.adaptiveThreshold(symb_img.img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \\\n cv2.THRESH_BINARY, adapt_thresh_mean, 2)\n symb_img.img = thresh_img\n if adapt_thresh_gauss != -1:\n thresh_img = cv2.adaptiveThreshold(symb_img.img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \\\n cv2.THRESH_BINARY, adapt_thresh_gauss, 2)\n symb_img.img = thresh_img\n if otsus != -1:\n ret, thresh_img = cv2.threshold(\n symb_img.img, otsus, 255,\n cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n symb_img.img = thresh_img\n if laplacian:\n lap_img = cv2.Laplacian(symb_img.img, cv2.CV_64F)\n symb_img.img = lap_img\n if canny != -1:\n canny_img = cv2.Canny(symb_img.img, canny[0], canny[1])\n symb_img.img = canny_img\n # TODO: is normalizing before resizing correct?\n if rescale_global_mean:\n scaled_img = symb_img.img / 255.0\n symb_img.img = scaled_img - np.mean(scaled_img)\n if resize != -1:\n old_size = symb_img.img.shape[:2]\n\n delta_w = max(old_size) - old_size[1]\n delta_h = max(old_size) - old_size[0]\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n\n color = [0, 0, 0]\n symb_img.img = cv2.copyMakeBorder(symb_img.img,\n top,\n bottom,\n left,\n right,\n cv2.BORDER_CONSTANT,\n value=color)\n\n symb_img.img = cv2.resize(symb_img.img, (resize, resize))", "def prepare_images(images):\n images = color.rgb2lab(images)\n\n l = images[:,:,:,:1]/100.\n ab = images[:,:,:,1:]/200. + 0.5\n\n return l, ab", "def imgBC(img, mask=None, scale=1.0, numBins=64, returnBias=False):\n spacing = np.array(img.GetSpacing())/scale\n img_ds = imgResample(img, spacing=spacing)\n\n # Calculate bias\n if mask is None:\n mask_ds = sitk.Image(img_ds.GetSize(), sitk.sitkUInt8)+1\n mask_ds.CopyInformation(img_ds)\n else:\n mask_ds = imgResample(mask, spacing=spacing, useNearest=True)\n mask_ds = mask_ds > 0\n\n splineOrder = 2\n img_ds_bc = sitk.N4BiasFieldCorrection(sitk.Cast(img_ds, sitk.sitkFloat32), mask_ds, numberOfHistogramBins=numBins, splineOrder=splineOrder, numberOfControlPoints=[splineOrder+1]*4)\n #bias_ds = img_ds_bc - sitk.Cast(img_ds,img_ds_bc.GetPixelID())\n\n bias_ds = imgFinite(img_ds_bc / img_ds)\n bias_ds = sitk.Mask(bias_ds, mask_ds) + sitk.Cast(1-mask_ds, sitk.sitkFloat32) # Fill background with 1s\n\n # Upsample bias \n bias = imgResample(bias_ds, spacing=img.GetSpacing(), size=img.GetSize())\n bias = sitk.Cast(bias, img.GetPixelID())\n\n # Apply bias to original image and threshold to eliminate negitive values\n try:\n upper = np.iinfo(sitkToNpDataTypes[img.GetPixelID()]).max\n except:\n upper = np.finfo(sitkToNpDataTypes[img.GetPixelID()]).max *0.99\n\n #img_bc = sitk.Threshold(img + sitk.Cast(bias, img.GetPixelID()),\n # lower=0,\n # upper=upper)\n\n img_bc = sitk.Threshold(img * bias, lower=0, upper=upper)\n\n if returnBias:\n return (img_bc, bias)\n else:\n return img_bc", "def convert_image(rel_path_in, rel_path_out):\n #Lade Bild mit Originalmaske im Grayscale-Modus\n img = cv2.imread(rel_path_in, cv2.IMREAD_GRAYSCALE)\n #Jetzt steht in img ein 2D-Array/Matrix mit jedem Graufstufen-Wert der Pixel\n #Skaliere Pixelwerte runter\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n #Hole Pixel-Wert an aktueller Stelle\n wert = img[zeilen_index][spalten_index]\n #Falls Wert != 0 (also Pixel gehoert nicht zum Hintergrund)\n if wert != 0: # != 0 statt == 255, da auch z.B. 253er Werte in den Masken existieren... 
(vielleicht durch Konvertierung in anderes Format?)\n #Markiere den Pixel mit 1 statt 255\n img[zeilen_index][spalten_index]=1\n #print(img)\n #*NACHDEM* alle Pixel skaliert wurden, zeichne Umrandung der Objekte\n umrandung_zeichnen(img)\n #change_color(img, 0, 255)\n #change_color(img, 1, 0)\n #print(img)\n #Schreibe Ergebnis-Bild in uebergebene Datei\n cv2.imwrite(rel_path_out, img)", "def encode_images(self, images):\n # todo\n pass", "def exercise2b(self):\n self.b2 = calibrate_image(self.b1)\n plt.axis('off')\n plt.imshow(self.b2)\n plt.show()\n misc.imsave(\"B2.png\", self.b2)\n misc.imsave(\"B2_Brightness.png\", print_brightness(self.b2))", "def get_minibatch(roidb):\n # We collect blobs from each image onto a list and then concat them into a\n # single tensor, hence we initialize each blob to an empty list\n blobs = {k: [] for k in get_minibatch_blob_names()}\n\n # Get the input image blob\n im_blob, im_scales = _get_image_blob(roidb)\n blobs['data'] = im_blob\n\n if cfg.RPN.RPN_ON:\n # RPN-only or end-to-end Faster/Mask R-CNN\n valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)\n elif cfg.RETINANET.RETINANET_ON:\n raise NotImplementedError\n else:\n # Fast R-CNN like models trained on precomputed proposals\n valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n return blobs, valid", "def rbg_to_bayer_bg(image: np.ndarray) -> np.ndarray:\n # there is only one channel but it still needs the third dimension, so that\n # the conversion to a cv::Mat in C++ is easier\n bayer_img = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.uint8)\n\n # channel names, assuming input is RGB\n CHANNEL_RED = 0\n CHANNEL_GREEN = 1\n CHANNEL_BLUE = 2\n\n # channel map to get the following pattern (called \"BG\" in OpenCV):\n #\n # RG\n # GB\n #\n channel_map = {\n (0, 0): CHANNEL_RED,\n (1, 0): CHANNEL_GREEN,\n (0, 1): CHANNEL_GREEN,\n (1, 1): CHANNEL_BLUE,\n }\n\n for r in range(image.shape[0]):\n for c in range(image.shape[1]):\n channel = channel_map[(r % 2, c % 2)]\n bayer_img[r, c] = image[r, c, channel]\n\n return bayer_img", "def scribble_convert(scribbles):\n new_scribbles = np.zeros((*scribbles.shape, 3), dtype=np.uint8)\n # class id\n labels = gt_covert(scribbles * (scribbles < 1000) + (scribbles // 1000) * (scribbles >= 1000))\n new_scribbles[:, :, 1] = labels\n # annotated\n annotated = (labels != 255) * 255\n new_scribbles[:, :, 0] = annotated\n # instance id\n instance = (scribbles % 1000) * (scribbles >= 1000)\n new_scribbles[:, :, 2] = instance\n return new_scribbles", "def _getBrailleRegionsForImage(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForImage\", obj)\n\n return self._getDefaultBrailleRegions(obj, pyatspi.ROLE_IMAGE)", "def get_roidb(self):\n # get all the images_ids in this dataset\n img_ids = self._COCO.getImgIds()\n # sort the ids, make each time the same order\n img_ids.sort()\n # load the 'image' of the COCO dataset\n roidb = copy.deepcopy(self._COCO.loadImgs(img_ids))\n for entry in roidb:\n # predefine some attribute of each image\n self._prep_roidb_entry(entry)\n\n # for cahce_dir\n if not os.path.exists(self.cache_dir):\n os.mkdir(self.cache_dir)\n cache_file = os.path.join(self.cache_dir, self.annFile + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n self._read_roidb_from_cachefile(roidb, cache_file)\n print('{} gt roidb loaded from {}'.format(self.annFile, cache_file))\n else:\n for entry in roidb:\n self._add_roidb_from_annotations(entry)\n with open(cache_file, 'wb') as f:\n pickle.dump(roidb, f, 
pickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n return roidb", "def createAverageImages(self):\n for grabber in self.grabbers:\n callsign = grabber[\"ID\"]\n callMatch = \"%s/%s*\" % (self.downloadFolder, callsign)\n fnameOut = \"%s/%s.%s.jpg\" % (self.averagesFolder, callsign, self.timeCode())\n cmd = \"convert %s -evaluate-sequence Mean %s\" %(callMatch, fnameOut)\n print(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def _get_image_blob(roidb, scale_inds):\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n # gt boxes: (x1, y1, x2, y2, theta, cls) 5->6,4->5\n im = cv2.imread(roidb[0]['image'])\n if im is None:\n print \"Read image failed:\", roidb[0]['image']\n\n if roidb[0]['flipped']:\n im = im[:, ::-1, :]\n\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n im, new_gt_boxes = _augment_data(im, roidb[0]['boxes'][gt_inds, 0:8])\n\n gt_boxes = np.empty((len(new_gt_boxes), 9), dtype=np.float32)\n gt_boxes[:, 0:8] = new_gt_boxes\n\n target_size = cfg.TRAIN.SCALES[scale_inds[0]]\n im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,\n cfg.TRAIN.MAX_SIZE)\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n gt_boxes[:, 0:8] *= im_scales\n gt_boxes[:, 8] = roidb[0]['gt_classes'][gt_inds[:len(new_gt_boxes)]]\n return blob, im_scales, gt_boxes", "def blackout_images(image,ticlass):\n rgb = ocropy.intarray()\n ticlass.textImageProbabilities(rgb,image)\n r = ocropy.bytearray()\n g = ocropy.bytearray()\n b = ocropy.bytearray()\n ocropy.unpack_rgb(r,g,b,rgb)\n components = ocropy.intarray()\n components.copy(g)\n n = ocropy.label_components(components)\n print \"[note] number of image regions\",n\n tirects = ocropy.rectarray()\n ocropy.bounding_boxes(tirects,components)\n for i in range(1,tirects.length()):\n r = tirects.at(i)\n ocropy.fill_rect(image,r,0)\n r.pad_by(-5,-5)\n ocropy.fill_rect(image,r,255)", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def calculate_binaries(dict_data):\n list_all_preprocessed_binaries = []\n for index_patient, patient in enumerate(dict_data):\n # pick and convert image\n image = dict_data[patient][1]\n image = image.astype(\"uint8\")\n # blur image\n image_blurred = cv2.medianBlur(image, 29)\n # segment image using k-means segmentation\n image_segmented = run_kmean_on_single_image(image_blurred, k=10,\n precision=10000, max_iterations=1000)\n # find lower threshold for binarizing images\n \"\"\" the idea i had here was that all the electrodes always occupy the same area on each picture.\n this function basically returns the pixel value, at which we need to threshold in our binary\n function, so that all pixels that have a higher intensity will collectively make up at least \n \"fraction_of_image_threshold\" percent of the picture - electrodes seem to take up about 5-10% of each\n image\"\"\"\n lower_threshold = intelligent_get_threshold(image_segmented,\n fraction_of_image_threshold=0.08)\n # binarize image\n image_binary = binarize_image(image_segmented, \n lower_threshold=lower_threshold, upper_threshold=255)\n 
list_all_preprocessed_binaries.append(image_binary)\n return list_all_preprocessed_binaries", "def forward(self, rpn_rois, roidb, im_info):\n im_scales = im_info.data.numpy()[:, 2]\n\n # get_fast_rcnn_blob_names()\n output_blob_names = ['rois', \n 'labels_int32', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights',\n 'mask_rois', 'roi_has_mask_int32', 'masks_int32']\n \n # For historical consistency with the original Faster R-CNN\n # implementation we are *not* filtering crowd proposals.\n # This choice should be investigated in the future (it likely does\n # not matter).\n # Note: crowd_thresh=0 will ignore _filter_crowd_proposals\n self.add_proposals(roidb, rpn_rois, im_scales, crowd_thresh=0)\n blobs = {k: [] for k in output_blob_names}\n self.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n\n return blobs", "def get_bboxes(self, image_path: str, img_pipeline=None):\n pass", "def bilateralize(ds):\n ds_ROIs = ds.copy('deep')\n ds_ROIs.sa['bilat_ROIs'] = [label.split(' ')[-1] for label in ds_ROIs.sa.all_ROIs]\n mv.h5save(results_dir + 'ds_ROIs.hdf5', ds_ROIs)\n print('Combined lateralized ROIs for the provided dataset and saved the dataset.')\n return ds_ROIs", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def image_binary(image_convert):\n image_bit=cv2.bitwise_not(image_convert)\n _, image_bina = cv2.threshold(image_bit, 125, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n image_bina=image_bina/255.0\n return image_bina", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n 
self.CheckExec(cmd, [checkname])", "def import_L1B(cls,infile):\r\n try:\r\n import gdal\r\n import rasterio\r\n except:\r\n raise ImportError(\"Can not import module GDAL or RasterIO\")\r\n\r\n\r\n image=image()\r\n\r\n #except:\r\n # raise ImportError(\"Can not read band\")\r", "def main(input_folder, output_images_folder, output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def read_isbi2013_2shell():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'isbi2013')\n fraw = pjoin(folder, 'phantom64.nii.gz')\n fbval = pjoin(folder, 'phantom64.bval')\n fbvec = pjoin(folder, 'phantom64.bvec')\n\n md5_dict = {'data': '42911a70f232321cf246315192d69c42',\n 'bval': '90e8cf66e0f4d9737a3b3c0da24df5ea',\n 'bvec': '4b7aa2757a1ccab140667b76e8075cb1'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def convertion_binaire_arbre(self):\r\n binary_code = self.root.conversion_binaire('')\r\n binary_dict = {}\r\n binary_code = binary_code.strip().split(\"\\n\")\r\n for element in binary_code:\r\n binary_dict[element.split(\":\")[0]] = element.split(\":\")[1]\r\n return binary_dict", "def convert_im(im, tree, lbls, block_imgs):\n h, w, _ = im.shape\n step = 16\n for r in range(0, h, step):\n for c in range(0, w, step):\n rnext = min(r+step, h)\n cnext = min(c+step, w)\n patch = im[r:rnext, c:cnext]\n color = np.average(patch, axis=(0, 1))\n\n # Get closest block\n _, ind = tree.query([color], k=1)\n lbl = lbls[ind[0][0]]\n block = block_imgs[lbl]\n\n # Copy values\n rmax = rnext-r\n cmax = cnext-c\n im[r:rnext, c:cnext] = block[:rmax, :cmax]", "def convert_celeba_annots(root_dir, out_dir):\n\n img_id = 0\n ann_id = 0\n cat_id = 1\n\n\n categories = [{\"id\": 1, \"name\": 'face'}]\n images = []\n annotations = []\n\n print('Parsing annotation file')\n ann_file = os.path.join(root_dir, 'Anno', 'list_bbox_celeba.txt')\n celeba_annot_dict = parse_celeba_gt(ann_file) # [im-file] = [[x,y,w,h], ...]\n\n # Get partition\n print('Parsing partition file')\n partition_file = os.path.join(root_dir, 'Eval', 'list_eval_partition.txt')\n train_imgs, val_imgs, test_imgs = parse_celeba_partition(partition_file)\n\n for subset, img_ids in zip(['train', 'val'], [train_imgs, val_imgs]):\n print('Starting 
%s' % subset)\n ann_dict = {}\n for filename in img_ids:\n if len(images) % 500 == 0:\n print(\"Processed %s images, %s annotations\" % (\n len(images), len(annotations)))\n\n image = {}\n image['id'] = img_id\n img_id += 1\n im = Image.open(os.path.join(root_dir, 'img_celeba', filename))\n image['width'] = im.width\n image['height'] = im.height\n image['file_name'] = filename\n images.append(image)\n\n for gt_bbox in celeba_annot_dict[filename]: # CelebA only has one box per image, though\n ann = {}\n ann['id'] = ann_id\n ann_id += 1\n ann['image_id'] = image['id']\n ann['segmentation'] = []\n ann['category_id'] = cat_id # 1:\"face\" for CelebA\n ann['iscrowd'] = 0\n ann['area'] = gt_bbox[2] * gt_bbox[3]\n ann['bbox'] = gt_bbox\n annotations.append(ann)\n\n ann_dict['images'] = images\n ann_dict['categories'] = categories\n ann_dict['annotations'] = annotations\n print(\"Num categories: %s\" % len(categories))\n print(\"Num images: %s\" % len(images))\n print(\"Num annotations: %s\" % len(annotations))\n\n json_name = 'instances_CelebA_{}.json'.format(subset)\n with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:\n outfile.write(json.dumps(ann_dict, cls=MyEncoder, indent=4))\n outfile.close()", "def _get_blobs(im, rois):\n\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n return blobs, im_scale_factors", "def to_bw(self) -> 'BaseImage':\n\n def bw(r: int, g: int, b: int, a: int) -> Tuple4IntType:\n \"\"\"\n To black-white function.\n \"\"\"\n c = int((r + g + b) / 3)\n return c, c, c, a\n\n return self.apply_image_function(image_function=bw)", "def create_new_images(x):\n \n datagen = ImageDataGenerator(width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.1,\n horizontal_flip=True,\n fill_mode='constant',\n cval=0) \n \n i = 0\n for batch in datagen.flow(x, batch_size=1,\n save_to_dir='data/Histology/new_benign',\n save_prefix='benign',\n save_format='jpeg'):\n i += 1 \n if i > 3:\n break\n \n return 0", "def load_next_batch(self, roidb, num_classes):\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = np.random.randint( 0, high=len(self.config.TRAIN.SCALES), size=num_images)\n assert (self.config.TRAIN.BATCH_SIZE % num_images == 0), 'num_images ({}) must divide BATCH_SIZE ({})'. 
\\\n format(num_images, self.config.TRAIN.BATCH_SIZE)\n \n # Get the input image blob, formatted for caffe\n im_blob, im_scales = self._get_image_blobs(roidb, random_scale_inds)\n \n blobs = {'data': im_blob}\n \n assert len(im_scales) == 1, \"Single batch only\"\n assert len(roidb) == 1, \"Single batch only\"\n # gt boxes: (x1, y1, x2, y2, cls)\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)\n gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]\n gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]\n blobs['gt_boxes'] = gt_boxes\n blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] if 'gt_ishard' in roidb[0] \\\n else np.zeros(gt_inds.size, dtype=int)\n # blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]\n blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] if 'dontcare_areas' in roidb[0] \\\n else np.zeros([0, 4], dtype=float)\n blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)\n blobs['im_name'] = os.path.basename(roidb[0]['image'])\n \n return blobs", "def ijk2ras(self,A):\n #productive #math #coordinate-space-conversion\n profprint()\n m=vtk.vtkMatrix4x4()\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n volumeNode.GetIJKToRASMatrix(m)\n imageData = volumeNode.GetImageData()\n ras=[0,0,0]\n k = vtk.vtkMatrix4x4()\n o = vtk.vtkMatrix4x4()\n k.SetElement(0,3,A[0])\n k.SetElement(1,3,A[1])\n k.SetElement(2,3,A[2])\n k.Multiply4x4(m,k,o)\n ras[0] = o.GetElement(0,3)\n ras[1] = o.GetElement(1,3)\n ras[2] = o.GetElement(2,3)\n return ras", "def _get_blobs(im, rois):\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n \n return blobs, im_scale_factors", "def _get_blobs(im, rois):\n blobs = {}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n if cfg.MODEL.FASTER_RCNN and rois is None:\n height, width = blobs['data'].shape[2], blobs['data'].shape[3]\n scale = im_scale_factors[0]\n blobs['im_info'] = np.array([[height, width, scale]], dtype=np.float32)\n if rois is not None:\n blobs['rois'] = _get_rois_blob(rois, im_scale_factors)\n return blobs, im_scale_factors", "def bilateral(filename,input_image, sigma_spatial, sigma_intensity):\n\t# make a simple Gaussian function taking the squared radius\n\tgaussian = lambda r2, sigma: np.exp(-0.5*r2/sigma**2 )\n\t#print(input_image.shape)\n\tinput_image = cv2.cvtColor(input_image,cv2.COLOR_BGR2RGB)\n\n\t# define the window width to be the 2 time the spatial std. dev. to\n\t# be sure that most of the spatial kernel is actually captured\n\twin_width = int(3*sigma_spatial +1)\n\twgt_sum = np.zeros_like(input_image).astype(np.float64)\n\tresult = np.zeros_like(input_image).astype(np.float64)\n\tout= np.zeros_like(input_image).astype(np.float64)\n\t\n\tfor i in tqdm(range(input_image.shape[-1]),desc=\"Going through color channels\"):\n\t\tnorm_image = normalize(input_image[:,:,i])\n\t\tfor shft_x in range(-win_width,win_width+1):\n\t\t\tfor shft_y in range(-win_width,win_width+1):\n\t\t\t\t# compute the spatial contribution\n\t\t\t\tspatial = gaussian(shft_x**2+shft_y**2, sigma_spatial )\n\t\n\t\t\t\t# shift by the offsets to get image window\n\t\t\t\twindow = np.roll(norm_image, [shft_y, shft_x], axis=[0,1])\n\t\n\t\t\t\t# compute the intensity contribution\n\t\t\t\tcombined_filter = spatial*gaussian( (window-norm_image)**2, sigma_intensity )\n\t\n\t\t\t\t# result stores the mult. 
between combined filter and image window\n\t\t\t\tresult[:,:,i] += window*combined_filter\n\t\t\t\twgt_sum[:,:,i] += combined_filter\n\tout = normalize(result/wgt_sum)\n\n\t# normalize the result and return\n\tplt.imsave(\"outputImages/Bilateral_\"+filename+\"_\"+str(sigma_spatial)+\"_\"+ str(sigma_intensity) + \".png\" ,out,dpi=600)\n\treturn out", "def create_brainmask(registered_images, truncate_intensity=(.01, .99), verbose=True, antsxnet_cache_directory=None):\n\n preprocessed_image = ants.image_clone(registered_images)\n if antsxnet_cache_directory is None:\n antsxnet_cache_directory = \"ANTsXNet\"\n\n # Truncate intensity\n if truncate_intensity is not None:\n quantiles = (preprocessed_image.quantile(truncate_intensity[0]),\n preprocessed_image.quantile(truncate_intensity[1]))\n if verbose:\n print(\"Preprocessing: truncate intensities ( low =\", quantiles[0], \", high =\", quantiles[1], \").\")\n\n preprocessed_image[preprocessed_image < quantiles[0]] = quantiles[0]\n preprocessed_image[preprocessed_image > quantiles[1]] = quantiles[1]\n\n # Brain extraction\n if verbose:\n print(\"Preprocessing: brain extraction.\")\n probability_mask = antspynet.brain_extraction(preprocessed_image,\n antsxnet_cache_directory=antsxnet_cache_directory,\n verbose=verbose)\n mask = ants.threshold_image(probability_mask, 0.5, 1, 1, 0)\n\n return preprocessed_image, mask", "def create_png_images(self):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True", "def classify_breed(self, image: LoadedImage, animal: AnimalType, bbox: yolo.BoundBox, top_n: int) \\\n -> Dict[BreedName, float]:\n predict_utils = self.models[animal]\n # get sub-image\n cropped_image = image[bbox.ymin:bbox.ymax, bbox.xmin:bbox.xmax, :]\n new_image_data = misc.imresize(cropped_image, (IMG_SIZE, IMG_SIZE))\n # pass it to the breed classifier\n breed_names = 
predict_utils.breed_modeler.predict_one_loaded(new_image_data,\n predict_utils.model, predict_utils.cls_names,\n top_n)\n return breed_names", "def selfies2image(s):\n mol = MolFromSmiles(sf.decoder(s), sanitize=True)\n return Draw.MolToImage(mol)", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva", "def load_breast_cancer():\n bc_data_train = np.load(_BREAST_CANCER_FOLDER+'bc_data.train')\n bc_data_test = np.load(_BREAST_CANCER_FOLDER+'bc_data.test')\n bc_target_train = np.load(_BREAST_CANCER_FOLDER+'bc_target.train')\n bc_target_test = np.load(_BREAST_CANCER_FOLDER+'bc_target.test')\n for i in range(len(bc_target_test)):\n if bc_target_test[i] == 2:\n bc_target_test[i] = 0\n elif bc_target_test[i] == 4:\n bc_target_test[i] = 1\n for i in range(len(bc_target_train)):\n if bc_target_train[i] == 2:\n bc_target_train[i] = 0\n elif bc_target_train[i] == 4:\n bc_target_train[i] = 1\n return (bc_data_train, bc_target_train.reshape(-1, 1), bc_data_test, bc_target_test.reshape(-1, 1))", "def binarizar_img(p, umbral, use_contrast):\n img = read_img(p)\n if (use_contrast):\n funcion_de_constraste = contrast_function_for_points(umbral, umbral, 0, 255)\n img_binarizada = contrastear(img.reshape((-1)), funcion_de_constraste)\n else:\n img_binarizada = binarizar(img.reshape((-1)), umbral)\n show_imgs([img, img_binarizada.reshape(img.shape)])", "def run_blackbox (telescope=None, mode=None, date=None, read_path=None,\n recursive=False, imgtypes=None, filters=None, image=None,\n image_list=None, master_date=None,\n img_reduce=None, cat_extract=None, trans_extract=None,\n force_reproc_new=None, name_genlog=None, keep_tmp=None):\n\n global tel, filts, types, proc_mode\n\n # in single-image mode, infer global parameter [tel] from the\n # starting characters of the image name\n if image is not None:\n tel_tmp = image.split('/')[-1][0:3]\n if tel_tmp in ['BG2', 'BG3', 'BG4']:\n tel = tel_tmp\n elif tel_tmp[0:2] == 'ML':\n tel = 'ML1'\n else:\n tel = telescope\n else:\n tel = telescope\n\n\n # filters and imgtypes\n filts = filters\n types = imgtypes\n if imgtypes is not None:\n types = imgtypes.lower()\n\n proc_mode = mode\n\n # define number of processes or tasks [nproc]; when running on the\n # ilifu cluster the environment variable SLURM_NTASKS should be\n # set through --ntasks-per-node in the sbatch script; otherwise\n # use the value from the set_bb settings file\n slurm_ntasks = os.environ.get('SLURM_NTASKS')\n if slurm_ntasks is not None:\n nproc = int(slurm_ntasks)\n else:\n nproc = int(get_par(set_bb.nproc,tel))\n\n # update nthreads in set_bb with value of environment variable\n # 'OMP_NUM_THREADS' set at the top\n if int(os.environ['OMP_NUM_THREADS']) != set_bb.nthreads:\n set_bb.nthreads = int(os.environ['OMP_NUM_THREADS'])\n\n # update various parameters in set_bb if corresponding input\n # parameters are not None\n if img_reduce is not None:\n set_bb.img_reduce = str2bool(img_reduce)\n\n if cat_extract is not None:\n set_bb.cat_extract = str2bool(cat_extract)\n\n if trans_extract is not None:\n set_bb.trans_extract = str2bool(trans_extract)\n\n if force_reproc_new is not None:\n set_bb.force_reproc_new = str2bool(force_reproc_new)\n\n if keep_tmp is not None:\n set_bb.keep_tmp = str2bool(keep_tmp)\n\n\n # in night mode, force create_master to be True; 
N.B.: using\n # blackbox_slurm at ilifu and when running in the google cloud,\n # blackbox will be run in 'day' mode on single images\n if mode == 'night':\n set_bb.create_master = True\n\n\n if get_par(set_zogy.timing,tel):\n t_run_blackbox = time.time()\n\n\n # attach general logfile to logging\n ###################################\n\n # in google_cloud mode, do not keep general logfile\n if not get_par(set_bb.google_cloud,tel):\n\n if not isdir(get_par(set_bb.log_dir,tel)):\n os.makedirs(get_par(set_bb.log_dir,tel))\n\n global genlogfile\n genlogfile = None\n\n if name_genlog is not None:\n # check if path is provided\n fdir, fname = os.path.split(name_genlog)\n if len(fdir)>0 and isdir(fdir):\n log_dir = fdir\n else:\n log_dir = get_par(set_bb.log_dir,tel)\n\n genlogfile = '{}/{}'.format(log_dir, fname)\n\n\n elif mode == 'night':\n\n # in night mode, create general logfile based on date/time\n genlogfile = '{}/{}_{}.log'.format(\n get_par(set_bb.log_dir,tel), tel,\n Time.now().strftime('%Y%m%d_%H%M%S'))\n\n\n if genlogfile is not None:\n\n fileHandler = logging.FileHandler(genlogfile, 'a')\n fileHandler.setFormatter(logFormatter)\n fileHandler.setLevel('INFO')\n log.addHandler(fileHandler)\n log.info ('genlogfile created: {}'.format(genlogfile))\n\n\n\n log.info ('processing mode: {}'.format(mode))\n log.info ('number of processes: {}'.format(nproc))\n log.info ('number of threads: {}'.format(set_bb.nthreads))\n log.info ('switch img_reduce: {}'.format(get_par(set_bb.img_reduce,tel)))\n log.info ('switch cat_extract: {}'.format(get_par(set_bb.cat_extract,tel)))\n log.info ('switch trans_extract: {}'.format(get_par(set_bb.trans_extract,tel)))\n log.info ('force reprocessing new: {}'.format(get_par(set_bb.force_reproc_new,tel)))\n log.info ('keep temporary folders: {}'.format(get_par(set_bb.keep_tmp,tel)))\n\n\n\n mem_use (label='run_blackbox at start')\n\n\n # create master bias, dark and/or flat if [master_date] is specified\n if master_date is not None:\n create_masters (master_date, nproc=nproc)\n logging.shutdown()\n return\n\n\n # leave right away if none of the main processing switches are on\n if (not get_par(set_bb.img_reduce,tel) and\n not get_par(set_bb.cat_extract,tel) and\n not get_par(set_bb.trans_extract,tel)):\n\n log.info ('main processing switches img_reduce, cat_extract '\n 'and trans_extract all False, nothing left to do')\n logging.shutdown()\n return\n\n\n # [read_path] is assumed to be the full path to the directory with\n # raw images to be processed; if not provided as input parameter,\n # it is defined using the input [date] with the function\n # [get_path]\n if read_path is None:\n if date is not None:\n read_path, __ = get_path(date, 'read')\n log.info ('processing files from directory: {}'.format(read_path))\n elif image is not None:\n pass\n elif image_list is not None:\n pass\n else:\n # if [read_path], [date], [image] and [image_list] are all None, exit\n log.critical ('[read_path], [date], [image], [image_list] all None')\n logging.shutdown()\n return\n\n else:\n # if it is provided but does not exist, exit unless in night\n # mode in which case it will be created below\n if not isdir(read_path) and mode != 'night':\n log.critical ('[read_path] directory provided does not exist:\\n{}'\n .format(read_path))\n logging.shutdown()\n return\n\n else:\n # infer date from readpath: [some path]/yyyy/mm/dd in case\n # input read_path is defined but input date is not\n date = read_path.split('/')[-3:]\n\n\n # create global lock instance that can be used in 
[blackbox_reduce] for\n # certain blocks/functions to be accessed by one process at a time\n global lock\n lock = Lock()\n\n\n # for both day and night mode, create list of all\n # files present in [read_path], in image type order:\n # bias, dark, flat, object and other\n if image is None and image_list is None:\n biases, darks, flats, objects, others = sort_files(read_path, 'fits',\n recursive=recursive)\n lists = [biases, darks, flats, objects, others]\n filenames = [name for sublist in lists for name in sublist]\n else:\n if mode == 'night':\n log.critical ('[image] or [image_list] should not be defined '\n 'in night mode')\n logging.shutdown()\n return\n\n elif image is not None:\n # if input parameter [image] is defined, the filenames\n # to process will contain a single image\n filenames = [image]\n elif image_list is not None:\n # if input parameter [image_list] is defined,\n # read the ascii files into filenames list\n with open(image_list, 'r') as f:\n filenames = [name.strip() for name in f if name[0]!='#']\n\n\n # split into 'day' or 'night' mode\n filename_reduced = None\n if mode == 'day':\n\n if len(filenames)==0:\n log.warning ('no files to reduce')\n\n\n # see https://docs.python.org/3/library/tracemalloc.html\n #snapshot1 = tracemalloc.take_snapshot()\n\n if nproc==1 or image is not None:\n\n # if only 1 process is requested, or [image] input\n # parameter is not None, run it witout multiprocessing;\n # this will allow images to be shown on the fly if\n # [set_zogy.display] is set to True; something that is not\n # allowed (at least not on a macbook) when\n # multiprocessing.\n log.warning ('running with single processor')\n filenames_reduced = []\n for filename in filenames:\n filenames_reduced.append(blackbox_reduce(filename))\n\n else:\n # use [pool_func] to process list of files\n filenames_reduced = pool_func (try_blackbox_reduce, filenames,\n nproc=nproc)\n\n\n log.info ('{} filenames reduced: {}'.format(len(filenames_reduced),\n filenames_reduced))\n\n #snapshot2 = tracemalloc.take_snapshot()\n #top_stats = snapshot2.compare_to(snapshot1, 'lineno')\n #print(\"[ Top 10 differences ]\")\n #for stat in top_stats[:10]:\n # print(stat)\n\n\n elif mode == 'night':\n\n # if in night mode, check if anything changes in input directory\n # and if there is a new file, feed it to [blackbox_reduce]\n\n # [read_path] folder may not exist yet (e.g. 
no data have yet\n # been synced to it), which will cause watchdog to break, so\n # make sure it exists\n make_dir (read_path)\n\n\n # determine time of next sunrise\n obs = ephem.Observer()\n obs.lat = str(get_par(set_zogy.obs_lat,tel))\n obs.lon = str(get_par(set_zogy.obs_lon,tel))\n sunrise = obs.next_rising(ephem.Sun())\n\n\n # create queue for submitting jobs\n queue = Queue()\n\n # add files that are already present in the read_path\n # directory to the night queue, to reduce these first\n for filename in filenames:\n queue.put(filename)\n\n\n # create and setup observer, but do not start just yet\n observer = PollingObserver()\n observer.schedule(FileWatcher(queue), read_path, recursive=recursive)\n\n\n # create pool of workers\n results = []\n pool = Pool(nproc)\n\n\n # start monitoring [read_path] for incoming files\n observer.start()\n\n\n # keep monitoring queue - which is being filled with new files\n # detected by watchdog - as long as it is nighttime or the\n # queue is not empty yet\n while ephem.now()-sunrise < ephem.hour or not queue.empty():\n\n if queue.empty():\n time.sleep(60)\n else:\n filename = get_file (queue)\n if filename is not None:\n # process it by one of the workers\n results.append(pool.apply_async(try_blackbox_reduce,\n [filename]))\n\n\n log.info ('night has finished and queue is empty')\n\n\n # watchdog can be stopped\n observer.stop() #stop observer\n observer.join() #join observer\n\n\n # closing and joining pool of workers\n pool.close()\n pool.join()\n\n\n # create and email obslog\n log.info ('night processing has finished; creating and emailing obslog')\n try:\n create_obslog (date, email=True, tel=tel, weather_screenshot=True)\n except Exception as e:\n log.exception ('exception was raised in creating obslog: {}'\n .format(e))\n\n\n if get_par(set_zogy.timing,tel):\n log_timing_memory (t0=t_run_blackbox, label='run_blackbox at very end')\n\n\n logging.shutdown()\n return", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... 
cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def stain_image(image, num_stains, color):", "def main():\n\n parser = argparse.ArgumentParser(description='codec_compare')\n parser.add_argument('path', metavar='DIR',\n help='path to images folder')\n args = parser.parse_args()\n classpath = args.path\n classname = classpath.split('/')[1]\n\n images = set(listdir_full_path(classpath))\n if len(images) <= 0:\n print \"\\033[91m[ERROR]\\033[0m\" + \" no source files in ./images.\"\n sys.exit(1)\n\n codeclist_full = set(['aom', 'deepcoder', 'deepcoder-lite', 'fuif', 'fvdo', 'hevc', 'kakadu', 'jpeg',\n 'pik', 'tat', 'xavs', 'xavs-fast', 'xavs-median', 'webp'])\n\n bpp_targets = set([0.06, 0.12, 0.25, 0.50, 0.75, 1.00, 1.50, 2.00])\n for image in images:\n width, height, depth = get_dimensions(image, classname)\n name, imgfmt = os.path.splitext(image)\n imgfmt = os.path.basename(image).split(\".\")[-1]\n derivative_images = []\n if classname[:6] == 'classB':\n derivative_images = create_derivatives(image, classname)\n else:\n derivative_images.append((image, imgfmt))\n\n for derivative_image, pix_fmt in derivative_images:\n json_dir = 'metrics'\n mkdir_p(json_dir)\n json_file = os.path.join(json_dir,\n os.path.splitext(os.path.basename(derivative_image))[0] + \".\" + pix_fmt + \".json\")\n # if os.path.isfile(json_file):\n # print \"\\033[92m[JSON OK]\\033[0m \" + json_file\n # continue\n main_dict = dict()\n derivative_image_metrics = dict()\n for codecname in codeclist_full:\n convertflag = 1\n caseflag = pix_fmt\n if (codecname == 'webp' or codecname == 'tat' or 'deepcoder' in codecname) and depth != '8':\n continue\n if 'xavs' in codecname and depth != '8' and depth != '10':\n continue\n if 'classE' in classname and ('tat' in codecname or 'xavs' in codecname or 'deepcoder' in codecname):\n continue\n if codecname == 'kakadu' and classname[:6] == 'classB':\n convertflag = 0\n caseflag = imgfmt\n bpp_target_metrics = dict()\n for bpp_target in bpp_targets:\n print(codecname)\n if codecname == 'aom' and classname[:6] == 'classB':\n # ('AERIAL2' in image or 'CATS' in image or 'XRAY' in image or 'GOLD' in image or 'TEXTURE1' in image):\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + 'av1'\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' 
+ imgfmt)\n original_image = image\n elif codecname == 'kakadu' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif 'xavs' in codecname and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + imgfmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.' + imgfmt)\n original_image = image\n elif codecname == 'fvdo' and classname[:6] == 'classB':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_pgm' + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image = os.path.join('outputs', codecname, 'decoded', encoded_image_name + '.pgm')\n original_image = image\n else:\n if codecname == 'fuif' and 'tif' in imgfmt:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '.tif_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n elif codecname == 'webp' or codecname == 'tat':\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_yuv420p.' + codecname\n else:\n encoded_image_name = os.path.splitext(os.path.basename(derivative_image))[\n 0] + '_' + str(bpp_target) + '_' + pix_fmt + '.' + codecname\n encoded_image = os.path.join('outputs', codecname, encoded_image_name)\n decoded_image_path = os.path.join('outputs', codecname, 'decoded')\n decoded_image = ''\n for decodedfile in os.listdir(decoded_image_path):\n encoderoot = '_'.join(os.path.splitext(os.path.basename(encoded_image_name))[0].split('_')[:-1])\n if encoderoot in decodedfile:\n if ('tat' in codecname or 'webp' in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] == '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n print(decoded_image)\n if ('tat' not in codecname or 'webp' not in codecname) and os.path.splitext(os.path.basename(decodedfile))[1] != '.yuv':\n decoded_image = os.path.join('outputs', codecname, 'decoded', decodedfile)\n if 'classE' not in classname and 'classB' not in classname and os.path.isfile(decoded_image):\n decoded_image = convert_decoded(decoded_image, width, height, depth, codecname)\n original_image = convert_decoded(derivative_image, width, height, depth, 'reference')\n else:\n original_image = derivative_image\n\n print('Reference:' + original_image)\n print('Encoded:' + encoded_image)\n print('Decoded:' + decoded_image)\n if (os.path.isfile(original_image) and os.path.isfile(decoded_image) and os.path.isfile(encoded_image)):\n if 'classE' in classname:\n metrics = compute_metrics_HDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width, height, pix_fmt, depth)\n\n elif 'classB' in classname:\n metrics = compute_metrics(original_image, decoded_image, encoded_image, bpp_target, codecname,\n width, height, pix_fmt)\n else:\n metrics = compute_metrics_SDR(original_image, decoded_image, encoded_image, bpp_target,\n codecname, width,\n height, imgfmt, depth)\n measured_bpp = (os.path.getsize(encoded_image) * 1.024 * 8) / 
(float((int(width) * int(height))))\n bpp_target_metrics[measured_bpp] = metrics\n else:\n continue\n \n derivative_image_metrics[codecname] = bpp_target_metrics\n main_dict[derivative_image] = derivative_image_metrics\n\n mkdir_p(json_dir)\n with open(json_file, 'w') as f:\n f.write(json.dumps(main_dict, indent=2))", "def _get_blobs(im, rois):\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n blobs['rois'] = _get_rois_blob(rois, im_scale_factors)\n \n return blobs, im_scale_factors", "def brain_has_lead_image(self, brain=None):", "def get_minibatch(roidb, num_classes):\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),\n size=num_images)\n assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \\\n 'num_images ({}) must divide BATCH_SIZE ({})'. \\\n format(num_images, cfg.TRAIN.BATCH_SIZE)\n rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)\n\n # Get the input image blob, formatted for caffe\n im_blob, im_scales, gt_boxes = _get_image_blob(roidb, random_scale_inds)\n\n blobs = {'data': im_blob}\n\n assert len(roidb) == 1, \"Single batch only\"\n # print 'gt_boxes when getting minibatch:', gt_boxes\n blobs['gt_boxes'] = gt_boxes\n blobs['im_info'] = np.array(\n [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],\n dtype=np.float32)\n\n return blobs", "def detect_labimg_boundaries(lab):\n res_dim1 = lab[:-1]==lab[1:]\n res_dim2 = lab[:,:-1]==lab[:,1:]\n bimg = np.zeros(lab.shape)\n bimg[:-1] += res_dim1\n bimg[1:] += res_dim1\n bimg[:,:-1] += res_dim2\n bimg[:,1:] += res_dim2\n return bimg.astype('uint8')", "def convert_image(self, ros_img):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(ros_img, \"bgr8\")\n return cv_image\n except CvBridgeError as e:\n print(e)", "def preprocess_image(image):\n\n gray_image = grayscale_image(image)\n # any other preprocessing steps such as bluring would go here\n\n binary_image = binarize_image(gray_image)\n\n return binary_image", "def load_birds(self, count, img_floder, mask_floder, imglist, dataset_root_path):\n # Add classes\n self.add_class(\"birds\", 1, \"bird\")\n #self.add_class(\"birds\", 2, \"leg\")\n #self.add_class(\"birds\", 3, \"well\")\n \n for i in range(count):\n # 获取图片宽和高\n \n filestr = imglist[i].split(\".\")[0]\n #print(imglist[i],\"-->\",cv_img.shape[1],\"--->\",cv_img.shape[0])\n #print(\"id-->\", i, \" imglist[\", i, \"]-->\", imglist[i],\"filestr-->\",filestr)\n # filestr = filestr.split(\"_\")[1]\n mask_path = mask_floder + \"/\" + filestr + \".png\"\n #print(mask_path)\n yaml_path = dataset_root_path + \"dataset_json/\" + filestr + \"_json/info.yaml\"\n #print(dataset_root_path + \"dataset_json/\" + filestr + \"_json/img.png\")\n cv_img = cv2.imread(dataset_root_path + \"dataset_json/\" + filestr + \"_json/img.png\")\n #print(dataset_root_path + \"dataset_json/\" + filestr + \"_json/img.png\")\n self.add_image(\"birds\", image_id=i, path=img_floder + \"/\" + imglist[i],\n width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, yaml_path=yaml_path)", "def annot_to_gifti(atlas):\n\n labels, ctab, names = nib.freesurfer.read_annot(atlas)\n\n darr = nib.gifti.GiftiDataArray(labels, intent='NIFTI_INTENT_LABEL',\n datatype='NIFTI_TYPE_INT32')\n labeltable = nib.gifti.GiftiLabelTable()\n for key, label in enumerate(names):\n (r, g, b), a = (ctab[key, :3] / 255), (1.0 if key != 0 else 0.0)\n glabel = 
nib.gifti.GiftiLabel(key, r, g, b, a)\n glabel.label = label.decode()\n labeltable.labels.append(glabel)\n\n return nib.GiftiImage(darrays=[darr], labeltable=labeltable)", "def bin_unil_to_bil(a):\n return 2*a - 1", "def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break", "def dwd_airmass(self, backup_orig_data=False):\n self.check_channels(6.7, 7.3, 9.7, 10.8)\n\n if not self._dwd_channel_preparation([6.7, 7.3, 9.7, 10.8],\n backup_orig_data=backup_orig_data):\n return None\n\n ch1 = self[6.7].data - self[7.3].data\n ch2 = self[9.7].data - self[10.8].data\n ch3 = self[6.7].data\n\n img = self._dwd_create_RGB_image((ch1, ch2, ch3),\n ((-25, 0),\n (-40, 5),\n (243 - CONVERSION, 208 - CONVERSION)))\n return img", "def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)", "def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break", "def normalise(image):", "def load_jpgs():\n X_tr = []\n Y_tr = []\n imges = train_df['id'].values\n for img_id in imges:\n X_tr.append(cv2.imread(fold + img_id)) \n Y_tr.append(train_df[train_df['id'] == 
img_id]['has_cactus'].values[0]) \n\n X_tr = np.asarray(X_tr)\n X_tr = X_tr.astype('float32')\n X_tr /= 255\n Y_tr = np.asarray(Y_tr)\n\n return X_tr, Y_tr", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' + image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def convert(self):\n \n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n if len(vrtlist)!=0:\n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d mosaicked images were successfully converted to %d %s files.' % (dataCount, len(vrtlist), tifCount, str(self.outformat)))\n \n \n if len(vrtlist)==0: \n \n hdflist = sorted(glob.glob(self.fullPath + '/*.hdf'))\n for i in range(len(hdflist)):\n ms = pymodis.convertmodis_gdal.createMosaicGDAL(hdfnames = [hdflist[i]], subset = self.subset, outformat = 'GTiff')\n ms.run(str(hdflist[i].split('.h')[0]) + 'mos.tif')\n ms.write_vrt(output = str(hdflist[i].split('.h')[0]), separate = True)\n\n vrtlist = sorted(glob.glob(self.fullPath + '/*vrt'))\n splitAt = len(self.fullPath) + 1\n \n for i in range(0,len(vrtlist)):\n prefix = str(vrtlist[i].split(\".vrt\")[0])\n prefix = prefix[:splitAt] + 'full' + prefix[splitAt:]\n ct = pymodis.convertmodis_gdal.convertModisGDAL(hdfname = vrtlist[i], \n prefix = prefix, subset = self.subset, res = self.resolution, \n outformat = self.outformat, wkt = self.projection, resampl = 'NEAREST_NEIGHBOR', vrt = True)\n ct.run()\n \n mosdel = glob.glob(self.fullPath + '/*mos.tif')\n for f in mosdel:\n os.remove(f)\n xmldel = glob.glob(self.fullPath + '/*mos.tif.xml') \n for f in xmldel:\n os.remove(f)\n vrtdel = glob.glob(self.fullPath + '/*.vrt')\n for f in vrtdel:\n os.remove(f)\n tifCount = len(glob.glob(self.fullPath + '/full*.tif'))\n dataCount = self.subset.count('1')\n logger.log('SUCCESS', 'Conversion complete! The %d bands of %d HDF files were successfully converted to %d %s files.' 
% (dataCount, len(hdflist), tifCount, str(self.outformat)))", "def convert_bboxes_to_albumentations(shape, bboxes, source_format):\n return [convert_bbox_to_albumentations(shape, bbox, source_format) for bbox in bboxes]", "def binarize(self):\n # Loop through the ratings and binarize based on overall average rating\n rating_sum = np.sum(self.ratings)\n rating_count = np.count_nonzero(self.ratings)\n rating_avg = (1.0 * rating_sum) / rating_count\n\n def binary_transform(x, rating_avg):\n if x == 0.0:\n return 0.0\n elif x >= rating_avg:\n return 1.0\n else:\n return -1.0\n\n btransform = np.vectorize(binary_transform, otypes=[np.float])\n if self.is_turbo:\n self.ratings = btransform(self.ratings, rating_avg)", "def arches(self):\n if self.method == 'image':\n return self.params[2]\n if self.arch:\n return [self.arch]\n return []", "def return_bbox_image(self, image, bboxes, label, color):\n if bboxes:\n for obj in bboxes:\n image = self.draw_single_bbox(image, obj.position_xywh, label=label, color=color)\n\n return image", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. but get {}'.format(file.split('.')[1]))", "def pruneBaselines(self, aru_prediction, size=()):\n if self.prune_method == 'simple':\n bl = aru_prediction[0,:,:,0] \n other = aru_prediction[0,:,:,2] \n # binarization\n b = 0.4\n # take both classes into account\n out = np.where(np.logical_and(bl > b, other < b), 1.0, 0)\n # remove some holes and single items\n # important step, otherwise the skeleton will have many small\n # branches\n # TODO: exchange w. opencv counterpart (faster)\n selem = np.ones((1,3))\n out = np.where(binary_closing(out,selem=selem),1.0,0.0)\n out = np.where(binary_opening(out,selem=selem),1.0,0.0)\n# misc.imsave(os.path.join(self.outdir,'tmp.png'), out)\n\n # enlarge output again\n # out = misc.imresize(out, size, interp='nearest') \n # deprecated, use:\n out = np.array(Image.fromarray(out).resize(size,\n resample=Image.NEAREST))\n # TODO: replace w. 
opencv cv2.resize\n\n # now let's get only single pixel lines\n# misc.imsave(os.path.join(self.outdir,'tmp2.png'), out)\n out = skeletonize(out) \n else:\n print('not implemented yet')\n\n return out", "def image_mask(kmeans_labels, img_gray_orig):\n\n\tmask_img = np.zeros((img_gray_orig.shape[0], img_gray_orig.shape[1]))\n\n\tkmeans_labels_arr = kmeans_labels.reshape(img_gray_orig.shape[0],\n\t\t\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\n\tsort_labels = sorted(pd.Series(kmeans_labels).unique(),\n\t\t\t\t\t\t\t\t\t\t\t\t\treverse = True)\n\tjust_bone = ()\n\n\tif (np.sum(kmeans_labels_arr==sort_labels[0])) > 8000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[0])\n\t mask_img[just_bone] = 1\n\t\t \n\tif (np.sum(kmeans_labels_arr==sort_labels[1])) > 8000 and\\\n\t\t\t\t (np.sum(kmeans_labels_arr==sort_labels[1])) < 60000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[1])\n\t mask_img[just_bone] = 1\n\t\n\tif (np.sum(kmeans_labels_arr==sort_labels[2]))>8000 and\\\n\t\t\t\t (np.sum(kmeans_labels_arr==sort_labels[2])) < 70000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[2])\n\t mask_img[just_bone] = 1\n\t\n\tif (np.sum(kmeans_labels_arr==sort_labels[3]))>8000 and\\\n\t\t\t\t(np.sum(kmeans_labels_arr==sort_labels[3])) < 70000:\n\t just_bone = np.where(kmeans_labels_arr==sort_labels[3])\n\t mask_img[just_bone] = 1\n\t\n\tif not just_bone:\n\t\tjust_bone = np.where(kmeans_labels_arr==sort_labels[1]) \n\t\tmask_img[just_bone] = 1\n\n\treturn just_bone, mask_img", "def downsample(self, hr_img):\r\n lr_img = hr_img.resize((int(hr_img.width / self.scaling_factor), int(hr_img.height / self.scaling_factor)),\r\n Image.BICUBIC)\r\n # Convert the LR and HR image to the required type\r\n lr_img = convert_image(lr_img, source='pil', target=self.lr_img_type)\r\n hr_img = convert_image(hr_img, source='pil', target=self.hr_img_type)\r\n return lr_img, hr_img", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! 
Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels" ]
[ "0.60772157", "0.58175284", "0.5723036", "0.5568555", "0.5511439", "0.5510956", "0.5482435", "0.5474333", "0.5450582", "0.53886837", "0.53840804", "0.5379313", "0.5322583", "0.53046983", "0.52473575", "0.5239021", "0.52281713", "0.5225414", "0.5208012", "0.5204904", "0.51531446", "0.51403147", "0.5131917", "0.5093438", "0.5086972", "0.50825906", "0.5064867", "0.505959", "0.50556237", "0.5024545", "0.5019181", "0.50171167", "0.50017005", "0.49883813", "0.49840292", "0.49789864", "0.4973821", "0.49657926", "0.49584305", "0.49537003", "0.4938274", "0.49331757", "0.4926528", "0.49250787", "0.4911468", "0.49107644", "0.4908325", "0.48993587", "0.4889969", "0.48835745", "0.4876764", "0.48618394", "0.48616126", "0.48525888", "0.48475268", "0.48467356", "0.48425448", "0.48387647", "0.48293638", "0.48277962", "0.4820219", "0.4818537", "0.48093176", "0.48088196", "0.48065388", "0.47999188", "0.47960716", "0.47909027", "0.47879398", "0.47814614", "0.47598872", "0.47581986", "0.47554696", "0.47512653", "0.4748662", "0.47473702", "0.47430208", "0.4741166", "0.4734253", "0.47326928", "0.47320426", "0.47286046", "0.47259733", "0.47258243", "0.47102287", "0.47086906", "0.47038552", "0.47031015", "0.46963182", "0.46959174", "0.46955228", "0.46945888", "0.46938747", "0.4684791", "0.46800944", "0.46757042", "0.46623802", "0.4656089", "0.46529827", "0.46528238", "0.46524468" ]
0.0
-1
Convert ASL images to nifti.
def ProcessAsl(self): for entry in self.info: if self.info[entry]['type'] == 'asl': if self.verbose: print 'Processing ASL data in %s' % os.path.basename(entry) cmd = 'convert_file %s %s %s' % (entry, \ self.info[entry]['imgfile'], self.info[entry]['filetype']) fname = '%s%s' % \ (self.info[entry]['imgfile'], self.info[entry]['suffix']) self.CheckExec(cmd, [fname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_nifti(log, brain):\n log.info('Doing convert_to_nifti')\n cmdargs = split('3dAFNItoNIFTI {}'.format(brain))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def ConvertAnat(self):\n if self.verbose:\n print 'Convert T1 and T2 images...'\n for entry in self.info:\n info = self.info[entry]\n if self.info[entry]['imgfile'] is None:\n continue\n if self.info[entry]['type'] in self.anat_types:\n key = self.info[entry]['type']\n imgfile = self.info[entry]['imgfile']\n cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \\\n imgfile, self.info[entry]['filetype'])\n checkfile = '%s%s' % (imgfile, self.info[entry]['suffix'])\n self.CheckExec(cmd, [checkfile])\n if self.info[entry]['norm_src'] and self.skull_strip:\n cmd = \"3dSkullStrip -input %s -prefix %s\" % \\\n (checkfile, self.info[entry]['imgfile_skstrip'])\n checkfile = '%s+orig.BRIK' % \\\n (self.info[entry]['imgfile_skstrip'])\n self.CheckExec(cmd, [checkfile])", "def save2nifti(data, affine, file_name):\n img = nib.Nifti1Image(data, affine)\n nib.save(img, file_name)", "def save2nifti(self, file_path):\n #Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 #signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 #128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n #Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n data = np.rot90(self._data, 3)\n if data_type.has_key(data.dtype.type):\n self._header['datatype'] = data_type[data.dtype.type]\n self._header['cal_max'] = data.max()\n self._header['cal_min'] = 0\n image = nib.nifti1.Nifti1Image(data, None, self._header)\n nib.nifti1.save(image, file_path)", "def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = hashlib.md5(str(nifti).encode())\n 
image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')", "def transform_images(img1,img2):", "def save2nifti(self, file_path):\n # Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 # signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 # 128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n # Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n header = nib.Nifti1Header()\n if self.data.shape[1] == 1:\n new_shape = (self.data.shape[0], 1, 1)\n else:\n new_shape = (self.data.shape[0], 1, 1, self.data.shape[1])\n data = self.data.reshape(new_shape)\n\n if data.dtype.type in data_type:\n header['datatype'] = data_type[data.dtype.type]\n header['cal_max'] = data.max()\n header['cal_min'] = data.min()\n image = nib.Nifti1Image(data, None, header)\n nib.nifti1.save(image, file_path)", "def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()", "def LIDC_to_niftis(extraction_results_dataframe, spacing=[1.0, 1.0, 1.0], debug=False):\n loop = map(\n lambda t: t[1][[\"extraction_location\", \"annotation_file\"]].values,\n extraction_results_dataframe.iterrows(),\n )\n progbar = tqdm.tqdm(\n loop, total=extraction_results_dataframe.shape[0], desc=\"Converting to NiFTIs...\"\n )\n converted_dicoms = Parallel(n_jobs=1, prefer=\"processes\")(\n delayed(convert_to_niftis)(*t, spacing=spacing) for t in progbar\n )\n initial_shape = extraction_results_dataframe.shape[0]\n extraction_results_dataframe = extraction_results_dataframe[converted_dicoms]\n final_shape = extraction_results_dataframe.shape[0]\n print(f\"{final_shape}/{initial_shape} DICOMs folders successfully converted.\")\n\n # Update config file\n config_file = get_config_file_path(dataset_name=\"fed_lidc_idri\", debug=debug)\n write_value_in_config(config_file, 
\"preprocessing_complete\", True)\n\n return extraction_results_dataframe", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! 
(to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def standardize_atlas(atlas_ni):\n atlas_data = atlas_ni.get_data()\n max_features = atlas_ni.get_data().reshape(-1, atlas_ni.shape[-1] ).max(axis=0)\n std_data = (np.abs(atlas_data) / max_features).reshape(atlas_ni.shape)\n return image.new_img_like(atlas_ni,std_data )", "def normalise(image):", "def convert_for_submission(source_dir, target_dir):\r\n files = subfiles(source_dir, suffix=\".nii.gz\", join=False)\r\n maybe_mkdir_p(target_dir)\r\n for f in files:\r\n img = sitk.ReadImage(join(source_dir, f))\r\n out_file = join(target_dir, f[:-7] + \".nii\")\r\n sitk.WriteImage(img, out_file)", "def annot_to_gifti(atlas):\n\n labels, ctab, names = nib.freesurfer.read_annot(atlas)\n\n darr = nib.gifti.GiftiDataArray(labels, intent='NIFTI_INTENT_LABEL',\n datatype='NIFTI_TYPE_INT32')\n labeltable = nib.gifti.GiftiLabelTable()\n for key, label in enumerate(names):\n (r, g, b), a = (ctab[key, :3] / 255), (1.0 if key != 0 else 0.0)\n glabel = nib.gifti.GiftiLabel(key, r, g, b, a)\n glabel.label = label.decode()\n labeltable.labels.append(glabel)\n\n return nib.GiftiImage(darrays=[darr], labeltable=labeltable)", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUS2IUS2_cast(*args)", "def make_nifti(self, output_path=None):\n\n # save nifti\n if output_path is None:\n output = self.nifti_file\n else:\n output = output_path\n ecat2nii.ecat2nii(ecat_main_header=self.ecat_header, ecat_subheaders=self.subheaders, ecat_pixel_data=self.data,\n nifti_file=output, affine=self.affine)\n\n if 'nii.gz' not in output:\n output = helper_functions.compress(output)\n\n return output", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - 
min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def npy2nii(volume: np.ndarray, nii_path: str):\n # because of nibabel, the saving path can't contain more than one dot(.)\n assert nii_path.endswith('.nii')\n assert nii_path.count('.') == 1\n affine = np.eye(4)\n affine[0, 0] = 1\n img = nib.nifti1.Nifti1Image(volume, affine)\n # img.set_data_dtype(np.int32)\n try:\n nib.nifti1.save(img, nii_path)\n except:\n print(\"failed to convert npy to nii\")", "def save_nii(img_path, data, affine, header):\n nimg = nib.Nifti1Image(data, affine=affine, header=header)\n nimg.to_filename(img_path)", "def data_augmentation_and_vectorization(self,imlist, lb,im_labels, average_image = None):\n\t\tX,Y,X_original = [] ,[], []\n\n\t\ti = 0\n\t\tfor im in imlist:\n\t\t\tim=Image.fromarray(im,mode=self.mode)\n\t\t\t#try:\n\t\t\t#im_ini = im\n\t\t\tim_original = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t#im = self.substract_average_image(im, average_image)\n\t\t\t#print 'i:{} is a: {}' .format(i,im_labels[i])\n\t\t\t#im.show()\n\t\t\tX_original.append(im_original)\n\n\t\t\t#Rotations \n\t\t\t#im_r = im.rotate(15)\n\t\t\t# im_r_2 = im.rotate(-15)\n\t\t\t# im_r_3 = im.rotate(180)\n\t\t\t#im_r.show()\n\t\t\t#im_r_2.show()\n\n\t\t\t#Filters\n\t\t\t#im_f = im_ini.filter(ImageFilter.DETAIL)\n\t\t\t#im_f = im.filter(ImageFilter.FIND_EDGES)\n\t\t\t\n\t\t\tif self.mode == 'RGB':\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#Uncomment this if you want to use cross-correlate for 2D arrays http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.correlate2d.html\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# im = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t\t# im = (im - im.mean()) / im.std()\n\n\t\t\tif self.mode == 'L':\n\t\t\t\t# im = np.asarray(im, dtype='float64')\n\t\t\t\t# im = filters.sobel(im)\n\t\t\t\t#im = filters.roberts(im)\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t#im = np.asarray(im, dtype=np.uint8)\n\t\t\t#print im.shape\n\t\t\t#print im.shape\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#im = self.flaten_aux(im)\n\t\t\t#print im.shape\n\t\t\t#im = data.coins() # or any NumPy arr\n\t\t\t#print im.shape\n\t\t\t#image = data.coins() # or any NumPy array!\n\t\t\t#print im\n\t\t\t#im = filter.sobel(im)\n\t\t\t#im = filter.roberts(im)\n\n\t\t\t# im_original = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t# im_original = np.asarray(im_original, dtype=theano.config.floatX)\n\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t# im = (im_original - im_original.mean()) / im_original.std()\n\t\t\t#print im.shape\n\t\t\t#print edges\n\t\t\t# edges = np.asarray(edges, dtype=np.uint8)\n\t\t\t#Image.fromarray(edges,mode=self.mode).show()\n\n\t\t\t#print edges\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX) / 256.\n\n\t\t\t#print edges.shape\n\t\t\t# io.imshow(im)\n\t\t\t# io.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t# plt.suptitle(im_labels[i], size=16)\n\t\t\t# plt.imshow(im, cmap=plt.cm.gray, 
interpolation='nearest')\n\t\t\t# plt.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#print im.shape\n\t\t\t#self.reconstructImage(im).show()\n\n\t\t\t#im_r = np.asarray(im_r, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_2 = np.asarray(im_r_2, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_3 = np.asarray(im_r_3, dtype=theano.config.floatX) / 256.\n\t\t\t#im_f = np.asarray(im_f, dtype=theano.config.floatX) / 256.\n\t\t\t\n\t\t\t#im = im.transpose(2, 0, 1)\n\t\t\t#X.append(np.array(im, dtype=theano.config.floatX))\n\t\t\t#X.append(np.array(im_raw, dtype=theano.config.floatX))\n\t\t\t#X.append(im)\n\t\t\tX.append(im)\n\t\t\t# if i % 100 == 0:\n\t\t\t# \tX.append(im)\n\t\t\t#X.append(im_r)\n\t\t\t# X.append(im_r_2)\n\t\t\t# X.append(im_r_3)\n\t\t\t#X.append(im_f)\n\t\t\t#X_original.append(im)\n\n\t\t\t# X.append(np.array(im_r, dtype=theano.config.floatX))\n\t\t\t# X.append(np.array(im_r_2, dtype=theano.config.floatX))\n\n\t\t\t#Uncomment this if you want to work with monochrome\n\t\t\t# im = im.convert('L')\n\t\t\t# pixels_monochrome = np.array(list(im.getdata()), dtype=np.float)\n\t\t\t\t\t\t\n\t\t\t# # scale between 0-1 to speed up computations\n\t\t\t# min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1), copy=True)\n\t\t\t# pixels_monochrome = min_max_scaler.fit_transform(pixels_monochrome)\n\n\t\t\t# X.append(pixels_monochrome)\n\n\t\t\t#Y.append(lb.transform([im_labels[i]])[0][0])\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t\n\t\t\tlabel = lb.transform([im_labels[i]])[0][0]\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t# label_vector = lb.transform([im_labels[i]])[0]\n\t\t\t# label = np.where( label_vector == 1 )[0][0]\n\t\t\t# print \"Label: {}\".format(label)\n\t\t\t#print label\n\t\t\t#Y.append(label)\n\t\t\tY.append(label)\n\t\t\t#Y.append(im_labels[i])\t\n\n\t\t\t\n\t\t\t#Y.append(label)\t\n\t\t\t# Y.append(label)\t\n\t\t\t# except Exception, e:\n\t\t\t# \tprint e\n\t\t\t# \t#raise e\n\n\t\t\t# if i == 30:\n\t\t\t# \tbreak\n\n\t\t\ti += 1\n\t\t\tif self.verbose:\n\t\t\t\tsys.stdout.write(\"\\r Process: {0}/{1}\".format(i, len(imlist)))\n\t\t\t\tsys.stdout.flush()\n\t\t\n\t\t# output = open(self.data_path + 'X_original.pkl', 'wb')\n\t\t# cPickle.dump(X_original, output,protocol=-1)\n\t\t# output.close()\n\n\t\treturn X,Y", "def ndvi(in_nir_band, in_colour_band, in_rows, in_cols, in_geotransform, out_tiff, data_type=gdal.GDT_Float32):\r\n\r\n # Read the input bands as numpy arrays.\r\n np_nir = in_nir_band.ReadAsArray(0, 0, in_cols, in_rows)\r\n np_colour = in_colour_band.ReadAsArray(0, 0, in_cols, in_rows)\r\n\r\n # Convert the np arrays to 32-bit floating point to make sure division will occur properly.\r\n np_nir_as32 = np_nir.astype(np.float32)\r\n np_colour_as32 = np_colour.astype(np.float32)\r\n\r\n # Calculate the NDVI formula.\r\n numerator = subtract(np_nir_as32, np_colour_as32)\r\n denominator = add(np_nir_as32, np_colour_as32)\r\n result = divide(numerator, denominator)\r\n\r\n # Remove any NaNs cause by division by zero.\r\n ndvi_float32 = nan_to_num(result)\r\n\r\n # Initialize a geotiff driver.\r\n geotiff = GetDriverByName('GTiff')\r\n\r\n # If the desired output is an int16, map the domain [-1,1] to [0,255], create an int16 geotiff with one band and\r\n # write the contents of the int16 NDVI calculation to it. 
Otherwise, create a float32 geotiff with one band and\r\n # write the contents of the float32 NDVI calculation to it.\r\n if data_type == gdal.GDT_UInt16:\r\n ndvi_int16 = multiply((ndvi_float32 + 1), (2**7 - 1))\r\n output = geotiff.Create(out_tiff, in_cols, in_rows, 1, gdal.GDT_UInt16)\r\n output.GetRasterBand(1).WriteArray(ndvi_int16)\r\n elif data_type == gdal.GDT_Float32:\r\n output = geotiff.Create(out_tiff, in_cols, in_rows, 1, gdal.GDT_Float32)\r\n output.GetRasterBand(1).WriteArray(ndvi_float32)\r\n else:\r\n raise ValueError('Invalid output data type. Valid types are gdal.UInt16 or gdal.Float32.')\r\n\r\n # Set the geographic transformation as the input.\r\n output.SetGeoTransform(in_geotransform)\r\n\r\n # return the output image in case you want to do something else with it.\r\n return output", "def load_aal_atlas(atlas_dir, aal_basename=\"ROI_MNI_V4\", verbose=0):\n \n if not osp.isdir(atlas_dir):\n raise ValueError(\"%s not a directory\" % atlas_dir)\n\n aal_img_name = glob.glob(osp.join(atlas_dir, aal_basename+\"*.nii\"))[0]\n aal_labels_name = glob.glob(osp.join(atlas_dir, aal_basename+\"*.txt\"))[0]\n aalimg = nib.load(aal_img_name)\n data = aalimg.get_data()\n\n labels = []\n with open(aal_labels_name) as f:\n for line in f.read().splitlines():\n labels.append(line.split(\"\\t\"))\n \n # labels is now a list of [\"short name\", \"long name\", \"ROI_value\"]\n # [['FAG', 'Precentral_L', '2001'], ['FAD', 'Precentral_R', '2002'], ...]\n n_roi = len(labels)\n split_data = np.ndarray(aalimg.shape + (n_roi,), dtype=bool)\n split_data.fill(False)\n \n only_name_labels = []\n roi_size = []\n for idx,lab in enumerate(labels):\n only_name_labels.append(lab[1])\n split_data[...,idx] = data==int(lab[2])\n roi_size.append(split_data[...,idx].sum())\n \n return (split_data, aalimg.get_affine(), only_name_labels, roi_size)", "def _singlechannelTransformixArray(self, niiIn):\n\t\t# here we will get the extension of the image and will convert it to the nift-1\n\t\t# format if it is not already in that format. While users can supply their own\n\t\t# nifti formatted image to the pipeline, this ensures that other file formats\n\t\t# can be used, although, it creates additionally overhead\n\t\t# here we supply all preprocessing commands that were used to preprocess or morph\n\t\t# the array size of the input image through the hdiprep workflow. 
Transformix\n\t\t# must be run on images with the same size as the elastix registration\n\t\tif ((self.out_ext!=\".nii\") or (self.target_size!=None) or (self.pad!=None)):\n\t\t\t# get the shape of the image\n\t\t\tshp = len(niiIn.hdi.data.image_shape)\n\t\t\t# create new name for the temporary image\n\t\t\ttmp_nm = os.path.join(out_dir, next(tempfile._get_candidate_names())+\".nii\")\n\t\t\t# export nifti intermediate\n\t\t\tprint('Creating nifti-1 intermediate for registration')\n\t\t\t# check for padding\n\t\t\tif self.pad!=None:\n\t\t\t\t# pad the single-channel\n\t\t\t\tniiIn.hdi.data.image = np.pad(niiIn.hdi.data.image,[(self.pad[0], self.pad[0]), (self.pad[1], self.pad[1])],mode='constant')\n\t\t\t# check for image resizing\n\t\t\tif (self.target_size != None) and (self.crops==None):\n\t\t\t\t# transform the image\n\t\t\t\tniiIn.hdi.data.image = resize(niiIn.hdi.data.image,self.target_size)\n\n\t\t\t# Create nifti oject -- transpose axes because of the transformation!\n\t\t\tnii_im = nib.Nifti1Image(niiIn.hdi.data.image.T, affine=np.eye(4))\n\t\t\t#Save the nifti image\n\t\t\tnib.save(nii_im,str(tmp_nm))\n\t\t\t# remove the nifit memory\n\t\t\tnii_im = None\n\t\t\t# update the image name\n\t\t\tprint('Using nifti-1 intermediate for registration')\n\t\t\t# update the input image\n\t\t\tself.in_im = Path(tmp_nm)\n\t\t\t# update the intermediate flag\n\t\t\tself.intermediate = True\n\t\t\t#Remove loaded image to clear memory\n\t\t\tniiIn = None\n\n\t\t#Print update\n\t\tprint('Detected single channel input images...')\n\t\t#Update the fixed channels\n\t\tself.in_channels.append(self.in_im)\n\n\t\t#add transform -- check for list size\n\t\tif len(self.tps) > 1:\n\t\t\t#Run the composition function for transformix\n\t\t\tres_name = MultiTransformix(in_im = self.in_im, out_dir = self.out_dir, tps = self.tps)\n\n\t\t#Otherwise only use the first transform parameter\n\t\telse:\n\t\t\t#Updatethe command with the single channel path alone\n\t\t\tself.command = self.command + ' -in ' + str(self.in_im)\n\t\t\t#use the first transform parameter file\n\t\t\tself.command = self.command + ' -tp ' + str(self.tps[0])\n\t\t\t#Update the command with the output directory\n\t\t\tself.command = self.command + ' -out ' + str(self.out_dir)\n\t\t\t#Run single channel transformix without temporary directories\n\t\t\tRunTransformix(self.command)\n\t\t\t#Get a result name for the output of transformix (assumes nifti for now)\n\t\t\tres_name = Path(os.path.join(self.out_dir,\"result\"+self.in_im.suffix))\n\n\t\t#Create a new name\n\t\tnew_name = Path(os.path.join(self.out_dir,self.baseName+'_result'+self.out_ext))\n\n\t\t# check if the output format needs to be switched -- set by the user\n\t\tif (self.out_ext!=\".nii\") or (self.trim!=None):\n\t\t\t# use HDIreader for now to parse image and exporter to export\n\t\t\tniiIn = hdi_reader.HDIreader(\n\t\t\t path_to_data=self.in_im,\n\t\t\t path_to_markers=None,\n\t\t\t flatten=False,\n\t\t\t subsample=None,\n\t\t\t mask=None,\n\t\t\t save_mem=False\n\t\t\t)\n\t\t\t# check the trim\n\t\t\tif self.trim!=None:\n\t\t\t\t# trim the image borders\n\t\t\t\tniiIn.hdi.data.image = niiIn.hdi.data.image[self.trim:-self.trim,self.trim:-self.trim]\n\t\t\t# export new data\n\t\t\thdi_exporter.HDIexporter(niiIn.hdi.data.image,new_name)\n\t\telse:\n\t\t\t# simply rename the file that is already in the nifti format\n\t\t\tres_name.rename(new_name)", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def prepare_images(images):\n images = color.rgb2lab(images)\n\n l 
= images[:,:,:,:1]/100.\n ab = images[:,:,:,1:]/200. + 0.5\n\n return l, ab", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' + image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def _convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def load_nii(img_path):\n nimg = nib.load(img_path)\n return np.asanyarray(nimg.dataobj), nimg.affine, nimg.header", "def save_to_nii(im, filename, outdir=\"\", mode=\"image\", system=\"sitk\"):\n if system == \"sitk\":\n if mode == 'label':\n img = sitk.GetImageFromArray(im.astype(np.uint8))\n else:\n img = sitk.GetImageFromArray(im.astype(np.float32))\n if not os.path.exists(\"./{}\".format(outdir)):\n os.mkdir(\"./{}\".format(outdir))\n sitk.WriteImage(img, \"./{}/{}.nii.gz\".format(outdir, filename))\n else:\n img = np.rot90(im, k=2, axes= (1,2))\n OUTPUT_AFFINE = np.array(\n [[0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0],\n [0, 0, 0, 1]])\n if mode == 'label':\n img = nibabel.Nifti1Image(img.astype(np.uint8), OUTPUT_AFFINE)\n else:\n img = nibabel.Nifti1Image(img.astype(np.float32), OUTPUT_AFFINE)\n if not os.path.exists(\"./{}\".format(outdir)):\n os.mkdir(\"./{}\".format(outdir))\n nibabel.save(img, \"./{}/{}.nii.gz\".format(outdir, filename))", "def array2ipl(img): \n img_new = cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)\n cv.SetData(img_new, img.copy().data,img.dtype.itemsize*3*img.shape[1])\n img_new[50,75]\n return img_new", "def buildAnat(self, parFiles):\n # should only be a single parFile in the list\n anatImage = nib.load(join(self.seriesDir, parFiles[0]), strict_sort=True)\n\n # convert to RAS+\n anatImage_RAS = nib.as_closest_canonical(anatImage)\n\n print('Nifti image dims: {}'.format(anatImage_RAS.shape))\n\n return anatImage_RAS", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = 
it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def _extract_images(source_path, target_path, merge_labels):\n\n images_path = os.path.join(source_path, 'imagesTr')\n labels_path = os.path.join(source_path, 'labelsTr')\n\n # Filenames have the form 'hippocampus_XX.nii.gz'\n filenames = [x for x in os.listdir(images_path) if x[:5] == 'hippo']\n\n # Create directories\n if not os.path.isdir(target_path):\n os.makedirs(target_path)\n\n for filename in filenames:\n\n # Extract only T2-weighted\n x = sitk.ReadImage(os.path.join(images_path, filename))\n x = sitk.GetArrayFromImage(x)\n y = sitk.ReadImage(os.path.join(labels_path, filename))\n y = sitk.GetArrayFromImage(y)\n\n # Shape expected: (35, 51, 35)\n # Average label shape: (24.5, 37.8, 21.0)\n assert x.shape == y.shape\n\n # No longer distinguish between hippocampus proper and subiculum\n if merge_labels:\n y[y == 2] = 1\n\n # Save new images so they can be loaded directly\n study_name = filename.replace('_', '').split('.nii')[0]\n sitk.WriteImage(sitk.GetImageFromArray(x), join_path([target_path, study_name + \".nii.gz\"]))\n sitk.WriteImage(sitk.GetImageFromArray(y), join_path([target_path, study_name + \"_gt.nii.gz\"]))", "def prep_input(im, acc=4):\n mask = cs.cartesian_mask(im.shape, acc, sample_n=8)\n im_und, k_und = cs.undersample(im, mask, centred=False, norm='ortho')\n im_gnd_l = to_lasagne_format(im)\n im_und_l = to_lasagne_format(im_und)\n k_und_l = 
to_lasagne_format(k_und)\n mask_l = to_lasagne_format(mask, mask=True)\n\n return im_und_l, k_und_l, mask_l, im_gnd_l", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUL2IUL2_cast(*args)", "def reconstruct_image(img_a, nnf):\r\n final_img = np.zeros_like(img_a)\r\n size = nnf.shape[0]\r\n scale = img_a.shape[0] // nnf.shape[0]\r\n for i in range(size):\r\n for j in range(size):\r\n x, y = nnf[i, j]\r\n if final_img[scale * i:scale * (i + 1), scale * j:scale * (j + 1)].shape == img_a[scale * y:scale * (y + 1),\r\n scale * x:scale * (x + 1)].shape:\r\n final_img[scale * i:scale * (i + 1), scale * j:scale * (j + 1)] = img_a[scale * y:scale * (y + 1),\r\n scale * x:scale * (x + 1)]\r\n return final_img", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUC2IUC2_cast(*args)", "def load_nii(img_path):\n nimg = nib.load(img_path)\n return nimg.get_data(), nimg.affine, nimg.header", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIF2IF2_cast(*args)", "def pil_to_npa(pil_img, as_float=False):\n if pil_img.mode == 'L':\n npa_shape = pil_img.size\n elif pil_img.mode == 'RGB':\n npa_shape = (pil_img.height, pil_img.width, 3)\n elif pil_img.mode == 'RGBA':\n npa_shape = (pil_img.height, pil_img.width, 4)\n elif pil_img.mode in ['CMYK']:\n return pil_to_npa(pil_img.convert('RGB'))\n else:\n raise RuntimeError(\"{}: Invalid PIL mode\".format(pil_img.mode))\n npa = np.array(pil_img.getdata(), dtype=np.uint8).reshape(*npa_shape)\n if as_float:\n npa = (npa / 255).astype(IMG_FLOAT_TYPE)\n return npa", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def cast_and_normalise_images(images):\n images = (tf.cast(images, tf.float32) / 255.0) - 0.5\n return images", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def load_image(img_file, as_float=False):\n if hasattr(img_file, 'read'):\n pil_img = Image.open(img_file)\n else:\n with open(img_file, 'rb') as f:\n pil_img = Image.open(f)\n pil_img.load()\n return pil_to_npa(pil_img, as_float=as_float)", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def convert_alleles(self, alleles):\n raise NotImplementedError", "def preprocess_nib(nib_img, is_mask=False):\n if not is_mask:\n img_data = nib_img.get_fdata()\n if img_data.max() > 256:\n mask256 = img_data < 257\n img_data = img_data * mask256\n img_data = img_data.astype('float32')\n mean = np.mean(img_data)\n std = np.std(img_data)\n img_data /= img_data.max() # intensity normalization\n img_data -= mean # data centering\n img_data /= std # data normalization\n\n return np.expand_dims(img_data, axis=3)\n else:\n img_data = np.array(nib_img.get_fdata(), dtype='float32')\n\n return np.expand_dims(img_data, axis=3)", "def __augmented_images(self, info, start):\n count = start\n final_img_to_save = []\n for pair in info:\n processedImage = self.__processImage(os.path.join(WORKING_DIR, pair[0]))\n if processedImage == None:\n continue\n # translation is not that important since CNNs are resistant to image translations\n rotatedImages = self.__applyRotations(processedImage)\n\n rotCount = 1\n for img in rotatedImages:\n filename = str(count) + \"_\" + str(rotCount) + \".jpg\"\n # img.save(os.path.join(directory, filename))\n final_img_to_save.append((img, pair[1], filename))\n rotCount += 1\n\n 
print(\"Augmenting image: {:05}\".format(count))\n count += 1\n return final_img_to_save", "def test_afni_nifti():\n im = ndar.Image('test_data/06025B_mprage.nii.gz')\n assert os.path.exists('%s.BRIK' % im.afni)\n assert os.path.exists('%s.HEAD' % im.afni)", "def cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUS2IUS2_cast(*args)", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def convert_image(rel_path_in, rel_path_out):\n #Lade Bild mit Originalmaske im Grayscale-Modus\n img = cv2.imread(rel_path_in, cv2.IMREAD_GRAYSCALE)\n #Jetzt steht in img ein 2D-Array/Matrix mit jedem Graufstufen-Wert der Pixel\n #Skaliere Pixelwerte runter\n for zeilen_index in range(0,img.__len__()):\n for spalten_index in range(0, img[zeilen_index].__len__()):\n #Hole Pixel-Wert an aktueller Stelle\n wert = img[zeilen_index][spalten_index]\n #Falls Wert != 0 (also Pixel gehoert nicht zum Hintergrund)\n if wert != 0: # != 0 statt == 255, da auch z.B. 253er Werte in den Masken existieren... 
(vielleicht durch Konvertierung in anderes Format?)\n #Markiere den Pixel mit 1 statt 255\n img[zeilen_index][spalten_index]=1\n #print(img)\n #*NACHDEM* alle Pixel skaliert wurden, zeichne Umrandung der Objekte\n umrandung_zeichnen(img)\n #change_color(img, 0, 255)\n #change_color(img, 1, 0)\n #print(img)\n #Schreibe Ergebnis-Bild in uebergebene Datei\n cv2.imwrite(rel_path_out, img)", "def cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUS2IUS2IUS2IUS2_cast(*args)", "def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)", "def test_nirspec_slit_transformations(verbose=False, siaf=None):\n if siaf is None:\n siaf = Siaf(instrument)\n else:\n siaf = copy.deepcopy(siaf)\n\n threshold = 0.010 # arc-seconds\n pixel_threshold = 10 * threshold\n\n labels = ['X', 'Y']\n from_frame = 'sci'\n to_frames = 'det tel'.split()\n x_sci = np.linspace(-10, 10, 3)\n y_sci = np.linspace(10, -10, 3)\n\n\n # for aper_name in 'NRS_S1600A1_SLIT NRS_S200B1_SLIT NRS_FIELD1_MSA4 NRS1_FULL'.split():\n for aper_name in siaf.apertures.keys():\n skip = False\n aperture = siaf[aper_name]\n\n if (aperture.AperType not in ['SLIT']) or ('MIMF' in aper_name) or (\n not hasattr(aperture, '_parent_aperture')):\n skip = True\n\n if skip is False:\n parent_aperture = siaf[aperture._parent_aperture.AperName]\n if verbose:\n print(\n 'testing {} {} parent {}'.format(siaf.instrument, aper_name, parent_aperture.AperName))\n\n # verify that correct reference point can be retrieved\n v2ref, v3ref = aperture.reference_point('tel')\n assert np.abs(v2ref - aperture.V2Ref) < threshold\n assert np.abs(v3ref - aperture.V3Ref) < threshold\n\n # verify that we get the same tel to sci transform whether using slit or parent\n # aperture name\n xsciref, ysciref = aperture.reference_point('sci')\n xscidref, yscidref = parent_aperture.tel_to_sci(v2ref, v3ref)\n xsciaref, ysciaref = aperture.tel_to_sci(v2ref, v3ref)\n error = np.sqrt((xsciref - xscidref) ** 2 + (ysciref - yscidref) ** 2)\n if verbose:\n print(\n '{} {}: Error in reference point {:02.6f} pixels. 
(parent aperture is {})'.format(siaf.instrument, aper_name,\n error, parent_aperture.AperName))\n assert error < pixel_threshold\n\n # verify that corners can be retrieved and check 1st vertice\n ixc, iyc = aperture.corners('idl')\n assert np.abs(ixc[0] - aperture.XIdlVert1) < pixel_threshold\n assert np.abs(iyc[0] - aperture.YIdlVert1) < pixel_threshold\n\n # verify that we get the same tel to det transform whether using slit or parent\n # aperture name\n v2c, v3c = aperture.corners('tel')\n xc, yc = aperture.corners('det')\n xdc, ydc = parent_aperture.tel_to_det(v2c, v3c)\n xac, yac = aperture.tel_to_det(v2c, v3c)\n xic, yic = aperture.idl_to_det(ixc, iyc)\n error = np.max(np.abs(\n np.concatenate((xc - xdc, yc - ydc, xc - xac, yc - yac, xc - xic, yc - yic))))\n if verbose:\n print(\n '{} {}: Max error in corners {:02.6f} pixels.'.format(siaf.instrument, aper_name,\n error))\n assert error < pixel_threshold\n\n #testing roundtrip error\n for to_frame in to_frames:\n forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))\n backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))\n\n x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))\n x_mean_error = np.mean(np.abs(x_sci - x_out))\n y_mean_error = np.mean(np.abs(y_sci - y_out))\n for i, error in enumerate([x_mean_error, y_mean_error]):\n if verbose:\n print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(\n siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))\n assert error < pixel_threshold", "def npa_to_pil(npa):\n if npa.dtype in [np.float16, np.float32, np.float64]:\n npa = (npa * 255).astype(np.uint8)\n return Image.fromarray(npa)", "def _load_image(img, verbose=None):\n nib = _import_nibabel('use GUI')\n if not isinstance(img, nib.spatialimages.SpatialImage):\n logger.info(f'Loading {img}')\n _check_fname(img, overwrite='read', must_exist=True)\n img = nib.load(img)\n # get data\n orig_data = np.array(img.dataobj).astype(np.float32)\n # reorient data to RAS\n ornt = nib.orientations.axcodes2ornt(\n nib.orientations.aff2axcodes(img.affine)).astype(int)\n ras_ornt = nib.orientations.axcodes2ornt('RAS')\n ornt_trans = nib.orientations.ornt_transform(ornt, ras_ornt)\n img_data = nib.orientations.apply_orientation(orig_data, ornt_trans)\n orig_mgh = nib.MGHImage(orig_data, img.affine)\n aff_trans = nib.orientations.inv_ornt_aff(ornt_trans, img.shape)\n vox_ras_t = np.dot(orig_mgh.header.get_vox2ras_tkr(), aff_trans)\n return img_data, vox_ras_t", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIUC2IUC2IUC2IUC2_cast(*args)", "def ras2ijk(self,A):\n #productive #math #coordinate-space-conversion\n profprint()\n m=vtk.vtkMatrix4x4()\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n volumeNode.GetIJKToRASMatrix(m)\n m.Invert()\n imageData = volumeNode.GetImageData()\n ijk=[0,0,0]\n k = vtk.vtkMatrix4x4()\n o = vtk.vtkMatrix4x4()\n k.SetElement(0,3,A[0])\n k.SetElement(1,3,A[1])\n k.SetElement(2,3,A[2])\n k.Multiply4x4(m,k,o)\n ijk[0] = o.GetElement(0,3)\n ijk[1] = 
o.GetElement(1,3)\n ijk[2] = o.GetElement(2,3)\n return ijk", "def savi(self,\n img):\n return (img.select(['NIR']).subtract(img.select(['RED'])).multiply(1 + self.const))\\\n .divide(img.select(['NIR']).add(img.select(['RED'])).add(self.const))\\\n .select([0], ['SAVI']).multiply(self.scale_factor).toInt16()", "def ir(subject):\n return image(data.get_ir(subject))", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def itkSpeckleNoiseImageFilterIUS2IUS2_cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUS2IUS2_cast(*args)", "def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUC3IUC3_cast(*args)", "def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img", "def ras2ijk(self, A):\r\n # productive #math #coordinate-space-conversion #frequent\r\n if frequent: profprint()\r\n m = vtk.vtkMatrix4x4()\r\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\r\n volumeNode.GetIJKToRASMatrix(m)\r\n m.Invert()\r\n imageData = volumeNode.GetImageData()\r\n ijk = [0, 0, 0]\r\n k = vtk.vtkMatrix4x4()\r\n o = vtk.vtkMatrix4x4()\r\n k.SetElement(0, 3, A[0])\r\n k.SetElement(1, 3, A[1])\r\n k.SetElement(2, 3, A[2])\r\n k.Multiply4x4(m, k, o)\r\n ijk[0] = o.GetElement(0, 3)\r\n ijk[1] = o.GetElement(1, 3)\r\n ijk[2] = o.GetElement(2, 3)\r\n return ijk", "def preprocess_nico(path: Path) -> None:\n for superclass in (\"animals\", \"vehicles\"):\n superclass_dir = path / superclass\n for class_dir in superclass_dir.glob(\"*\"):\n for context_dir in class_dir.glob(\"*\"):\n images_paths: list[Path] = []\n for ext in (\"jpg\", \"jpeg\", \"png\", \"gif\"):\n images_paths.extend(context_dir.glob(f\"**/*.{ext}\"))\n for counter, image_path in enumerate(images_paths):\n try:\n image = Image.open(image_path)\n if image.format == \"GIF\":\n image = image.convert(\"RGBA\")\n # Convert from gif to jpeg by extracting the first frame\n new_image = _gif_to_jpeg(image)\n new_image_path = image_path.with_suffix(\".jpg\")\n # Delete the original gif\n image_path.unlink()\n new_image.save(new_image_path, \"JPEG\")\n assert new_image_path.exists()\n image_path = new_image_path\n\n concept = image_path.parent.parent.stem\n context = image_path.parent.stem\n new_name = (\n image_path.parent\n / f\"{concept}_{context}_{counter:04}{image_path.suffix}\".replace(\n \" \", \"_\"\n )\n )\n image_path.rename(new_name)\n # Image is corrupted - delete it\n except UnidentifiedImageError:\n image_path.unlink()", "def ff_correct_image(image):\n pass", "def ff_correct_image(image):\n pass", "def main(input_folder, output_images_folder, 
output_files_folder, bb_file,\n archive_folder, name_mapping):\n\n output_images_folder = Path(output_images_folder)\n output_files_folder = Path(output_files_folder)\n archive_folder = Path(archive_folder)\n output_images_folder.mkdir(exist_ok=True)\n archive_folder.mkdir(exist_ok=True)\n logger.info(\"Converting Dicom to Nifty - START\")\n converter = NiftiConverter(\n padding=\"whole_image\",\n resampling_spacing=-1,\n list_labels=[\"GTVt\"],\n cores=10,\n )\n _ = converter(input_folder, output_folder=output_images_folder)\n\n logger.info(\"Converting Dicom to Nifty - END\")\n logger.info(\"Removing extra VOI - START\")\n move_extra_vois(output_images_folder, archive_folder)\n logger.info(\"Removing extra VOI - END\")\n logger.info(\"Renaming files- START\")\n correct_names(output_images_folder, name_mapping)\n logger.info(\"Renaming files- END\")\n logger.info(\"Cleaning the VOIs - START\")\n clean_vois(output_images_folder)\n logger.info(\"Cleaning the VOIs - END\")\n\n logger.info(\"Computing the bounding boxes - START\")\n bb_df = compute_bbs(output_images_folder)\n bb_df.to_csv(bb_file)\n logger.info(\"Computing the bounding boxes - END\")", "def reorient_img(img, namer):\n from ndmg.utils.reg_utils import normalize_xform\n\n # Load image, orient as RAS\n orig_img = nib.load(img)\n reoriented = nib.as_closest_canonical(orig_img)\n normalized = normalize_xform(reoriented)\n\n # Image may be reoriented\n if normalized is not orig_img:\n print(\"%s%s%s\" % (\"Reorienting \", img, \" to RAS+...\"))\n out_name = \"%s%s%s%s\" % (\n namer.dirs[\"output\"][\"prep_anat\"],\n \"/\",\n img.split(\"/\")[-1].split(\".nii.gz\")[0],\n \"_reor_RAS.nii.gz\",\n )\n else:\n out_name = \"%s%s%s%s\" % (\n namer.dirs[\"output\"][\"prep_anat\"],\n \"/\",\n img.split(\"/\")[-1].split(\".nii.gz\")[0],\n \"_RAS.nii.gz\",\n )\n\n normalized.to_filename(out_name)\n\n return out_name", "def classifyPhaseImage(fr_nb):\n phase_path = os.path.join(\"..\",'data','microglia','Beacon-1 unst',\"Scene1Interval\"+str(fr_nb)+\"_PHASE.png\")\n \n phase= Image.open(phase_path)\n phase = np.asarray(phase)\n X=phase.reshape(-1,1)\n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=3).fit(X)\n classified = kmeans.labels_\n classified=classified.reshape(phase.shape)\n si2(phase,classified,\"Phase image\",\"Classification\")\n return classified", "def AA2Image(readpath, savepath, header, font_data):\n if not os.path.isdir(savepath):\n os.makedirs(savepath)\n print('convert txt to png. 
save path: ', savepath)\n\n files = glob.glob(readpath+'*.txt')\n\n for file in files:\n ascii_art = AsciiArt(file)\n ascii_art_image = ascii_art.image(font_data)\n filename = header + os.path.basename(file)[:-4] + '.png'\n ascii_art_image = Image.fromarray(ascii_art_image)\n ascii_art_image = ascii_art_image.convert('L')\n ascii_art_image.save(savepath + filename)\n print('saved ', filename)", "def selfies2image(s):\n mol = MolFromSmiles(sf.decoder(s), sanitize=True)\n return Draw.MolToImage(mol)", "def cast(*args):\n return _itkScalarConnectedComponentImageFilterPython.itkScalarConnectedComponentImageFilterIUS2IUS2_cast(*args)", "def cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2ISS2_cast(obj)", "def itkSpeckleNoiseImageFilterIUL2IUL2_cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUL2IUL2_cast(*args)", "def prepare_image(img):\n img = img.filter(ImageFilter.SMOOTH_MORE)\n img = img.filter(ImageFilter.SMOOTH_MORE)\n if 'L' != img.mode:\n img = img.convert('L')\n return img", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUS3IUS3_cast(*args)", "def cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUL2IUL2_cast(*args)", "def build_label_transform():\n\n return NALabelEncoder()", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. 
Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def cast(obj: 'itkLightObject') -> \"itkHuangThresholdImageFilterIUS2ISS2 *\":\n return _itkHuangThresholdImageFilterPython.itkHuangThresholdImageFilterIUS2ISS2_cast(obj)", "def itkScalarImageKmeansImageFilterIUS2ISS2_cast(obj: 'itkLightObject') -> \"itkScalarImageKmeansImageFilterIUS2ISS2 *\":\n return _itkScalarImageKmeansImageFilterPython.itkScalarImageKmeansImageFilterIUS2ISS2_cast(obj)", "def cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUC2IUC2_cast(*args)", "def cast(*args):\n return _itkTernaryAddImageFilterPython.itkTernaryAddImageFilterIF2IF2IF2IF2_cast(*args)", "def ndwi(self,\n img):\n return img.normalizedDifference(['NIR', 'SWIR2']).select([0], ['NDWI']).multiply(self.scale_factor)", "def make_avrage_data(train_images,train_labels, log=True):\n\n M = len(train_labels)\n train_images_list, train_label_list = train_svm_model.make_HuMoment_data(num_train=M, images=train_images,\n tag=train_labels, log_transform=False)\n train_images_list = np.array(train_images_list)\n train_label_list = np.array(train_label_list)\n\n indexes_0 = np.where(train_label_list == 0)\n indexes_1 = np.where(train_label_list == 1)\n indexes_2 = np.where(train_label_list == 2)\n indexes_3 = np.where(train_label_list == 3)\n indexes_4 = np.where(train_label_list == 4)\n indexes_5 = np.where(train_label_list == 5)\n indexes_6 = np.where(train_label_list == 6)\n indexes_7 = np.where(train_label_list == 7)\n indexes_8 = np.where(train_label_list == 8)\n indexes_9 = np.where(train_label_list == 9)\n\n image_0 = np.array(train_images_list[indexes_0])\n image_1 = np.array(train_images_list[indexes_1])\n image_2 = 
np.array(train_images_list[indexes_2])\n image_3 = np.array(train_images_list[indexes_3])\n image_4 = np.array(train_images_list[indexes_4])\n image_5 = np.array(train_images_list[indexes_5])\n image_6 = np.array(train_images_list[indexes_6])\n image_7 = np.array(train_images_list[indexes_7])\n image_8 = np.array(train_images_list[indexes_8])\n image_9 = np.array(train_images_list[indexes_9])\n\n avrage_train_set = []\n avrage_train_tag = []\n for k in range(0, 5001, 10):\n avrage_train_set.append(make_avrage_vector(image_0, k))\n avrage_train_tag.append(0)\n avrage_train_set.append(make_avrage_vector(image_1, k))\n avrage_train_tag.append(1)\n avrage_train_set.append(make_avrage_vector(image_2, k))\n avrage_train_tag.append(2)\n avrage_train_set.append(make_avrage_vector(image_3, k))\n avrage_train_tag.append(3)\n avrage_train_set.append(make_avrage_vector(image_4, k))\n avrage_train_tag.append(4)\n avrage_train_set.append(make_avrage_vector(image_5, k))\n avrage_train_tag.append(5)\n avrage_train_set.append(make_avrage_vector(image_6, k))\n avrage_train_tag.append(6)\n avrage_train_set.append(make_avrage_vector(image_7, k))\n avrage_train_tag.append(7)\n avrage_train_set.append(make_avrage_vector(image_8, k))\n avrage_train_tag.append(8)\n avrage_train_set.append(make_avrage_vector(image_9, k))\n avrage_train_tag.append(9)\n\n if(log):\n length = len(avrage_train_set)\n avrage_train_set_log = []\n for m in range(0, length):\n avrage_train_set_log.append(train_svm_model.log_transformation(avrage_train_set[m]))\n avrage_train_set = avrage_train_set_log\n\n return avrage_train_set, avrage_train_tag", "def itkSpeckleNoiseImageFilterIUC2IUC2_cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUC2IUC2_cast(*args)", "def convert_yolo_to_ls(input_dir, out_file,\n to_name='image', from_name='label', out_type=\"annotations\",\n image_root_url='/data/local-files/?d=', image_ext='.jpg'):\n\n tasks = []\n logger.info('Reading YOLO notes and categories from %s', input_dir)\n\n # build categories=>labels dict\n notes_file = os.path.join(input_dir, 'classes.txt')\n with open(notes_file) as f:\n lines = [line.strip() for line in f.readlines()]\n categories = {i: line for i, line in enumerate(lines)}\n logger.info(f'Found {len(categories)} categories')\n\n # generate and save labeling config\n label_config_file = out_file.replace('.json', '') + '.label_config.xml'\n generate_label_config(categories, to_name, from_name, label_config_file)\n\n # labels, one label per image\n labels_dir = os.path.join(input_dir, 'labels')\n logger.info('Converting labels from %s', labels_dir)\n\n for f in os.listdir(labels_dir):\n image_file_base = f[0:-4] + image_ext\n image_file = os.path.join(input_dir, 'images', image_file_base)\n label_file = os.path.join(labels_dir, f)\n\n if not f.endswith('.txt'):\n continue\n\n if not os.path.exists(image_file):\n logger.error(\"Can't convert YOLO to Label Studio JSON without image source: %s\", image_file)\n continue\n\n task = {\n # 'annotations' or 'predictions'\n out_type: [\n {\n \"result\": [],\n \"ground_truth\": False,\n }\n ],\n \"data\": {\n \"image\": os.path.join(image_root_url, image_file_base)\n }\n }\n\n # read image sizes\n im = Image.open(image_file)\n image_width, image_height = im.size\n\n with open(label_file) as file:\n # convert all bounding boxes to Label Studio Results\n lines = file.readlines()\n for line in lines:\n label_id, x, y, width, height = line.split()\n x, y, width, height = float(x), float(y), float(width), 
float(height)\n item = {\n \"id\": uuid.uuid4().hex[0:10],\n \"type\": \"rectanglelabels\",\n \"value\": {\n \"x\": (x-width/2) * 100,\n \"y\": (y-height/2) * 100,\n \"width\": width * 100,\n \"height\": height * 100,\n \"rotation\": 0,\n \"rectanglelabels\": [\n categories[int(label_id)]\n ]\n },\n \"to_name\": to_name,\n \"from_name\": from_name,\n \"image_rotation\": 0,\n \"original_width\": image_width,\n \"original_height\": image_height\n }\n task['annotations'][0]['result'].append(item)\n\n tasks.append(task)\n\n if len(tasks) > 0:\n logger.info('Saving Label Studio JSON to %s', out_file)\n with open(out_file, 'w') as out:\n json.dump(tasks, out)\n\n print('\\n'\n f' 1. Create a new project in Label Studio\\n'\n f' 2. Use Labeling Config from \"{label_config_file}\"\\n'\n f' 3. Setup serving for images [e.g. you can use Local Storage (or others):\\n'\n f' https://labelstud.io/guide/storage.html#Local-storage]\\n'\n f' 4. Import \"{out_file}\" to the project\\n')\n else:\n logger.error('No labels converted, maybe ')", "def cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIUL3IUL3_cast(*args)", "def test_afni_afni():\n im = ndar.Image('test_data/NDAR_INVZU049GXV_image03_1326225820791.zip')\n assert im.afni == im.path(im.files['AFNI'][0])", "def check_niimg(\n niimg,\n ensure_ndim=None,\n atleast_4d=False,\n dtype=None,\n return_iterator=False,\n wildcards=True,\n):\n from ..image import new_img_like # avoid circular imports\n\n niimg = stringify_path(niimg)\n\n if isinstance(niimg, str):\n if wildcards and ni.EXPAND_PATH_WILDCARDS:\n # Ascending sorting + expand user path\n filenames = sorted(glob.glob(os.path.expanduser(niimg)))\n\n # processing filenames matching globbing expression\n if len(filenames) >= 1 and glob.has_magic(niimg):\n niimg = filenames # iterable case\n # niimg is an existing filename\n elif [niimg] == filenames:\n niimg = filenames[0]\n # No files found by glob\n elif glob.has_magic(niimg):\n # No files matching the glob expression, warn the user\n message = (\n \"No files matching the entered niimg expression: \"\n \"'%s'.\\n You may have left wildcards usage \"\n \"activated: please set the global constant \"\n \"'nilearn.EXPAND_PATH_WILDCARDS' to False to \"\n \"deactivate this behavior.\"\n ) % niimg\n raise ValueError(message)\n else:\n raise ValueError(f\"File not found: '{niimg}'\")\n elif not os.path.exists(niimg):\n raise ValueError(f\"File not found: '{niimg}'\")\n\n # in case of an iterable\n if hasattr(niimg, \"__iter__\") and not isinstance(niimg, str):\n if return_iterator:\n return _iter_check_niimg(\n niimg, ensure_ndim=ensure_ndim, dtype=dtype\n )\n return concat_niimgs(niimg, ensure_ndim=ensure_ndim, dtype=dtype)\n\n # Otherwise, it should be a filename or a SpatialImage, we load it\n niimg = load_niimg(niimg, dtype=dtype)\n\n if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1:\n # \"squeeze\" the image.\n data = _safe_get_data(niimg)\n affine = niimg.affine\n niimg = new_img_like(niimg, data[:, :, :, 0], affine)\n if atleast_4d and len(niimg.shape) == 3:\n data = _get_data(niimg).view()\n data.shape = data.shape + (1,)\n niimg = new_img_like(niimg, data, niimg.affine)\n\n if ensure_ndim is not None and len(niimg.shape) != ensure_ndim:\n raise DimensionError(len(niimg.shape), ensure_ndim)\n\n if return_iterator:\n return (_index_img(niimg, i) for i in range(niimg.shape[3]))\n\n return niimg", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - 
self._norm_mean)/self._norm_std\n return output", "def ndvi(self,\n img):\n return img.normalizedDifference(['NIR', 'RED']).select([0], ['NDVI']).multiply(self.scale_factor)", "def SaveNIFTI(data, file_path):\n if(np.iscomplex(data).any()):\n data = abs(data)\n nii = nib.Nifti1Image(data, np.eye(4)) \n nib.save(nii, file_path)" ]
[ "0.6358437", "0.6124715", "0.5904855", "0.57210785", "0.57022226", "0.5685979", "0.5665494", "0.5628516", "0.5568676", "0.5515679", "0.5492054", "0.5473638", "0.5446226", "0.5407872", "0.54046696", "0.5352408", "0.53269494", "0.5309064", "0.53084403", "0.5288916", "0.52865076", "0.52808386", "0.5275108", "0.52656305", "0.52631", "0.5261811", "0.52556884", "0.5251397", "0.52504563", "0.5244521", "0.5240951", "0.52205455", "0.52041364", "0.51507", "0.51503944", "0.51405907", "0.51396996", "0.51374936", "0.5111446", "0.5089651", "0.50810075", "0.5080909", "0.5068678", "0.5051426", "0.50483847", "0.5037269", "0.5032968", "0.5032709", "0.5028701", "0.5010743", "0.4999207", "0.49952543", "0.4994128", "0.49887657", "0.4976476", "0.4969883", "0.4964038", "0.4943308", "0.49389192", "0.4938589", "0.49344173", "0.49315226", "0.492961", "0.4926459", "0.4922166", "0.4920279", "0.4908147", "0.49064612", "0.49058062", "0.4905574", "0.4905382", "0.4903281", "0.4903281", "0.49027503", "0.48952568", "0.4894211", "0.48906234", "0.48886833", "0.488173", "0.48782787", "0.48762023", "0.4873649", "0.4868486", "0.48643696", "0.48576662", "0.4850058", "0.48446074", "0.48428774", "0.48421335", "0.48397148", "0.48354143", "0.48347718", "0.48341593", "0.48304275", "0.48234898", "0.48210827", "0.48209956", "0.482039", "0.48169592", "0.48152125" ]
0.55716157
8
Create the fieldmap(s) and the corresponding magnitude images.
def MakeFieldmaps(self): if self.verbose: print 'Compute fieldmaps.' for entry in self.info: if self.info[entry]['type'] == 'fmap': if self.info[entry]['imgfile'] == None: # Fieldmap data not found. return # Make a magnitude image for use in checking registration. cmd = 'convert_file -f0 -m0 %s %s nii' % \ (entry, self.info[entry]['magfile']) self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii']) # Make fieldmap. Use separate loop in case make_fmap aborts. for entry in self.info: if self.info[entry]['type'] == 'fmap': fmapname = self.info[entry]['imgfile'] if not os.path.exists('%s.nii' % fmapname) or self.redo: # Couldn't find or existing fmap, compute a new one. if self.verbose: extra_args = '-v' else: extra_args = '' if self.info[entry]['correct_fmap_phase'] == 'force': extra_args += ' --force-slicecorr' elif self.info[entry]['correct_fmap_phase'] == 'omit': extra_args += ' --omit-slicecorr' cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname) # error = self.ExecCmd(cmd, halt_on_error=False) if self.no_fmapcorr: halt_on_error = False else: halt_on_error = True error = self.CheckExec(cmd, ['%s.nii' % fmapname], \ halt_on_error=halt_on_error) if error: self.info[entry]['valid'] = False del self.fmaps[entry]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP", "def _get_magnitudes(self):\n\n self.logging.debug('Get magnitudes ' )\n\n self.mags = {}\n\n steps = ['dbopen netmag', 'dbsubset orid != NULL']\n\n fields = ['orid', 'magid', 'magnitude', 'magtype',\n 'auth', 'uncertainty', 'lddate']\n\n for v in extract_from_db(self.db, steps, fields):\n orid = v.pop('orid')\n self.logging.debug('new mag for orid:%s' % orid)\n\n try:\n v['strmag'] = '%0.1f %s' % ( float(v['magnitude']), v['magtype'] )\n except:\n v['strmag'] = '-'\n\n if not orid in self.mags:\n self.mags[ orid ] = {}\n\n self.mags[ orid ][ v['magid'] ] = v", "def _SetFmapInfo(self):\n for epi in self.pfiles + self.epirt_paths:\n self.info[epi]['fmapname'] = None\n self.info[epi]['fmap_entry'] = None\n for entry in self.entry_map['fmap']:\n fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']\n if self.info[entry]['plane'] == self.info[epi]['plane']:\n# Use the fieldmap acquired at the same plane.\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n else:\n# for fmap in self.fmaps.keys():\n for entry in self.entry_map['fmap']:\n# No fmap at same orientation, look for fmaps in other planes.\n# There won't be more than one, so it isn't much of a choice.\n fmap_name = self.info[entry]['imgfile'] + \\\n self.info[entry]['suffix']\n if self.info[entry]['plane'] == 'sagittal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'axial':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'coronal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'oblique':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n self.info[epi]['plane'] = 'oblique'\n break", "def _fractalize(self, f, compMap):\n\n from PIL import Image\n\n def toImage(cmObject):\n \"\"\"cmObject is the ComplexMap instance\"\"\"\n size = 
self.gridsize, self.gridsize\n cm = cmObject()\n master = []\n for item in cm:\n master.extend(item)\n\n #Apply default Mandelbrot Set Function\n master = map(f, master)\n\n col1 = (0,0,102,0)\n col2 = (255,204,51,0)\n\n def select_color(x):\n if x == 1: return col1\n else: return col2\n\n master = map(select_color, master)\n \n image = Image.new(\"RGBA\", size, (0,0,0,0))\n image.putdata(master)\n return image\n\n image_width = 0\n image_height = 0\n image_list = []\n #Unpack row\n for (y, row) in enumerate(compMap):\n image_row = []\n\n #Unpack columns\n for item in row:\n #Unpack the individual\n image_row.append(toImage(item))\n\n width = len(image_row) * self.gridsize\n height = self.gridsize\n row_holder_image = Image.new(\"RGBA\", (width, height), (0,0,0,0)) \n\n for (n, image) in enumerate(image_row):\n row_holder_image.paste(image, ((n*self.gridsize),0))\n\n image_list.append(row_holder_image)\n \n image_width = width\n image_height = len(image_list) * self.gridsize\n\n image_whole = Image.new(\"RGBA\", (image_width, image_height), (0,0,0,0))\n for (n, image) in enumerate(image_list):\n image_whole.paste(image, (0, (n*self.gridsize)))\n image_whole.save(\"fractal.jpg\", \"JPEG\")\n\n return", "def update_maps(self):\n if self.fmodel is None:\n return\n def fft_map(map_coeffs, resolution_factor = 0.25):\n return map_coeffs.fft_map(resolution_factor = resolution_factor,\n ).apply_sigma_scaling().real_map_unpadded()\n map_types = [\"2mFo-DFc\", \"mFo-DFc\"]\n map_keys = [\"2mFo-DFc\", \"mFo-DFc\"]\n if (self.fmodel.f_obs().anomalous_flag()):\n if (self.params.anom_map_type == \"phaser\"):\n map_types.append(\"llg\")\n elif (self.params.anom_map_type == \"residual\"):\n map_types.append(\"anom_residual\")\n else :\n map_types.append(\"anom\")\n map_keys.append(\"anom\")\n if (self.use_svm):\n map_types.append(\"mFo\")\n map_keys.append(\"mFo\")\n # To save memory, we sample atomic positions immediately and throw out\n # the actual maps (instead of keeping up to 3 in memory)\n sites_frac = self.xray_structure.sites_frac()\n sites_cart = self.xray_structure.sites_cart()\n self._principal_axes_of_inertia = [ None ] * len(sites_frac)\n self._map_variances = [ None ] * len(sites_frac)\n self._map_gaussian_fits = {}\n self.calpha_mean_two_fofc = 0\n for map_type, map_key in zip(map_types, map_keys):\n real_map = self.get_map(map_type)\n if (real_map is not None):\n # Gather values for map peaks at each site\n self._map_values[map_key] = flex.double(sites_frac.size(), 0)\n self._map_gaussian_fits[map_key] = [ None ] * len(sites_frac)\n for i_seq, site_frac in enumerate(sites_frac):\n atom = self.pdb_atoms[i_seq]\n resname = atom.fetch_labels().resname.strip().upper()\n if (resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED or\n atom.segid.strip().upper() in [\"ION\"]):\n value = real_map.eight_point_interpolation(site_frac)\n self._map_values[map_key][i_seq] = value\n if (self.use_svm):\n gaussian_fit = utils.fit_gaussian(\n unit_cell=self.unit_cell,\n site_cart=atom.xyz,\n real_map=real_map)\n self._map_gaussian_fits[map_key][i_seq] = gaussian_fit\n\n if map_type in [\"2mFo-DFc\"]:\n # Gather values on map variance and principal axes of interia\n from cctbx import maptbx\n for i_seq, site_cart in enumerate(sites_cart):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n if resname in WATER_RES_NAMES + mmtbx.ions.SUPPORTED:\n # XXX not totally confident about how I'm weighting this...\n p_a_i = maptbx.principal_axes_of_inertia(\n real_map = real_map,\n site_cart = site_cart,\n 
unit_cell = self.unit_cell,\n radius = self.params.map_sampling_radius)\n self._principal_axes_of_inertia[i_seq] = p_a_i\n variance = maptbx.spherical_variance_around_point(\n real_map = real_map,\n unit_cell = self.unit_cell,\n site_cart = site_cart,\n radius = self.params.map_sampling_radius)\n self._map_variances[i_seq] = variance\n elif (i_seq in self.calpha_sel):\n # Also collect some info in average C_alpha 2FoFc peak heights\n self.calpha_mean_two_fofc += real_map.eight_point_interpolation(\n sites_frac[i_seq])\n del real_map\n\n if (self.calpha_mean_two_fofc > 0):\n n_calpha = len(self.calpha_sel)\n assert (n_calpha > 0)\n self.calpha_mean_two_fofc /= n_calpha\n\n # Gather info on carbons' average Fo peak height for use in estimating other\n # sites' atomic weight\n self.carbon_fo_values = None\n if (len(self.carbon_sel) > 0):\n self.carbon_fo_values = flex.double()\n self._map_values[\"mFo\"] = flex.double(sites_frac.size(), 0)\n fo_map = fft_map(self.fmodel.map_coefficients(\n map_type = \"mFo\",\n exclude_free_r_reflections = True,\n fill_missing = True))\n\n for i_seq, site_frac in enumerate(sites_frac):\n resname = self.pdb_atoms[i_seq].fetch_labels().resname.strip()\n element = self.pdb_atoms[i_seq].element.strip()\n if (element == \"C\") or ((element == \"O\") and (resname in WATER_RES_NAMES)):\n map_value = fo_map.eight_point_interpolation(site_frac)\n self._map_values[\"mFo\"][i_seq] = map_value\n if (element == \"C\"):\n self.carbon_fo_values.append(map_value)\n del fo_map", "def _make_image_info_hst(self, flistname):\n\n flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n #fname=line.strip()\n flist.append(fname)\n magzp_list.append(magzp)\n\n magzp = np.array(magzp_list)\n\n nimage = len(flist)\n\n path_len = max([len(f) for f in flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f.replace('sci.fits','wht.fits')\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. 
Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def recon(self, spirec):\n tmpdir = tempfile.mkdtemp()\n basename = 'recon'\n basepath = os.path.join(tmpdir, basename)\n pfilename = os.path.abspath(self.pfilename)\n\n # run spirec to get the mag file and the fieldmap file\n cmd = spirec + ' -l --rotate -90 --magfile --savefmap2 --b0navigator -r ' + pfilename + ' -t ' + basename\n self.log and self.log.debug(cmd)\n sp.call(shlex.split(cmd), cwd=tmpdir, stdout=open('/dev/null', 'w'))\n\n self.image_data = np.fromfile(file=basepath+'.mag_float', dtype=np.float32).reshape([self.size_x,self.size_y,self.num_timepoints,self.num_echoes,self.num_slices],order='F').transpose((0,1,4,2,3))\n if os.path.exists(basepath+'.B0freq2') and os.path.getsize(basepath+'.B0freq2')>0:\n self.fm_data = np.fromfile(file=basepath+'.B0freq2', dtype=np.float32).reshape([self.size_x,self.size_y,self.num_echoes,self.num_slices],order='F').transpose((0,1,3,2))\n shutil.rmtree(tmpdir)", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def get_sigma_map(start_x = 0,field_height=100,field_width=100,viewing_distance=12.0,screen_pixel_size=0.282,debug=False):\n start_x_pixels = np.round(get_pixels_at_degrees(degrees=start_x,viewing_distance=viewing_distance,screen_pixel_size=screen_pixel_size))\n optical_nodal_distance = 17.0 # mm from lens to fovea\n viewing_distance_inches = viewing_distance\n viewing_distance = viewing_distance * 25.4 # mm\n center_y, center_x = 0,0\n x_coords = (start_x_pixels + np.arange(-field_width/2.0,field_width/2,1))*screen_pixel_size\n y_coords = np.arange(-field_height/2.0,field_height/2,1)*screen_pixel_size\n x,y = np.meshgrid(x_coords,y_coords)\n coords = 
np.vstack((y.ravel(),x.ravel())).T\n\n image_dist = cdist(np.matrix([center_y,center_x]),coords)\n fovea_dist = (np.pi/180.0)*optical_nodal_distance*get_degrees_at_pixels(pixels=image_dist/screen_pixel_size,viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n midget_dendritic_field_diameter_micrometers = 8.64 * np.power(fovea_dist,1.04) # midget from Dacey and Peterson, 1994\n midget_dendritic_field_diameter_millimeters = midget_dendritic_field_diameter_micrometers/1000.0\n midget_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((midget_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n\n midget_sigma_map = midget_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n midget_sigma_map = midget_sigma_map.reshape((field_height,field_width))\n\n parasol_dendritic_field_diameter_micrometers = 70.2 * np.power(fovea_dist,0.65) # parasol from Dacey and Peterson, 1994\n parasol_dendritic_field_diameter_millimeters = parasol_dendritic_field_diameter_micrometers/1000.0\n parasol_projected_field_diameter_on_image = get_pixels_at_degrees(degrees=start_x+np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size) - get_pixels_at_degrees(degrees=start_x-np.degrees(np.arctan((parasol_dendritic_field_diameter_millimeters/2.0)/optical_nodal_distance)),viewing_distance=viewing_distance_inches,screen_pixel_size=screen_pixel_size)\n parasol_sigma_map = parasol_projected_field_diameter_on_image / 6.0 # ensures 99.7% of dendrites are connected to field diameter\n parasol_sigma_map = parasol_sigma_map.reshape((field_height,field_width))\n\n return midget_sigma_map,parasol_sigma_map", "def __build_map(self):\n columns = []\n\n for i in range(self.__dimensions):\n columns.append([])\n\n for i in range(self.__dimensions):\n self.map.append(columns)", "def render_map(self):\n # first we create a blank image, on which we will draw the base map\n width = self.image_size[0]\n height = self.image_size[1]\n # ex: size of the image 1080 height, 1920 width, 3 channels of colour\n base_map = np.zeros((height, width, 3), np.uint8)\n base_map[:, :] = self.background_color\n\n # we draw each shape of the dictionary on the blank image\n for shape_id in self.shape_dict_filt:\n shape = self.shape_dict_filt[shape_id]\n points = shape.points\n pts = np.array(points, np.int32)\n cv2.polylines(base_map, [pts], True, shape.color_line,\n shape.line_thick, cv2.LINE_AA)\n\n self.map_file = base_map", "def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def field_map(ar_field, ar_coorx, ar_coory, X, picture_out, title, flip=0):\n max_val=max(ar_field)\n\n xmin=min(ar_coorx);xmax=max(ar_coorx)\n 
ymin=min(ar_coory);ymax=max(ar_coory)\n step=X\n nx=(xmax-xmin)/step+1\n ny=(ymax-ymin)/step+1\n\n ar_indx=np.array((ar_coorx-xmin)/step,int)\n ar_indy=np.array((ar_coory-ymin)/step,int)\n\n ar_map=np.ones((ny,nx))*-99.9\n ar_map[ar_indy,ar_indx]=ar_field\n\n if flip==1:\n ar_map=np.flipud(ar_map)\n\n ar_map2 = ma.masked_where(ar_map <0, ar_map)\n\n\n ut.check_file_exist(picture_out)\n\n pl.clf()\n pl.imshow(ar_map2, interpolation='Nearest',\n origin='lower', vmax=max_val,vmin=0)\n\n pl.title(title)\n pl.colorbar()\n pl.savefig(picture_out)", "def compose_fieldmap(rf1, rf2):\n offset1, size1, step1 = rf1\n offset2, size2, step2 = rf2\n\n size = tuple((size2c - 1) * step1c + size1c\n for size1c, step1c, size2c in zip(size1, step1, size2))\n offset = tuple(offset2c * step1c + offset1c\n for offset2c, step1c, offset1c in zip(offset2, step1, offset1))\n step = tuple(step2c * step1c\n for step1c, step2c in zip(step1, step2))\n return (offset, size, step)", "def _createMap(self,dimensions, density):\n compMap = []\n xmin, xmax = dimensions[0], dimensions[1]\n imin, imax = dimensions[2], dimensions[3]\n\n #Ideally the hsteps and the vsteps are the same\n hsteps = int((xmax - xmin)/density)\n vsteps = int((imax - imin)/density)\n\n for im in range(vsteps):\n compMap.append([])\n for x in range(hsteps):\n myComplexPair = complex(xmin + (density * x), imin + (density * im))\n compMap[im].append(myComplexPair)\n compMap.reverse()\n return compMap", "def plot_maps(self, mode=0, target=1, gfilter=0):\r\n\r\n mpl.figure(1)\r\n\r\n mpl.imshow(self.avgimg, cmap=matplotlib.cm.gray, interpolation=None) # scipy.ndimage.gaussian_filter(ampmap, filter, order=0, mode='reflect'), cmap=matplotlib.cm.gray)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.title('Average image')\r\n\r\n print ('target, mode: ', target, mode)\r\n\r\n max1 = np.amax(self.amplitudeImage1)\r\n\r\n if target > 1:\r\n\r\n max1 = np.amax([max1, np.amax(self.amplitudeImage2)])\r\n\r\n max1 = 10.0*int(max1/10.0)\r\n\r\n mpl.figure(2)\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n ipy0, posl, coll = self.plot_averaged_amplitude()\r\n\r\n\r\n\r\n mpl.subplot(2,2,1)\r\n\r\n self.plot_amplitude_map(self.amplitudeImage1, max1, 'Amplitude Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,3)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n for i, px in enumerate(posl):\r\n\r\n mpl.plot(px, self.ipy+ipy0, 'o-', markersize=5.0, markerfacecolor = coll[i], markeredgecolor='w')\r\n\r\n if target > 1:\r\n\r\n mpl.subplot(2,2,4)\r\n\r\n self.plot_phase_map(self.phaseImage1, 'Phase Map1', filter=gfilter)\r\n\r\n mpl.subplot(2,2,2)\r\n\r\n self.plot_fft()\r\n\r\n \r\n\r\n mpl.figure(3)\r\n\r\n mpl.title('Phase across center horizontally')\r\n\r\n # extract middle line\r\n\r\n sh = self.phaseImage1.shape\r\n\r\n iy0 = int(sh[1]/2)\r\n\r\n mpl.plot(self.phaseImage1[iy0, :], 'ko-')\r\n\r\n return\r\n\r\n \r\n\r\n if mode == 0:\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.n_times, D[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(ta.n_times, self.DF[:,5,5].view(ndarray))\r\n\r\n #mpl.plot(self.DF[:,i*55+20, 60])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')\r\n\r\n\r\n\r\n if mode == 1 and target > 1:\r\n\r\n \r\n\r\n mpl.subplot(2,3,2)\r\n\r\n mpl.title('Amplitude Map2')\r\n\r\n #scipy.ndimage.gaussian_filter(self.amplitudeImage2, 2, 
order=0, output=self.amplitudeImage2, mode='reflect')\r\n\r\n imga2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.amplitudeImage2, gfilter, order=0, mode='reflect'))\r\n\r\n imga2.set_clim = (0.0, max1)\r\n\r\n mpl.colorbar()\r\n\r\n mpl.subplot(2,3,5)\r\n\r\n imgp2 = mpl.imshow(scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect'), cmap=matplotlib.cm.hsv)\r\n\r\n mpl.colorbar()\r\n\r\n imgp2.set_clim=(-np.pi/2.0, np.pi/2.0)\r\n\r\n mpl.title('Phase Map2')\r\n\r\n # doubled phase map\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n #scipy.ndimage.gaussian_filter(self.phaseImage2, 2, order=0, output=self.phaseImage2, mode='reflect')\r\n\r\n np1 = scipy.ndimage.gaussian_filter(self.phaseImage1, gfilter, order=0, mode='reflect')\r\n\r\n np2 = scipy.ndimage.gaussian_filter(self.phaseImage2, gfilter, order=0, mode='reflect')\r\n\r\n dphase = np1 + np2\r\n\r\n #dphase = self.phaseImage1 - self.phaseImage2\r\n\r\n \r\n\r\n #scipy.ndimage.gaussian_filter(dphase, 2, order=0, output=dphase, mode='reflect')\r\n\r\n imgpdouble = mpl.imshow(dphase, cmap=matplotlib.cm.hsv)\r\n\r\n mpl.title('2x Phi map')\r\n\r\n mpl.colorbar()\r\n\r\n imgpdouble.set_clim=(-np.pi, np.pi)\r\n\r\n\r\n\r\n if mode == 2 or mode == 1:\r\n\r\n if self.phasex == []:\r\n\r\n self.phasex = np.random.randint(0, high=self.DF.shape[1], size=self.DF.shape[1])\r\n\r\n self.phasey = np.random.randint(0, high=self.DF.shape[2], size=self.DF.shape[2])\r\n\r\n\r\n\r\n mpl.subplot(2,3,3)\r\n\r\n sh = self.DF.shape\r\n\r\n spr = sh[2]/self.nPhases\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n Dm = self.avgimg[i*spr,i*spr] # diagonal run\r\n\r\n mpl.plot(self.n_times, 100.0*(self.DF[:,self.phasex[i], self.phasey[i]]/Dm))\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('Waveforms')\r\n\r\n\r\n\r\n if mode == 2:\r\n\r\n mpl.subplot(2,3,6)\r\n\r\n sh = self.DF.shape\r\n\r\n x0 = int(sh[1]/2)\r\n\r\n y0 = int(sh[2]/2)\r\n\r\n for i in range(0, self.nPhases):\r\n\r\n mpl.plot(self.DF[1:,x0,y0])\r\n\r\n mpl.hold('on')\r\n\r\n mpl.title('FFTs')", "def __init__(self, mapfile, camera=None, light=None,\r\n width=100.0, depth=100.0, height=10.0,\r\n divx=0, divy=0, ntiles=1.0, name=\"\",\r\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\r\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n if divx > 200 or divy > 200:\r\n print(\"... Map size can't be bigger than 200x200 divisions\")\r\n divx = 200\r\n divy = 200\r\n if issubclass(type(mapfile), type(\"\")): #HORRIBLE. 
Only way to cope with python2v3\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n if VERBOSE:\r\n print(\"Loading height map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n else:\r\n im = mapfile #allow image files to be passed as mapfile\r\n ix, iy = im.size\r\n if (ix > 200 and divx == 0) or (divx > 0):\r\n if divx == 0:\r\n divx = 200\r\n divy = 200\r\n im = im.resize((divx, divy), Image.ANTIALIAS)\r\n ix, iy = im.size\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n self.pixels = im.load()\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.ix = ix\r\n self.iy = iy\r\n self.ttype = GL_TRIANGLE_STRIP\r\n\r\n if VERBOSE:\r\n print(\"Creating Elevation Map ...\", ix, iy)\r\n\r\n wh = width * 0.5\r\n hh = depth * 0.5\r\n ws = width / ix\r\n hs = depth / iy\r\n ht = height / 255.0\r\n tx = 1.0*ntiles / ix\r\n ty = 1.0*ntiles / iy\r\n\r\n verts = []\r\n norms = []\r\n tex_coords = []\r\n idx = []\r\n\r\n for y in xrange(0, iy):\r\n for x in xrange(0, ix):\r\n hgt = (self.pixels[x, y])*ht\r\n this_x = -wh + x*ws\r\n this_z = -hh + y*hs\r\n if cubic:\r\n \"\"\" this is a bit experimental. It tries to make the map either zero\r\n or height high. Vertices are moved 'under' adjacent ones if there is\r\n a step to make vertical walls. Goes wrong in places - mainly because\r\n it doesn't check diagonals\r\n \"\"\"\r\n if hgt > height / 2:\r\n hgt = height\r\n else:\r\n hgt = 0.0\r\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\r\n if self.pixels[x-1, y] > 127:\r\n this_x = -wh + (x-1)*ws\r\n elif self.pixels[x+1, y] > 127:\r\n this_x = -wh + (x+1)*ws\r\n elif self.pixels[x, y-1] > 127:\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x, y+1] > 127:\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x-1, y-1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x-1, y+1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x+1, y-1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x+1, y+1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y+1)*hs\r\n verts.append((this_x, hgt, this_z))\r\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\r\n\r\n s = 0\r\n #create one long triangle_strip by alternating X directions\r\n for y in range(0, iy-1):\r\n for x in range(0, ix-1):\r\n i = (y * ix)+x\r\n idx.append((i, i+ix, i+ix+1))\r\n idx.append((i+ix+1, i+1, i))\r\n s += 2\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))", "def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = 
int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def process(self, step_guess_orientation=True, step_advanced_alignement=True,\n step_gen_worldfiles=True, step_load_worldfiles=True,\n step_gen_vrts=True, step_load_vrts=True,\n step_load_debug=True ):\n\n QgsMessageLog.logMessage(\"1/ Instantiating all images...\", \"QuickDroneMap\", 0)\n for root, dirs, files in os.walk(self.folder):\n for file in files:\n if file.endswith(\".jpg\") or file.endswith(\".JPG\"):\n image_path = os.path.join(root, file)\n image = Image(self, image_path)\n self.images.append(image)\n self.images = self.images[70:90]\n # for i in [301,300,329]: # 3 images, transform fails on all of them\n # for i in [397,398,364]: # 3 images, transform fails on one of them\n # for i in [377,380,381]: # 3 images, transform works on all of them\n # path = \"C:\\\\Users\\\\Olivier\\\\Dropbox\\\\Affaires\\\\SPC\\\\Sources\\\\quickdronemap\\\\test\\\\data\\\\DJI_{0:04d}.JPG\".format(i)\n # self.images.append(Image(self, path))\n\n QgsMessageLog.logMessage(\"2/ Assigning ids\", \"QuickDroneMap\", 0)\n for i, image in enumerate(self.images):\n image.id = i\n\n\n QgsMessageLog.logMessage(\"2/ Loading image attributes and parsing exif tags...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.set_attributes()\n\n if step_guess_orientation:\n QgsMessageLog.logMessage(\"3/ Building image sequences...\", \"QuickDroneMap\", 0)\n sorted_images = sorted(self.images, key=lambda x: x.timestamp)\n for i in range(len(sorted_images)):\n\n prev_image = sorted_images[i-1] if i>0 else None\n image = sorted_images[i]\n next_image = sorted_images[i+1] if i<len(sorted_images)-1 else None\n\n if prev_image is None or next_image is None:\n continue\n\n angle_p_i = 
math.atan2(image.point.x()-prev_image.point.x(),-image.point.y()+prev_image.point.y())\n angle_i_n = math.atan2(next_image.point.x()-image.point.x(),-next_image.point.y()+image.point.y())\n\n # Checking if the three images are aligned (if not, we're probably at an angle)\n dA = absolute_angle_difference(angle_p_i, angle_i_n)\n if dA > ANGLE_THRESHOLD:\n continue\n\n # Checking if the three images are near enough timewise, if not, it could be separate flights\n dT1 = image.timestamp - prev_image.timestamp\n dT2 = next_image.timestamp - image.timestamp\n if dT1 > TIME_THRESHOLD or dT2 > TIME_THRESHOLD:\n continue\n\n prev_image.next_image = image\n image.prev_image = prev_image\n image.next_image = next_image\n next_image.prev_image = image\n\n QgsMessageLog.logMessage(\"4/ Deriving orientation from image sequence\", \"QuickDroneMap\", 0)\n for image in self.images:\n # if the direction wasn't set in the Exif tags, we derive it from the image sequences\n if image.direction is None:\n img_a = image.prev_image or image \n img_b = image.next_image or image\n image.angle = math.atan2(img_b.point.x()-img_a.point.x(),-img_b.point.y()+img_a.point.y())\n\n if step_advanced_alignement:\n QgsMessageLog.logMessage(\"5/ Building image neighbourhood graph...\", \"QuickDroneMap\", 0)\n from scipy.spatial import Delaunay\n points = [(i.point.x(),i.point.y()) for i in self.images]\n triangulation = Delaunay(points)\n\n done = [[False for _i2 in self.images] for _i1 in self.images]\n for tri in triangulation.simplices:\n i1,i2,i3 = tri\n if not done[i1][i2]:\n e = Edge(self.images[i1], self.images[i2])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i2].edges.append(e)\n done[i1][i2] = True\n if not done[i1][i3]:\n e = Edge(self.images[i1], self.images[i3])\n self.edges.append(e)\n self.images[i1].edges.append(e)\n self.images[i3].edges.append(e)\n done[i1][i3] = True\n if not done[i2][i3]:\n e = Edge(self.images[i2], self.images[i3])\n self.edges.append(e)\n self.images[i2].edges.append(e)\n self.images[i3].edges.append(e)\n done[i2][i3] = True\n\n QgsMessageLog.logMessage(\"6/ Computing similarities\", \"QuickDroneMap\", 0)\n for i, edge in enumerate(self.edges):\n QgsMessageLog.logMessage(\"Done {} out of {}\".format(i,len(self.edges)), \"QuickDroneMap\", 0)\n QApplication.processEvents()\n edge.compute_transform()\n\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # QgsMessageLog.logMessage(\"Initial fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n\n # print(\"TESTING QUALITY OF SIMILARITY (disable optimization to do this)\")\n # done = []\n # edges_to_delete = []\n # for edge in self.edges:\n # QApplication.processEvents()\n\n # if edge.imageA in done or edge.imageB in done:\n # edges_to_delete.append(edge)\n # continue\n\n # done.append(edge.imageA)\n # done.append(edge.imageB)\n\n # d_angle = edge.angle\n # edge.imageB.angle = edge.imageA.angle + d_angle\n\n # f_scale = edge.scale\n # edge.imageB.scale = edge.imageA.scale * f_scale\n\n # d_point = QgsPointXY(edge.tvec[0],edge.tvec[1])\n # d_point = d_point.rotated(edge.imageA.angle)\n # d_point *= edge.imageA.pixel_size/DOWNSCALING_FACTOR\n # edge.imageB.point = edge.imageA.point + d_point\n # for edge in edges_to_delete:\n # self.edges.remove(edge)\n\n\n # print(\"AFTER PROTOTYPE PLACEMENT\")\n # initial_guess_np, _ = self.get_initial_values_and_bounds()\n # self.calculate_fitness(initial_guess_np)\n\n\n QgsMessageLog.logMessage(\"7/ Optimizing\", \"QuickDroneMap\", 0)\n 
QApplication.processEvents()\n\n initial_guess_np, bounds = self.get_initial_values_and_bounds() \n # res_1 = least_squares(calculate_fitness, initial_guess_np, bounds=([b[0] for b in bounds],[b[1] for b in bounds]))\n res_1 = minimize(self.calculate_fitness, initial_guess_np, bounds=bounds)\n\n for image in self.images:\n px = res_1.x[image.id*4+0]\n py = res_1.x[image.id*4+1]\n pa = res_1.x[image.id*4+2]\n ps = res_1.x[image.id*4+3]\n image.point = QgsPointXY(px, py)\n image.angle = pa\n image.psize = ps\n\n initial_guess_np, _ = self.get_initial_values_and_bounds()\n QgsMessageLog.logMessage(\"After optimization fitness is {}\".format(self.calculate_fitness(initial_guess_np)), \"QuickDroneMap\", 0)\n \n QgsMessageLog.logMessage(\"8/ Computing all transforms...\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.update_transform()\n\n if step_gen_worldfiles:\n QgsMessageLog.logMessage(\"9a/ Creating and loading worldfiles\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_worldfile()\n if step_load_worldfiles:\n image.load_worldfile(self.iface)\n\n if step_gen_vrts:\n QgsMessageLog.logMessage(\"9b/ Creating and loading vrts\", \"QuickDroneMap\", 0)\n for image in self.images:\n image.write_vrt()\n if step_load_vrts:\n image.load_vrt(self.iface)\n\n if step_load_debug:\n QgsMessageLog.logMessage(\"10/ Creating debug jsons files\", \"QuickDroneMap\", 0)\n edg_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 32628}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.point.x(), edge.imageA.point.y()],[edge.imageB.point.x(), edge.imageB.point.y()]]\n props = {k:v for (k,v) in vars(edge).items()}\n props['angle_a'] = edge.imageA.angle\n props['angle_b'] = edge.imageB.angle\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n edg_data['features'].append(feature)\n \n edg_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(edg_data, edg_file, default=lambda o: str(o))\n edg_file.close()\n layer = self.iface.addVectorLayer(edg_file.name,\"[DEBUG] Edges\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_edges_style.qml'))\n \n graph_data = {\"type\": \"FeatureCollection\",\"features\": [], \"crs\": {\"type\": \"EPSG\",\"properties\": {\"code\": 4326}}} # TODO : use self.crs\n for edge in self.edges:\n coords = [[edge.imageA.lon, edge.imageA.lat],[edge.imageB.lon, edge.imageB.lat]]\n props = {k:v for (k,v) in vars(edge).items()}\n feature = {\"type\": \"Feature\",\"properties\": props,\"geometry\": {\"type\": \"LineString\",\"coordinates\": coords}}\n graph_data['features'].append(feature)\n\n graph_file = tempfile.NamedTemporaryFile(mode='w+', suffix='.geojson', delete=False)\n json.dump(graph_data, graph_file, default=lambda o: str(o))\n graph_file.close()\n layer = self.iface.addVectorLayer(graph_file.name,\"[DEBUG] Graph\",\"ogr\")\n layer.loadNamedStyle(os.path.join(os.path.dirname(os.path.realpath(__file__)),'debug_graph_style.qml'))", "def add_field(self, img_dict):\n for k in img_dict.keys():\n assert k in self.bands, \"Celeste model doesn't support band %s\"%k\n self.field_list.append(Field(img_dict))", "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, 
self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def generate_winstonlutz_multi_bb_multi_field(\n simulator: Simulator,\n field_layer: type[Layer],\n dir_out: str,\n field_offsets: list[list[float]],\n bb_offsets: list[list[float]] | list[dict[str, float]],\n field_size_mm: tuple[float, float] = (20, 20),\n final_layers: list[Layer] | None = None,\n bb_size_mm: float = 5,\n image_axes: ((int, int, int), ...) = (\n (0, 0, 0),\n (90, 0, 0),\n (180, 0, 0),\n (270, 0, 0),\n ),\n gantry_tilt: float = 0,\n gantry_sag: float = 0,\n clean_dir: bool = True,\n jitter_mm: float = 0,\n align_to_pixels: bool = True,\n) -> list[str]:\n if not osp.isdir(dir_out):\n os.mkdir(dir_out)\n if clean_dir:\n for pdir, _, files in os.walk(dir_out):\n [os.remove(osp.join(pdir, f)) for f in files]\n file_names = []\n for gantry, coll, couch in image_axes:\n sim_single = copy.copy(simulator)\n for field_offset in field_offsets:\n offset_mm_left = field_offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = field_offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -field_offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n long_offset += gantry_tilt * cos(gantry)\n gplane_offset += gantry_sag * sin(gantry)\n if align_to_pixels:\n long_offset = pixel_align(sim_single.pixel_size, long_offset)\n gplane_offset = pixel_align(sim_single.pixel_size, gplane_offset)\n sim_single.add_layer(\n field_layer(\n field_size_mm=field_size_mm,\n cax_offset_mm=(long_offset, gplane_offset),\n )\n )\n for offset in bb_offsets:\n if isinstance(offset, dict):\n offset_mm_left = offset[\"offset_left_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_up = offset[\"offset_up_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_in = -offset[\"offset_in_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n else:\n offset_mm_left = offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n if align_to_pixels:\n long_offset = pixel_align(sim_single.pixel_size, long_offset)\n gplane_offset = pixel_align(sim_single.pixel_size, gplane_offset)\n sim_single.add_layer(\n PerfectBBLayer(\n cax_offset_mm=(\n long_offset,\n gplane_offset,\n ),\n bb_size_mm=bb_size_mm,\n )\n )\n if final_layers is not None:\n for layer in final_layers:\n sim_single.add_layer(layer)\n file_name = f\"WL G={gantry}, C={coll}, P={couch}; Field={field_size_mm}mm (shifts={field_offsets}); BB={bb_size_mm}mm @ left={offset_mm_left:.2f}, in={offset_mm_in:.2f}, up={offset_mm_up:.2f}; Gantry tilt={gantry_tilt}, Gantry 
sag={gantry_sag}.dcm\"\n sim_single.generate_dicom(\n osp.join(dir_out, file_name),\n gantry_angle=gantry,\n coll_angle=coll,\n table_angle=couch,\n )\n file_names.append(file_name)\n return file_names", "def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax = 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf 
= kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] = 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 
0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')", "def make_input_map(self) :\n\n self.input_map = \"\"\n stencil = self.core.stencil\n pattern = self.core.pattern\n reflect = len(pattern)+1 # reflector id, last material\n N = self.dimension\n coremap = np.zeros((N+2,N+2), dtype='i')\n \n # reflections and vacuum\n coremap[0, 1:N+1] = -1 \n coremap[1:N+1, 0] = -1\n coremap[N+1, 1:N+1] = -2\n coremap[1:N+1, N+1] = -2\n \n fuelindex = 0\n \n for i in range(1, N+1) :\n for j in range(1, N+1) :\n if j == 1 and i > 1 :\n pass\n else :\n if stencil[i-1, j-1] > 0 : # a fuel\n coremap[i, j] = pattern[fuelindex]+1\n fuelindex += 1\n elif stencil[i-1, j-1] == 0 : # a reflector\n coremap[i, j] = reflect\n else : # a void\n pass \n # Copy elements such that rotational symmetry is enforced. \n for j in range(2, N+1) :\n coremap[j, 1] = coremap[1, j]\n for i in range(0, N+2) :\n for j in range(0, N+2) :\n self.input_map +='%4i' % (coremap[i, j])\n self.input_map += '\\n'", "def _generate_modifiers(pixel_scale=0.2, bands='ugrizy',\n has_modelfit_mag=True, has_modelfit_flux=True, has_modelfit_flag=True,\n dm_schema_version=4):\n\n if dm_schema_version not in (1, 2, 3, 4):\n raise ValueError('Only supports dm_schema_version == 1, 2, 3, 4')\n\n FLUX = 'flux' if dm_schema_version <= 2 else 'instFlux'\n ERR = 'Sigma' if dm_schema_version <= 1 else 'Err'\n BLENDEDNESS_SUFFIX = '_%s' % FLUX if dm_schema_version <= 3 else ''\n\n modifiers = {\n 'objectId': 'id',\n 'parentObjectId': 'parent',\n 'ra': (np.rad2deg, 'coord_ra'),\n 'dec': (np.rad2deg, 'coord_dec'),\n 'x': 'base_SdssCentroid_x',\n 'y': 'base_SdssCentroid_y',\n 'xErr': 'base_SdssCentroid_x{}'.format(ERR),\n 'yErr': 'base_SdssCentroid_y{}'.format(ERR),\n 'xy_flag': 'base_SdssCentroid_flag',\n 'psNdata': 'base_PsfFlux_area',\n 'extendedness': 'base_ClassificationExtendedness_value',\n 'blendedness': 'base_Blendedness_abs{}'.format(BLENDEDNESS_SUFFIX),\n }\n\n not_good_flags = (\n 'base_PixelFlags_flag_edge',\n 'base_PixelFlags_flag_interpolatedCenter',\n 'base_PixelFlags_flag_saturatedCenter',\n 'base_PixelFlags_flag_crCenter',\n 'base_PixelFlags_flag_bad',\n 'base_PixelFlags_flag_suspectCenter',\n 'base_PixelFlags_flag_clipped',\n )\n\n modifiers['good'] = (create_basic_flag_mask,) + not_good_flags\n modifiers['clean'] = (\n create_basic_flag_mask,\n 'deblend_skipped',\n ) + not_good_flags\n\n # cross-band average, second moment values\n modifiers['I_flag'] = 'ext_shapeHSM_HsmSourceMoments_flag'\n for ax in ['xx', 'yy', 'xy']:\n modifiers['I{}_pixel'.format(ax)] = 'ext_shapeHSM_HsmSourceMoments_{}'.format(ax)\n modifiers['I{}PSF_pixel'.format(ax)] = 'base_SdssShape_psf_{}'.format(ax)\n\n for band in bands:\n modifiers['mag_{}'.format(band)] = '{}_mag'.format(band)\n modifiers['magerr_{}'.format(band)] = '{}_mag_err'.format(band)\n modifiers['psFlux_{}'.format(band)] = (convert_dm_ref_zp_flux_to_nanoJansky,\n '{}_base_PsfFlux_{}'.format(band, FLUX))\n modifiers['psFlux_flag_{}'.format(band)] = '{}_base_PsfFlux_flag'.format(band)\n modifiers['psFluxErr_{}'.format(band)] = (convert_dm_ref_zp_flux_to_nanoJansky,\n '{}_base_PsfFlux_{}{}'.format(band, FLUX, ERR))\n\n modifiers['I_flag_{}'.format(band)] = 
'{}_base_SdssShape_flag'.format(band)\n\n for ax in ['xx', 'yy', 'xy']:\n modifiers['I{}_pixel_{}'.format(ax, band)] = '{}_base_SdssShape_{}'.format(band, ax)\n modifiers['I{}PSF_pixel_{}'.format(ax, band)] = '{}_base_SdssShape_psf_{}'.format(band, ax)\n\n modifiers['psf_fwhm_{}'.format(band)] = (\n lambda xx, yy, xy: pixel_scale * 2.355 * (xx * yy - xy * xy) ** 0.25,\n '{}_base_SdssShape_psf_xx'.format(band),\n '{}_base_SdssShape_psf_yy'.format(band),\n '{}_base_SdssShape_psf_xy'.format(band),\n )\n\n if has_modelfit_flux:\n # The zp=27.0 is based on the default calibration for the coadds\n # as specified in the DM code. It's correct for Run 1.1p.\n modifiers['cModelFlux_{}'.format(band)] = (convert_dm_ref_zp_flux_to_nanoJansky,\n '{}_modelfit_CModel_{}'.format(band, FLUX))\n modifiers['cModelFluxErr_{}'.format(band)] = (convert_dm_ref_zp_flux_to_nanoJansky,\n '{}_modelfit_CModel_{}{}'.format(band, FLUX, ERR))\n if has_modelfit_flag:\n modifiers['cModelFlux_flag_{}'.format(band)] = '{}_modelfit_CModel_flag'.format(band)\n\n if not has_modelfit_mag:\n modifiers['mag_{}_cModel'.format(band)] = (convert_dm_ref_zp_flux_to_mag,\n '{}_modelfit_CModel_{}'.format(band, FLUX))\n modifiers['magerr_{}_cModel'.format(band)] = (convert_flux_err_to_mag_err,\n '{}_modelfit_CModel_{}'.format(band, FLUX),\n '{}_modelfit_CModel_{}{}'.format(band, FLUX, ERR))\n modifiers['snr_{}_cModel'.format(band)] = (\n np.divide,\n '{}_modelfit_CModel_{}'.format(band, FLUX),\n '{}_modelfit_CModel_{}{}'.format(band, FLUX, ERR),\n )\n\n if has_modelfit_mag:\n modifiers['mag_{}_cModel'.format(band)] = '{}_modelfit_mag'.format(band)\n modifiers['magerr_{}_cModel'.format(band)] = '{}_modelfit_mag_err'.format(band)\n modifiers['snr_{}_cModel'.format(band)] = '{}_modelfit_SNR'.format(band)\n\n return modifiers", "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps", "def generate_image_info(image):\n image = ee.Image(image)\n image_vis = image.visualize(**{\n 'min': image_min,\n 'max': image_max,\n 'palette': image_palette\n })\n\n print(image_min, image_max)\n\n if 'hillshade' in r and r['hillshade']:\n image_vis = hillshade(image_vis,\n image.subtract(image_min).divide(ee.Image.constant(image_max).subtract(image_min)),\n True)\n\n m = image_vis.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def make_pm_maps(input_file, input_pm_file, output_file, num_cones, num_bins=80, titles=None, mincount=0, maxcount=40, cut=None):\n # get titles for each subplot and dwarf proper motions\n titles, dwarf_pmra, dwarf_pmdec, = load_dwarf_info(input_file, titles)\n\n # load stellar pm values\n ra, dec, pmra, pmdec, parallax, parallax_error = load_gaia_search_info(input_pm_file)\n\n # from table 2 in\n # if titles is not None:\n # titles = fix_names(titles)\n # for i, title, dpmra, dpmdec in enumerate(zip(titles, dwarf_pmra, dwarf_pmdec)):\n # dwarf_pmra[i], dwarf_pmdec[i] = fix_pms(title, dpmra, dpmdec)\n # # dwarf_pmra[5] = 1.81\n # # dwarf_pmra[8] = -1.21\n # # dwarf_pmra[11] = 0.22\n # # dwarf_pmdec[5] = 0.14\n # # dwarf_pmdec[8] = -0.92\n # # 
dwarf_pmdec[11] = -1.41\n\n # set fig size and shape\n d = len(titles)\n rows = 3\n cols = int(np.ceil(d/rows))\n fig, axs = plot_setup(rows, cols, d)\n max_count = [0, 0]\n\n # plot each dwarf in separate subplots\n for ax, title, dwarfpmra, dwarfpmdec, *data in zip(axs, titles, dwarf_pmra, dwarf_pmdec, ra, dec, pmra, pmdec, parallax, parallax_error):\n counts, xedges, yedges, im = pm_histogram(fig, ax, data, title, dwarf_pmra=dwarfpmra, dwarf_pmdec=dwarfpmdec, cut=cut)\n\n # make labels across all subplots\n universal_plot_labels(fig, r\"Proper motion, right ascension [mas/yr]\", r\"Proper motion, declination [mas/yr]\")\n\n # add a universal colorbar, change cmap in hist2d above\n # fig.colorbar(im, ax=axs.ravel().tolist())\n\n fig.savefig(output_file, bbox_inches='tight')", "def _init_meg_map_dict(bands, length=0):\n\n # Initialize dictionary\n meg_map = dict()\n\n # Add oscillation bands\n for band in bands:\n meg_map[band] = np.zeros(length)\n\n return meg_map", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def __init__(self, MRIObj, pRFModelObj = None, FAModelObj = None,\n pRF_data = [], FA_data = [],\n prf_dm = [], max_ecc_ext = 5.5,\n pysub = 'hcp_999999', flatmap_height = 2048, full_figsize = (12, 8)):\n\n # set data object to use later on\n self.MRIObj = MRIObj\n\n # Load pRF and model object\n self.pRFModelObj = pRFModelObj\n self.FAModelObj = FAModelObj\n\n ## data to be plotted \n self.pRF_data = pRF_data\n self.FA_data = FA_data\n\n ## figure settings\n self.flatmap_height = flatmap_height\n self.full_figsize = full_figsize\n self.images = {}\n \n ## create pycortex vars\n self.mask, extents = cortex.quickflat.utils.get_flatmask(pysub, height = self.flatmap_height)\n self.vc = cortex.quickflat.utils._make_vertex_cache(pysub, height = self.flatmap_height)\n\n self.mask_index = np.zeros(self.mask.shape)\n self.mask_index[self.mask] = np.arange(self.mask.sum())\n\n # set prf dm\n self.prf_dm = prf_dm\n\n ## set grid of possible points in downsampled space\n self.point_grid_2D = np.array(np.meshgrid(np.linspace(-1, 1, prf_dm.shape[0]) * max_ecc_ext,\n np.linspace(1, -1, prf_dm.shape[0]) * max_ecc_ext))", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def __init__(self, mapfile, xpos, zpos, emap, width=10.0, depth=10.0, height=10.0, name=\"building\", draw_details=None, yoff=0.0, scheme=None):\r\n self.xpos = xpos\r\n self.zpos = zpos\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.name = name\r\n self.ceilingthickness = 1.0\r\n self.walls = []\r\n\r\n if scheme == None:\r\n self.scheme = Building.baseScheme\r\n else:\r\n self.scheme = scheme\r\n\r\n # We don't have to be rigorous here, this should only be a draw_details or an iterable of draw_details.\r\n if hasattr(draw_details, \"__getitem__\") or hasattr(draw_details, \"__iter__\"):\r\n assert (len(draw_details) == self.scheme[\"#models\"])\r\n self.details = draw_details\r\n else:\r\n self.details = [draw_details for x in range(self.scheme[\"#models\"])]\r\n # having a method like this allows draw details to be set later\r\n\r\n self.yoff = yoff\r\n\r\n self.model = [MergeShape(name=name+\".\"+str(x)) for x in range(self.scheme[\"#models\"])]\r\n\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n print(\"Loading building map 
...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n ix,iy = im.size\r\n\r\n print(\"image size\", ix, \",\", iy)\r\n\r\n startx = xpos - ix / 2 * width\r\n starty = zpos - ix / 2 * depth\r\n\r\n yoff += emap.calcHeight(-xpos,-zpos)\r\n\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n pixels = im.load()\r\n\r\n for y in range(1,iy-1):\r\n print(\".\", end='')\r\n for x in range(1,ix-1):\r\n colour = pixels[x,y]\r\n\r\n if x == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y], \"edge\"), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x-1,y]), wallfunc=self.west_wall, ceilingedgefunc=self.west_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if x == ix-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y], \"edge\"), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x+1,y]), wallfunc=self.east_wall, ceilingedgefunc=self.east_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == 1:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1], \"edge\"), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y-1]), wallfunc=self.south_wall, ceilingedgefunc=self.south_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n if y == iy-2:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x, y+1], \"edge\"), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n else:\r\n self._executeScheme(x, y, startx, starty, (colour, pixels[x,y+1]), wallfunc=self.north_wall, ceilingedgefunc=self.north_edge, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self._executeScheme(x, y, startx, starty, (colour, None), wallfunc=None, ceilingedgefunc=None, ceilingfunc=self.ceiling, rooffunc=self.roof)\r\n\r\n self.set_draw_details(self.details) # after models created otherwise\r\n # details lost by merging\r", "def _build_augmentation_map(self, images):\n aug_map = {}\n img_shape = (images[0].x.shape[0], images[0].x.shape[1])\n\n vert_modes = [Crop.TOP, Crop.CENTER, Crop.BOTTOM]\n horiz_modes = [Crop.LEFT, Crop.CENTER, Crop.RIGHT]\n crop_modes = flatten_list([[CropMode(vert, horiz) for horiz in horiz_modes] for vert in vert_modes])\n\n labels_series = pd.Series([i.y for i in images])\n labels_distribution = labels_series.value_counts(normalize=True).sort_values()\n\n min_distribution = labels_distribution.iloc[0] * len(crop_modes)\n\n for label, distribution in labels_distribution.iteritems():\n aug_num = math.ceil(min_distribution / distribution)\n #additional augmentation functions can be added here:\n aug_map[label] = [self._build_crop_fn(img_shape, crop_modes[:aug_num])]\n \n return aug_map", "def build_filler_images(self):", "def __init__(self, mapfile, camera=None, light=None,\n width=100.0, depth=100.0, height=10.0,\n divx=0, divy=0, ntiles=1.0, name=\"\",\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, 
rx, ry, rz,\n sx, sy, sz, cx, cy, cz)\n if mapfile[0] != '/':\n mapfile = sys.path[0] + '/' + mapfile\n if VERBOSE:\n print(\"Loading height map ...\", mapfile)\n\n if divx > 200 or divy > 200:\n print(\"... Map size can't be bigger than 200x200 divisions\")\n divx = 200\n divy = 200\n\n im = Image.open(mapfile)\n im = ImageOps.invert(im)\n ix, iy = im.size\n if (ix > 200 and divx == 0) or (divx > 0):\n if divx == 0:\n divx = 200\n divy = 200\n im = im.resize((divx, divy), Image.ANTIALIAS)\n ix, iy = im.size\n if not im.mode == \"P\":\n im = im.convert('P', palette=Image.ADAPTIVE)\n\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n self.pixels = im.load()\n self.width = width\n self.depth = depth\n self.height = height\n self.ix = ix\n self.iy = iy\n self.ttype = GL_TRIANGLE_STRIP\n\n if VERBOSE:\n print(\"Creating Elevation Map ...\", ix, iy)\n\n wh = width * 0.5\n hh = depth * 0.5\n ws = width / ix\n hs = depth / iy\n ht = height / 255.0\n tx = 1.0*ntiles / ix\n ty = 1.0*ntiles / iy\n\n verts = []\n norms = []\n tex_coords = []\n idx = []\n\n for y in xrange(0, iy):\n for x in xrange(0, ix):\n hgt = (self.pixels[x, y])*ht\n this_x = -wh + x*ws\n this_z = -hh + y*hs\n if cubic:\n \"\"\" this is a bit experimental. It tries to make the map either zero\n or height high. Vertices are moved 'under' adjacent ones if there is\n a step to make vertical walls. Goes wrong in places - mainly because\n it doesn't check diagonals\n \"\"\"\n if hgt > height / 2:\n hgt = height\n else:\n hgt = 0.0\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\n if self.pixels[x-1, y] > 127:\n this_x = -wh + (x-1)*ws\n elif self.pixels[x+1, y] > 127:\n this_x = -wh + (x+1)*ws\n elif self.pixels[x, y-1] > 127:\n this_z = -hh + (y-1)*hs\n elif self.pixels[x, y+1] > 127:\n this_z = -hh + (y+1)*hs\n elif self.pixels[x-1, y-1] > 127:\n this_x = -wh + (x-1)*ws\n this_z = -hh + (y-1)*hs\n elif self.pixels[x-1, y+1] > 127:\n this_x = -wh + (x-1)*ws\n this_z = -hh + (y+1)*hs\n elif self.pixels[x+1, y-1] > 127:\n this_x = -wh + (x+1)*ws\n this_z = -hh + (y-1)*hs\n elif self.pixels[x+1, y+1] > 127:\n this_x = -wh + (x+1)*ws\n this_z = -hh + (y+1)*hs\n verts.append((this_x, hgt, this_z))\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\n\n s = 0\n #create one long triangle_strip by alternating X directions\n for y in range(0, iy-1):\n for x in range(0, ix-1):\n i = (y * ix)+x\n idx.append((i, i+ix, i+ix+1))\n idx.append((i+ix+1, i+1, i))\n s += 2\n\n self.buf = []\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))", "def map_sim_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n\n for gal_index in GR.N_gal - np.arange(GR.N_gal) - 1:\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='simgas')\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n\n # Plot\n Rmax = max_scale/2\n ax1 = axs[9 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = 
ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.99*Rmax,0.99*Rmax])\n ax1.set_ylim([-0.99*Rmax,0.99*Rmax])\n if (p.prop == 'm') & (p.text == True):\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n #if counter == 0:\n # ax1 = plt.subplots(1, 1)\n #cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/')\n plt.savefig(p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format), format=p.format, dpi=250, facecolor='w')\n fignum += 1\n\n else:\n if p.add:\n fig, ax1 = plt.gcf(), p.ax\n if not p.add:\n fig = plt.figure(figsize=(8,6))\n ax1 = fig.add_axes([0.1, 0.01, 0.8, 0.8]) \n ax1.axis('equal')\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n if p.R_max:\n # Cut out square\n simgas = simgas[(np.abs(simgas.x) < p.R_max) & (np.abs(simgas.y) < p.R_max)]\n # Add bottom left corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = -p.R_max,-p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n # Add top right corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = p.R_max,p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n else:\n pass\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n print('Min and max of map: ',map2D.min(),map2D.max())\n #map2D[map2D < 1e4] = 1e6\n # Plot map\n if not p.R_max:\n p.R_max = max_scale/2\n if p.log: \n if not p.vmax: p.vmax = np.log10(map2D).max()\n if not p.vmin: p.vmin = np.log10(map2D).max() - 4\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n else:\n if not p.vmax: p.vmax = np.max(map2D)\n if not p.vmin: p.vmin = np.min(map2D) / 1e3\n map2D[map2D < p.vmin] = p.vmin #np.min(map2D[map2D > 0])\n map2D = np.flipud(map2D)\n\n im = ax1.imshow(map2D,\\\n extent=[-max_scale/2,max_scale/2,-max_scale/2,max_scale/2],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n zoom = 1#/1.5\n ax1.set_xlim([-1/zoom * p.R_max,1/zoom * p.R_max])\n ax1.set_ylim([-1/zoom * p.R_max,1/zoom * p.R_max])\n if p.colorbar: \n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im,cax=cax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n if (p.prop == 'm') & (p.text == True):\n simstar = aux.load_temp_file(gal_ob=gal_ob,data_type='simstar')\n ax1.text(0.05,0.92,'M$_{star}$=%.1e M$_{\\odot}$' % np.sum(simstar.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n 
ax1.text(0.05,0.86,'M$_{gas}$=%.1e M$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.80,'SFR=%.2f M$_{\\odot}$/yr' % GR.SFR[p.gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index), format=p.format, dpi=250, facecolor='w')\n\n if not p.colorbar: return(im)", "def CreateTargetGeoField(nbtimestep,latlen,lonlen):\n\n pres_grid = np.zeros((nbtimestep, latlen, lonlen))\n u_grid = np.zeros((nbtimestep, latlen, lonlen))\n v_grid = np.zeros((nbtimestep, latlen, lonlen))\n\n return pres_grid,u_grid,v_grid", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. 
(1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def level1_hitmaps(filename,\n image_directory,\n band_average=True,\n feed_average=False,\n feeds=[1],\n make_hits=True,\n make_sky=True,\n field_width=None,\n cdelt=[1./60.,1./60.],\n ctype=['RA---TAN','DEC--TAN'],\n crval=None,\n source='None',\n plot_circle=False,\n plot_circle_radius=1,\n AzElMode=False,\n SunMode=False):\n\n\n try:\n fd = h5py.File(filename,'r')\n except OSError:\n print('Unable to open file {}'.format(filename))\n return\n\n # cdelt given in arcmin\n if not isinstance(field_width, type(None)):\n xpixelWidth = int(field_width[0]/cdelt[0]*60)\n ypixelWidth = int(field_width[1]/cdelt[1]*60)\n image_width = [xpixelWidth, ypixelWidth]\n else:\n image_width = None\n\n if isinstance(image_directory, type(None)):\n image_directory = filename.split('/')[-1].split('.')[0]\n if not os.path.exists(image_directory):\n os.makedirs(image_directory)\n\n\n if AzElMode:\n mapper = MapperAzEl(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n image_directory=image_directory,\n ctype=ctype)\n elif SunMode:\n mapper = MapperSun(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n image_directory=image_directory,\n ctype=ctype)\n \n else:\n mapper = Mapper(makeHitMap=make_hits,\n makeAvgMap=make_sky,\n image_directory=image_directory,\n crval=crval,\n cdelt=cdelt,\n npix=image_width,\n ctype=ctype)\n \n \n mapper.setLevel1(fd, source)\n if 'all' in feeds:\n feeds = [feed for feed in fd['spectrometer/feeds'][:] if feed != 20]\n if feed_average:\n \n maps = mapper(feeds, usetqdm=True)\n fstr = '-'.join(['{:02d}'.format(feed) for feed in feeds if feed in mapper.feed_ids])\n outdir = '{}'.format(image_directory)\n\n mapper.plotImages(feeds,\n '{}/Hitmap_FeedAvg.png'.format(outdir),\n '{}/BandAverage_FeedAvg.png'.format(outdir),\n plot_circle,\n plot_circle_radius)\n # mapper.SaveMaps('{}/BandAverage_FeedAvg.fits'.format(image_directory))\n \n \n for feed in tqdm(feeds):\n if not isinstance(mapper.map_bavg,type(None)):\n mapper.map_bavg *= 0.\n mapper.hits = None\n\n maps = mapper(feed)\n\n fstr = '-'.join(['{:02d}'.format(feed)])\n outdir = '{}'.format(image_directory)\n\n mapper.plotImages([feed],\n '{}/Hitmap_Feed{:02d}.png'.format(outdir,feed),\n '{}/BandAverage_Feed{:02d}.png'.format(outdir,feed),\n plot_circle,\n plot_circle_radius)\n #mapper.SaveMaps('{}/BandAverage_Feed{:02d}.fits'.format(image_directory,feed))", "def make_distance_boxplots(dm_f,\r\n map_f,\r\n fields,\r\n width=None,\r\n height=6.0,\r\n suppress_all_within=False,\r\n suppress_all_between=False,\r\n suppress_individual_within=False,\r\n suppress_individual_between=False,\r\n y_min=0.0,\r\n 
y_max=1.0,\r\n whisker_length=1.5,\r\n box_width=0.5,\r\n box_color=None,\r\n color_individual_within_by_field=None,\r\n sort=None):\r\n # Parse data files and do some preliminary error checking.\r\n dm_header, dm_data = parse_distmat(dm_f)\r\n map_data, map_header, map_comments = parse_mapping_file(map_f)\r\n\r\n if fields is None or len(fields) < 1:\r\n raise ValueError(\"You must provide at least one field to analyze.\")\r\n\r\n for field in fields:\r\n if field not in map_header:\r\n raise ValueError(\"The field '%s' is not in the provided mapping \"\r\n \"file. Please supply correct fields \"\r\n \"corresponding to fields in the mapping file.\" %\r\n field)\r\n\r\n # Make sure the y_min and y_max options make sense, as they can be either\r\n # 'auto' or a number.\r\n y_min = _cast_y_axis_extrema(y_min)\r\n y_max = _cast_y_axis_extrema(y_max)\r\n\r\n # Collate the distributions of distances that will comprise each boxplot.\r\n # Suppress the generation of the indicated types of boxplots.\r\n results = []\r\n for field in fields:\r\n plot_data = []\r\n plot_labels = []\r\n plot_colors = []\r\n legend = None\r\n\r\n # Little bit of duplicate code here... not sure it's worth the effort\r\n # to clean up though.\r\n if not suppress_all_within:\r\n plot_data.append(get_all_grouped_distances(dm_header, dm_data,\r\n map_header, map_data, field, within=True))\r\n plot_labels.append(\"All within %s\" % field)\r\n\r\n if color_individual_within_by_field is not None:\r\n plot_colors.append(None)\r\n else:\r\n plot_colors.append(box_color)\r\n\r\n if not suppress_all_between:\r\n plot_data.append(get_all_grouped_distances(dm_header, dm_data,\r\n map_header, map_data, field, within=False))\r\n plot_labels.append(\"All between %s\" % field)\r\n\r\n if color_individual_within_by_field is not None:\r\n plot_colors.append(None)\r\n else:\r\n plot_colors.append(box_color)\r\n\r\n if not suppress_individual_within:\r\n within_dists = get_grouped_distances(dm_header, dm_data,\r\n map_header, map_data, field,\r\n within=True)\r\n field_states = []\r\n for grouping in within_dists:\r\n plot_data.append(grouping[2])\r\n plot_labels.append(\"%s vs. %s\" % (grouping[0], grouping[1]))\r\n field_states.append(grouping[0])\r\n\r\n # If we need to color these boxplots by a field, build up a\r\n # list of colors and a legend.\r\n if color_individual_within_by_field is not None:\r\n colors, color_mapping = _color_field_states(\r\n format_mapping_file(map_header, map_data).split('\\n'),\r\n dm_header, field, field_states,\r\n color_individual_within_by_field)\r\n plot_colors.extend(colors)\r\n legend = (color_mapping.values(), color_mapping.keys())\r\n else:\r\n plot_colors.extend([box_color] * len(field_states))\r\n\r\n if not suppress_individual_between:\r\n between_dists = get_grouped_distances(dm_header, dm_data,\r\n map_header, map_data, field, within=False)\r\n\r\n for grouping in between_dists:\r\n plot_data.append(grouping[2])\r\n plot_labels.append(\"%s vs. 
%s\" % (grouping[0], grouping[1]))\r\n\r\n if color_individual_within_by_field is not None:\r\n plot_colors.append(None)\r\n else:\r\n plot_colors.append(box_color)\r\n\r\n assert (len(plot_data) == len(plot_labels) and\r\n len(plot_labels) == len(plot_colors)), \"The number \" +\\\r\n \"of boxplot labels and colors do not match the number of \" +\\\r\n \"boxplots.\"\r\n\r\n # We now have our data and labels ready, so plot them!\r\n if plot_data:\r\n if sort is not None:\r\n plot_data, plot_labels, plot_colors = _sort_distributions(\r\n plot_data, plot_labels, plot_colors, sort)\r\n\r\n if width is None:\r\n width = len(plot_data) * box_width + 2\r\n if width <= 0 or height <= 0:\r\n raise ValueError(\"The specified width and height of the plot \"\r\n \"must be greater than zero.\")\r\n\r\n plot_figure = boxplots(plot_data, x_tick_labels=plot_labels,\r\n title=\"%s Distances\" % field,\r\n x_label=\"Grouping\", y_label=\"Distance\",\r\n x_tick_labels_orientation='vertical',\r\n y_min=y_min, y_max=y_max,\r\n whisker_length=whisker_length,\r\n box_width=box_width, box_colors=plot_colors,\r\n figure_width=width, figure_height=height,\r\n legend=legend)\r\n\r\n results.append((field, plot_figure, plot_data, plot_labels,\r\n plot_colors))\r\n else:\r\n raise ValueError(\"The generation of all plots was suppressed. At \"\r\n \"least one type of plot must be unsuppressed.\")\r\n\r\n return results", "def analyze(self, options, target):\r\n\r\n target = 0\r\n\r\n upf = None\r\n\r\n dwnf = None\r\n\r\n if options.upfile is not None:\r\n\r\n upf = basepath + options.upfile + '.ma'\r\n\r\n if options.downfile is not None:\r\n\r\n dwnf = basepath + options.downfile + '.ma'\r\n\r\n\r\n\r\n for filename in (upf, dwnf):\r\n\r\n # if options.upfile is not None and options.downfile is not None:\r\n\r\n if filename is None:\r\n\r\n break\r\n\r\n im=[]\r\n\r\n self.imageData = []\r\n\r\n print (\"Loading data from %s\" % filename)\r\n\r\n try:\r\n\r\n im = MetaArray(file = filename, subset=(slice(0,2), slice(64,128), slice(64,128)))\r\n\r\n except:\r\n\r\n print(' Error loading upfile: %s' % filename)\r\n\r\n return\r\n\r\n print(' Data loaded')\r\n\r\n target = target + 1\r\n\r\n self.times = im.axisValues('Time').astype('float32')\r\n\r\n self.imageData = im.view(np.ndarray).astype('float32')\r\n\r\n im=[]\r\n\r\n self.analysis_fourier_map(period=self.period, target=target, bins=binsize,)\r\n\r\n if target > 0:\r\n\r\n self.plot_maps(mode = 1, target = target, gfilter = self.gfilter)", "def make_distance_boxplots(dm_f,\n map_f,\n fields,\n width=None,\n height=6.0,\n suppress_all_within=False,\n suppress_all_between=False,\n suppress_individual_within=False,\n suppress_individual_between=False,\n y_min=0.0,\n y_max=1.0,\n whisker_length=1.5,\n box_width=0.5,\n box_color=None,\n color_individual_within_by_field=None,\n sort=False):\n # Parse data files and do some preliminary error checking.\n dm_header, dm_data = parse_distmat(dm_f)\n map_data, map_header, map_comments = parse_mapping_file(map_f)\n\n if fields is None or len(fields) < 1:\n raise ValueError(\"You must provide at least one field to analyze.\")\n\n for field in fields:\n if field not in map_header:\n raise ValueError(\"The field '%s' is not in the provided mapping \"\n \"file. 
Please supply correct fields \"\n \"corresponding to fields in the mapping file.\" %\n field)\n\n # Make sure the y_min and y_max options make sense, as they can be either\n # 'auto' or a number.\n y_min = _cast_y_axis_extrema(y_min)\n y_max = _cast_y_axis_extrema(y_max)\n\n # Collate the distributions of distances that will comprise each boxplot.\n # Suppress the generation of the indicated types of boxplots.\n results = []\n for field in fields:\n plot_data = []\n plot_labels = []\n plot_colors = []\n legend = None\n\n # Little bit of duplicate code here... not sure it's worth the effort\n # to clean up though.\n if not suppress_all_within:\n plot_data.append(get_all_grouped_distances(dm_header, dm_data,\n map_header, map_data, field, within=True))\n plot_labels.append(\"All within %s\" % field)\n\n if color_individual_within_by_field is not None:\n plot_colors.append(None)\n else:\n plot_colors.append(box_color)\n\n if not suppress_all_between:\n plot_data.append(get_all_grouped_distances(dm_header, dm_data,\n map_header, map_data, field, within=False))\n plot_labels.append(\"All between %s\" % field)\n\n if color_individual_within_by_field is not None:\n plot_colors.append(None)\n else:\n plot_colors.append(box_color)\n\n if not suppress_individual_within:\n within_dists = get_grouped_distances(dm_header, dm_data,\n map_header, map_data, field,\n within=True)\n field_states = []\n for grouping in within_dists:\n plot_data.append(grouping[2])\n plot_labels.append(\"%s vs. %s\" % (grouping[0], grouping[1]))\n field_states.append(grouping[0])\n\n # If we need to color these boxplots by a field, build up a\n # list of colors and a legend.\n if color_individual_within_by_field is not None:\n colors, color_mapping = _color_field_states(\n format_mapping_file(map_header, map_data).split('\\n'),\n dm_header, field, field_states,\n color_individual_within_by_field)\n plot_colors.extend(colors)\n legend = (color_mapping.values(), color_mapping.keys())\n else:\n plot_colors.extend([box_color] * len(field_states))\n\n if not suppress_individual_between:\n between_dists = get_grouped_distances(dm_header, dm_data,\n map_header, map_data, field, within=False)\n\n for grouping in between_dists:\n plot_data.append(grouping[2])\n plot_labels.append(\"%s vs. %s\" % (grouping[0], grouping[1]))\n\n if color_individual_within_by_field is not None:\n plot_colors.append(None)\n else:\n plot_colors.append(box_color)\n\n assert (len(plot_data) == len(plot_labels) and\n len(plot_labels) == len(plot_colors)), \"The number \" +\\\n \"of boxplot labels and colors do not match the number of \" +\\\n \"boxplots.\"\n\n # We now have our data and labels ready, so plot them!\n if plot_data:\n if sort:\n plot_data, plot_labels, plot_colors = \\\n _sort_distributions_by_median(plot_data, plot_labels,\n plot_colors)\n\n if width is None:\n width = len(plot_data) * box_width + 2\n if width <= 0 or height <= 0:\n raise ValueError(\"The specified width and height of the plot \"\n \"must be greater than zero.\")\n\n plot_figure = generate_box_plots(plot_data,\n x_tick_labels=plot_labels, title=\"%s Distances\" % field,\n x_label=\"Grouping\", y_label=\"Distance\",\n x_tick_labels_orientation='vertical', y_min=y_min,\n y_max=y_max, whisker_length=whisker_length,\n box_width=box_width, box_colors=plot_colors,\n figure_width=width, figure_height=height, legend=legend)\n\n results.append((field, plot_figure, plot_data, plot_labels,\n plot_colors))\n else:\n raise ValueError(\"The generation of all plots was suppressed. 
At \"\n \"least one type of plot must be unsuppressed.\")\n\n return results", "def _generate_modifiers(self, bands=\"riz\", **kwargs):\n\n modifiers = {\n 'mcal_psf_g1': 'mcal_psf_g1_mean',\n 'mcal_psf_g2': 'mcal_psf_g2_mean',\n 'mcal_T_psf': 'mcal_psf_T_mean',\n 'mcal_flags': 'mcal_flags',\n }\n\n # newer metacal catalogs no longer have the id column\n if 'id' in self._columns:\n modifiers['objectId'] = 'id'\n\n # Additional metacal values and their variants\n for variant in ['', '_1p', '_1m', '_2p', '_2m']:\n # Shape\n modifiers['mcal_g1{}'.format(variant)] = 'mcal_gauss_g1{}'.format(variant)\n modifiers['mcal_g2{}'.format(variant)] = 'mcal_gauss_g2{}'.format(variant)\n # Size\n modifiers['mcal_T{}'.format(variant)] = 'mcal_gauss_T{}'.format(variant)\n # SNR\n modifiers['mcal_s2n{}'.format(variant)] = 'mcal_gauss_s2n{}'.format(variant)\n\n # Adds band dependent info and their variants\n for band in bands:\n modifiers['mcal_flux_{}{}'.format(band, variant)] = (\n lambda x: x * self._flux_scaling,\n 'mcal_gauss_flux_{}{}'.format(band, variant)\n )\n modifiers['mcal_flux_err_{}{}'.format(band, variant)] = (\n lambda x: x * self._flux_scaling,\n 'mcal_gauss_flux_err_{}{}'.format(band, variant)\n )\n modifiers['mcal_mag_{}{}'.format(band, variant)] = (\n lambda x: -2.5 * np.log10(x * self._flux_scaling) + self.METACAL_ZEROPOINT,\n 'mcal_gauss_flux_{}{}'.format(band, variant),\n )\n modifiers['mcal_mag_err_{}{}'.format(band, variant)] = (\n lambda flux, err: (2.5 * err) / (flux * np.log(10)),\n 'mcal_gauss_flux_{}{}'.format(band, variant),\n 'mcal_gauss_flux_err_{}{}'.format(band, variant),\n )\n\n return modifiers", "def map_cell_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n for gal_index in range(GR.N_gal):\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n print('Now mapping %s' % gal_ob.name)\n isrf_ob = gal.isrf(gal_index)\n\n # Load SKIRT output\n wavelengths,bin_width = aux.read_probe_wavelengths(isrf_ob.name)\n N_start,N_stop = aux.FUV_index(wavelengths)\n image_data,units = isrf_ob._get_cut_probe(orientation=p.orientation)\n\n # Plot\n ax1 = axs[9 - counter]\n if p.prop == 'FUV':\n # FUV_xy_image = np.array([np.trapz(image_data[N_start:N_stop,:,:],x=wavelengths[N_start:N_stop]) \\\n # for i in range(len(df))])\n FUV_xy_image = image_data[N_start:N_stop,:,:].sum(axis=0) * 4 * np.pi\n FUV_xy_image = ndimage.rotate(FUV_xy_image, 0, reshape=True)\n # FUV_xy_image = np.fliplr(FUV_xy_image)\n FUV_xy_image[FUV_xy_image <= 0] = np.min(FUV_xy_image[FUV_xy_image > 0])\n im = ax1.imshow(np.log10(FUV_xy_image),\\\n extent=[-isrf_ob.radius,isrf_ob.radius,-isrf_ob.radius,isrf_ob.radius],\\\n vmin=p.vmin,\\\n cmap='twilight')\n lab = 'FUV flux [W/m$^2$/micron]'\n\n # pdb.set_trace()\n\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.8*gal_ob.radius,0.8*gal_ob.radius])\n ax1.set_ylim([-0.8*gal_ob.radius,0.8*gal_ob.radius])\n if p.prop == 'm':\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n\n if counter == 0:\n cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # 
fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n figname = p.d_plot + 'cell_data/map_%s_%s_gals_%s_%i.png' % (p.prop,p.z1,p.orientation,fignum)\n print('Saving in ' + figname)\n # plt.tight_layout()\n plt.savefig(figname, format='png', dpi=250, facecolor='w')\n fignum += 1\n pdb.set_trace()\n else:\n fig, ax1 = plt.subplots(figsize=(10,10))\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='cell_data')\n print(simgas.keys())\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n\n # Plot\n Rmax = max_scale/2\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D = np.log10(map2D)\n if not p.log: map2D[map2D < p.vmin] = p.vmin/2 #np.min(map2D[map2D > 0])\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index))\n if not os.path.isdir(p.d_plot + 'cell_data/'): os.mkdir(p.d_plot + 'cell_data/')\n plt.savefig(p.d_plot + 'cell_data/map_%s_G%i.png' % (p.prop,p.gal_index), format='png', dpi=250, facecolor='w')", "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, 
fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template", "def createFieldMapping(sgidPoints):\n # Create field mappings\n sgidFMs = arcpy.FieldMappings()\n\n # Perform some field renaming\n mapPairs = [\n ('State', 'State'),\n ('City', 'Inc_Muni'),\n ('CountyID', 'County'),\n ('ZipCode', 'Zip_Code'),\n ('PrefixDir', 'StN_PreDir'),\n ('StreetName', 'StreetName'),\n ('StreetType', 'StN_PosTyp'),\n ('SuffixDir', 'StN_PosDir'),\n ('AddNum', 'Add_Number'),\n ('LandmarkName', 'landmkName'),\n ('Building', 'Building'),\n ('UnitType', 'Unit'),\n ('AddSource', 'AddAuth'),\n ('AddSystem', 'UniqWithin'),\n ('LoadDate', 'LastUpdate')]\n\n for p in mapPairs:\n print p\n sgidFMs.addFieldMap(getRenameFieldMap(sgidPoints, p[0], p[1]))\n\n return sgidFMs", "def _make_features(self):\n self.features = {}\n self.labels = {}\n for key in ['train', 'cv', 'test']:\n if self.radius is not None:\n feat, label = self._sliding_window(self.images[key], self.masks[key], window_radius=self.radius)\n self.features[key] = feat\n self.labels[key] = label\n else:\n self.features[key] = self.images[key].reshape(-1, 3)\n self.labels[key] = self.masks[key].ravel()", "def create_image_db():\n logging.info('=============> create_image_db: create image metadata json mapper file <===========')\n load_all_map_dir(manifest_map_dir, layer_json_map_dir, layer_config_map_dir)\n print \"create pool\"\n P = multiprocessing.Pool(60)\n print \"before map!\"\n print len(manifest_names) #process_manifest\n print len(layer_json_map_dir)\n print \"before map!\"\n #json_datas = []\n #for i in manifest_names:\n # json_datas.append(process_manifest(i))\n json_datas = P.map(process_manifest, manifest_names)\n print \"after map\"\n print \"write to files!\"\n write_json_datas(json_datas)", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n 
bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) (ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map", "def show_field(self, vehicles, type):\n\n # starting pixels x = 0, y = 0 on field image\n start_x = 78\n start_y = 45\n\n # block pixel width is slightly different per field size\n if self.size == 6:\n block_width = 72\n elif self.size == 9:\n block_width = 69\n elif self.size == 12:\n block_width = 68.5\n\n field = plt.imread(f\"data/RushHourImages/RushHour{self.size}.jpg\")\n fig, ax = plt.subplots()\n plt.imshow(field)\n plt.axis('off')\n\n for vehicle in vehicles:\n if vehicle.orientation == 'H':\n x = start_x + (vehicle.x * block_width)\n y = start_y + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck{vehicle.id}.png\")\n\n # truck: the image coordinate is his middle, which changes with the length of the car\n x += 40\n\n if vehicle.orientation == 'V':\n x = start_y + (vehicle.x * block_width)\n y = start_x + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car-rotated{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck-rotated{vehicle.id}.png\")\n y += 40\n\n if self.size == 6:\n imagebox = OffsetImage(car, zoom=0.6)\n elif self.size == 9:\n imagebox = OffsetImage(car, zoom=0.4)\n elif self.size == 12:\n imagebox = OffsetImage(car, zoom=0.3)\n\n imagebox.image.axes = ax\n xy = (x, y)\n ab = AnnotationBbox(imagebox, xy, frameon=False)\n ax.add_artist(ab)\n\n if type == True:\n plt.show(block=False)\n plt.pause(0.001)\n plt.close()\n else:\n plt.show()", "def build_features_dict(image, image_id, filename, image_format=None,\n bboxes=None, masks=None, label_ids=None,\n label_names=None, masks_format=\"png\"):\n\n # Add channel dimension if needed.\n if len(image.shape) == 3:\n pass\n elif len(image.shape) == 2:\n image = np.expand_dims(image, -1)\n else:\n raise Exception(f\"Wrong image shape: {image.shape}\")\n\n # Get image shape.\n image_width, image_height, image_channel = image.shape\n\n # Encode image.\n image_encoded = imaging.encode_image(image, image_format)\n\n # Create te feature dict.\n feature_dict = {}\n\n # Image features\n feature_dict['image_height'] = int64_feature(image_height)\n feature_dict['image_width'] = int64_feature(image_width)\n 
feature_dict['image_channel'] = int64_feature(image_channel)\n feature_dict['image_filename'] = bytes_feature(filename.encode('utf8'))\n feature_dict['image_id'] = bytes_feature(str(image_id).encode('utf8'))\n feature_dict['image_encoded'] = bytes_feature(image_encoded.numpy())\n feature_dict['image_format'] = bytes_feature(image_format.encode('utf8'))\n\n # Object features\n if bboxes is not None:\n if bboxes.shape[0] > 0:\n bboxes_x = bboxes[:, 0]\n bboxes_y = bboxes[:, 1]\n bboxes_width = bboxes[:, 2]\n bboxes_height = bboxes[:, 3]\n else:\n bboxes_x = []\n bboxes_y = []\n bboxes_width = []\n bboxes_height = []\n\n feature_dict['bboxes_x'] = float_list_feature(bboxes_x)\n feature_dict['bboxes_y'] = float_list_feature(bboxes_y)\n feature_dict['bboxes_width'] = float_list_feature(bboxes_width)\n feature_dict['bboxes_height'] = float_list_feature(bboxes_height)\n\n if label_ids is not None:\n feature_dict['label_ids'] = int64_list_feature(label_ids)\n\n if label_names is not None:\n feature_dict['label_names'] = bytes_list_feature(label_names)\n\n if masks is not None:\n # Encode masks.\n masks_encoded = []\n for mask in masks:\n mask = image = np.expand_dims(mask, -1)\n mask_encoded = imaging.encode_image(mask, masks_format)\n masks_encoded.append(mask_encoded.numpy())\n\n feature_dict['masks_encoded'] = bytes_list_feature(masks_encoded)\n feature_dict['masks_format'] = bytes_feature(masks_format.encode(\"utf8\"))\n\n return feature_dict", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # 
exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def get_alms(maps=None,\n mask=None,\n maplabel='353',\n showI=False,\n pol=True,\n intensity=True,\n rewrite=False,\n writemap=False,\n savealms=True,\n masktype='PowerSpectra',#'GalPlane2',\n lmax=100):\n\n\n newname = 'alms_lmax{}_mask_{}__'.format(lmax, masktype) + maplabel + '.npy'\n \n\n \n if not os.path.exists(data_path + newname) or rewrite:\n print 'alms file {} does not exist; calculating alms...'.format(newname)\n if mask is None:\n if masktype == 'PowerSpectra':\n maskname = 'HFI_PowerSpect_Mask_2048_R1.10.fits'\n maskfield = 0\n elif masktype == 'GalPlane60':\n maskname = 'HFI_Mask_GalPlane-apo0_2048_R2.00.fits',\n maskfield = 2\n elif masktype == 'no':\n maskname = 'HFI_PowerSpect_Mask_2048_R1.10.fits'\n maskfield = 0\n mask = hp.read_map(data_path + maskname, field=maskfield)\n if masktype == 'no':\n mask = mask*0. 
+ 1.\n masknside = hp.get_nside(mask)\n if maps is None:\n Imap,Qmap,Umap = hp.read_map( data_path + 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(maplabel),hdu=1, field=(0,1,2) )\n mapnside = hp.get_nside(Imap)\n else:\n if intensity and pol:\n Imap = maps[0]\n Qmap = maps[1]\n Umap = maps[2]\n mapnside = hp.get_nside(Imap)\n elif intensity and not pol:\n Imap = maps[0]\n mapnside = hp.get_nside(Imap)\n elif pol and not intensity:\n Qmap = maps[0]\n Umap = maps[1]\n mapnside = hp.get_nside(Qmap)\n \n if masknside != mapnside:\n print 'adjusting mask to match map resolution...'\n mask = hp.pixelfunc.ud_grade(mask, nside_out=mapnside)\n\n if showI:\n hp.mollview(Imap*mask)\n\n alms = []\n if intensity:\n Imap = Imap*mask\n Tlm = hp.map2alm(Imap, lmax=lmax)\n alms.append(Tlm)\n if pol:\n Qmap *= mask\n Umap *= mask\n Elm,Blm = hp.map2alm_spin( (Qmap,Umap), 2, lmax=lmax )\n alms.append(Elm)\n alms.append(Blm)\n\n #this will only work if get_intensity and get_pol\n if writemap and intensity and pol:\n hp.fitsfunc.write_map( data_path + newname, [Imap, Qmap, Umap])\n \n if savealms and intensity and pol:\n np.save(data_path + newname, alms)\n\n return alms\n\n\n else:\n alms = np.load(data_path + newname, 'r')\n if intensity and pol:\n return alms[0], alms[1], alms[2]\n else:\n if intensity:\n return alms[0]\n if pol:\n return alms[1], alms[2]", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def run(self):\n super().run()\n # more verbose creation date for clarity\n creation_date = f'20{self.creation_date}'\n make_e3sm_to_cmip_maps(self.config, self.logger, self.mesh_short_name,\n creation_date, self.ntasks)", "def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag", "def _generate_modifiers(dm_schema_version=4, bands=None, pixel_scale=0.2, **kwargs): # pylint: disable=arguments-differ\n\n bands = bands or 'ugrizy'\n FLUX = 'flux' if dm_schema_version <= 2 else 'instFlux'\n ERR = 'Sigma' if dm_schema_version <= 1 else 'Err'\n\n modifiers = {\n 'objectId': 'id',\n 'parentObjectId': 'parent',\n 'ra': (np.rad2deg, 'coord_ra'),\n 'dec': (np.rad2deg, 'coord_dec'),\n 'x': 'base_SdssCentroid_x',\n 'y': 'base_SdssCentroid_y',\n 'xErr': f'base_SdssCentroid_x{ERR}',\n 'yErr': f'base_SdssCentroid_y{ERR}',\n 'xy_flag': 'base_SdssCentroid_flag',\n 'psNdata': 'base_PsfFlux_area',\n 'extendedness': 'base_ClassificationExtendedness_value',\n 'blendedness': 'base_Blendedness_abs',\n }\n\n not_good_flags = (\n 'base_PixelFlags_flag_edge',\n 'base_PixelFlags_flag_interpolatedCenter',\n 'base_PixelFlags_flag_saturatedCenter',\n 'base_PixelFlags_flag_crCenter',\n 'base_PixelFlags_flag_bad',\n 'base_PixelFlags_flag_suspectCenter',\n 'base_PixelFlags_flag_clipped',\n )\n\n modifiers['good'] = (create_basic_flag_mask,) + not_good_flags\n modifiers['clean'] = (\n create_basic_flag_mask,\n 'deblend_skipped',\n ) + not_good_flags\n\n # cross-band average, second moment values\n modifiers['I_flag'] = 'ext_shapeHSM_HsmSourceMoments_flag'\n for ax in ['xx', 'yy', 'xy']:\n modifiers[f'I{ax}_pixel'] = f'ext_shapeHSM_HsmSourceMoments_{ax}'\n modifiers[f'I{ax}PSF_pixel'] = f'base_SdssShape_psf_{ax}'\n\n for band in bands:\n modifiers[f'psFlux_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_slot_PsfFlux_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'psFlux_flag_{band}'] = f'{band}_slot_PsfFlux_flag'\n modifiers[f'psFluxErr_{band}'] = (convert_flux_to_nanoJansky,\n 
f'{band}_slot_PsfFlux_{FLUX}{ERR}',\n f'{band}_FLUXMAG0')\n modifiers[f'mag_{band}'] = (convert_flux_to_mag,\n f'{band}_slot_PsfFlux_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'magerr_{band}'] = (convert_flux_err_to_mag_err,\n f'{band}_slot_PsfFlux_{FLUX}',\n f'{band}_slot_PsfFlux_{FLUX}{ERR}')\n\n modifiers[f'cModelFlux_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'cModelFluxErr_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_modelfit_CModel_{FLUX}{ERR}',\n f'{band}_FLUXMAG0')\n modifiers[f'cModelFlux_flag_{band}'] = f'{band}_modelfit_CModel_flag'\n modifiers[f'mag_{band}_cModel'] = (convert_flux_to_mag,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'magerr_{band}_cModel'] = (convert_flux_err_to_mag_err,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_modelfit_CModel_{FLUX}{ERR}')\n modifiers[f'snr_{band}_cModel'] = (np.divide,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_modelfit_CModel_{FLUX}{ERR}')\n\n # Per-band shape information\n modifiers[f'I_flag_{band}'] = f'{band}_base_SdssShape_flag'\n\n for ax in ['xx', 'yy', 'xy']:\n modifiers[f'I{ax}_pixel_{band}'] = f'{band}_base_SdssShape_{ax}'\n modifiers[f'I{ax}PSF_pixel_{band}'] = f'{band}_base_SdssShape_psf_{ax}'\n\n modifiers[f'psf_fwhm_{band}'] = (\n lambda xx, yy, xy: pixel_scale * 2.355 * (xx * yy - xy * xy) ** 0.25,\n f'{band}_base_SdssShape_psf_xx',\n f'{band}_base_SdssShape_psf_yy',\n f'{band}_base_SdssShape_psf_xy')\n\n return modifiers", "def __init__(self, filename):\n\t\tself.im_raw = sunpy.map.Map(filename)\n\t\ttry:\n\t\t\tself.B0 = self.im_raw.meta['B0']\n\t\texcept KeyError:\n\t\t\tself.B0 = self.im_raw.meta['OBS_B0']\n\t\ttry:\n\t\t\tself.L0 = self.im_raw.meta['L0']\n\t\texcept KeyError:\n\t\t\tself.L0 = self.im_raw.meta['OBS_L0']\n\t\ttry:\n\t\t\tself.X0 = self.im_raw.meta['X0']\n\t\texcept KeyError:\n\t\t\tself.X0 = self.im_raw.meta['IMG_X0']\n\t\ttry:\n\t\t\tself.Y0 = self.im_raw.meta['Y0']\n\t\texcept KeyError:\n\t\t\tself.Y0 = self.im_raw.meta['IMG_Y0']\n\t\tif self.im_raw.detector == 'SPMG':\n\t\t\tself.rsun = self.im_raw.rsun_obs.value / self.im_raw.meta['SCALE']\t\n\t\telse:\n\t\t\tself.rsun = self.im_raw.rsun_obs.value", "def build_map(self):\n # Initialize the world map\n self.world_map = np.zeros((self.map_size, self.map_size))\n \n # Subscribe data and process them in the callback func\n sonar_sub = message_filters.Subscriber('/RosAria/sonar', PointCloud)\n pose_sub = message_filters.Subscriber('/RosAria/pose', Odometry)\n\n time_sync = message_filters.TimeSynchronizer([sonar_sub, pose_sub], queue_size=10)\n time_sync.registerCallback(self.callback_map)\n \n # show map interactively\n rospy.sleep(1)\n while not rospy.is_shutdown():\n cv2.imshow('world_map', self.world_prob)\n cv2.waitKey(100)\n\n if self.save_map and self.count%1000==0:\n with open(self.map_file, 'w') as f:\n pickle.dump(self.world_prob, f)\n print(\"=== Save map to {} ===\".format(self.map_file))", "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not 
os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def generate_legacy_layers(self, images_map, content_retriever):\n pass", "def generate_legacy_layers(self, images_map, content_retriever):\n pass", "def generate_materials_dict(self):\n c = 299792458.0\n w_mat = 2 * np.pi * c / self.l_mat - self.w0\n l2_mat = (self.l_mat * 1e6) ** 2\n\n n_air = 1 + 0.05792105 * l2_mat / (238.0185 * l2_mat - 1) + 0.00167917 * l2_mat / (57.362 * l2_mat - 1)\n air_ip = interp1d(w_mat, n_air, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['air'] = air_ip\n\n n_fs = np.sqrt(1 + 0.6961663 * l2_mat / (l2_mat - 0.0684043 ** 2) +\n 0.4079426 * l2_mat / (l2_mat - 0.1162414 ** 2) +\n 0.8974794 * l2_mat / (l2_mat - 9.896161 ** 2))\n fs_ip = interp1d(w_mat, n_fs, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['fs'] = fs_ip\n\n n_mgf2 = np.sqrt(1 + 0.48755108 * l2_mat / (l2_mat - 0.04338408 ** 2) +\n 0.39875031 * l2_mat / (l2_mat - 0.09461442 ** 2) +\n 2.3120353 * l2_mat / (l2_mat - 23.793604 ** 2))\n mgf2_ip = interp1d(w_mat, n_mgf2, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['mgf2'] = mgf2_ip\n\n n_sapphire_o = np.sqrt(1 + 1.4313493 * l2_mat / (l2_mat - 0.0726631 ** 2) +\n 0.65054713 * l2_mat / (l2_mat - 0.1193242 ** 2) +\n 5.3414021 * l2_mat / (l2_mat - 18.028251 ** 2))\n sapphire_o_ip = interp1d(w_mat, n_sapphire_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_o'] = sapphire_o_ip\n\n n_sapphire_e = np.sqrt(1 + 1.5039759 * l2_mat / (l2_mat - 0.0740288 ** 2) +\n 0.55069141 * l2_mat / (l2_mat - 0.1216529 ** 2) +\n 6.5927379 * l2_mat / (l2_mat - 20.072248 ** 2))\n sapphire_e_ip = interp1d(w_mat, n_sapphire_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['sapphire_e'] = sapphire_e_ip\n\n n_bbo_o = np.sqrt(2.7405 + 0.0184 / (l2_mat - 0.0179) - 0.0155 * l2_mat)\n bbo_o_ip = interp1d(w_mat, n_bbo_o, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_o'] = bbo_o_ip\n\n n_bbo_e = np.sqrt(2.3730 + 0.0128 / (l2_mat - 0.0156) - 0.0044 * l2_mat)\n bbo_e_ip = interp1d(w_mat, n_bbo_e, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials['bbo_e'] = bbo_e_ip\n\n materials_files = os.listdir(self.materials_path)\n logger.info(\"Found {0:d}\".format(materials_files.__len__()))\n for mat_file in materials_files:\n logger.debug(mat_file)\n self.read_material(''.join((self.materials_path, '/', mat_file)))", "def buildFieldTable(fields):\n name=[]\n ra =[]\n dec =[]\n radius = []\n \n index = 0\n for k,v in 
fields.items():\n name.append(k)\n ra.append(getDegree(v.coords.ra))\n dec.append(getDegree(v.coords.dec))\n radius.append(v.radius)\n v.index = index\n index += 1\n \n hdu = pf.BinTableHDU.from_columns(\\\n pf.ColDefs( [pf.Column(name='NAME',format=py_to_fits(name),array=name),\n pf.Column(name='RA',format=py_to_fits(ra),array=ra),\n pf.Column(name='DEC',format=py_to_fits(dec),array=dec),\n pf.Column(name='RADIUS',format=py_to_fits(radius),array=radius)]),\n name = 'Fields')\n# hdu.header['EXTNAME'] = 'Fields'\n return hdu", "def make_normmaps(maps, supmap, etacut=0.9):\n\n\tif maps[0].ndim == 0:\n\t\tmaps = np.reshape(maps, (1, len(maps)))\n\n\tnpix = hp.get_map_size(maps[0])\n\tnside = hp.npix2nside(npix)\n\n\tif etacut:\n\t\tqi, qf = 2.*np.arctan(np.exp(-np.array([etacut, -etacut])))\n\t\tmask = np.ones(npix)\n\t\tmask[hp.query_strip(nside, qi, qf)] = 0.\n\telse:\n\t\tqi, qf = 0., 2*np.pi\n\t\tmask = 0.\n\n\tfinmap = supmap/npix*(1.-mask)+mask\n\tpixs = np.where(finmap == 0.)\n\tfinmap[pixs] = 1.\n\n\tnorm_maps = maps / (npix*finmap)\n\tnorm_maps *= npix / np.sum(norm_maps, axis=1)[:, None]\n\n\treturn norm_maps", "def __init__(self):\n self.lattices = []\n self.meshfns = []", "def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)", "def GetMapId(landsat, date, date_range):\n \n def maskClouds(img):\n scored = ee.Algorithms.Landsat.simpleCloudScore(img);\n return img.updateMask(scored.select(['cloud']).lt(20));\n\n def CreateTimeBand(img):\n return maskClouds(img).byte().addBands(img.metadata('system:time_start'))\n\n if landsat == 'l7':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L7)\n l7 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l7Composite = l7.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l7Composite.getMapId({\n 'min': '0,0,0',\n 'max': '255,255,255',\n 'bands': 'B4,B3,B2',\n })\n if landsat == 'l8':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L8)\n l8 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l8Composite = l8.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l8Composite.getMapId({\n 'min': '0',\n 'max': '0.4',\n 'bands': 'B4,B3,B2',\n })", "def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'", "def generate_winstonlutz_multi_bb_single_field(\n simulator: Simulator,\n field_layer: type[Layer],\n dir_out: str,\n offsets: 
list[list[float]] | list[dict[str, float]],\n field_size_mm: tuple[float, float] = (30, 30),\n final_layers: list[Layer] | None = None,\n bb_size_mm: float = 5,\n image_axes: ((int, int, int), ...) = (\n (0, 0, 0),\n (90, 0, 0),\n (180, 0, 0),\n (270, 0, 0),\n ),\n gantry_tilt: float = 0,\n gantry_sag: float = 0,\n clean_dir: bool = True,\n jitter_mm: float = 0,\n) -> list[str]:\n if not osp.isdir(dir_out):\n os.mkdir(dir_out)\n if clean_dir:\n for pdir, _, files in os.walk(dir_out):\n [os.remove(osp.join(pdir, f)) for f in files]\n file_names = []\n for gantry, coll, couch in image_axes:\n sim_single = copy.copy(simulator)\n sim_single.add_layer(\n field_layer(\n field_size_mm=field_size_mm,\n cax_offset_mm=(gantry_tilt * cos(gantry), gantry_sag * sin(gantry)),\n )\n )\n for offset in offsets:\n if isinstance(offset, dict):\n offset_mm_left = offset[\"offset_left_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_up = offset[\"offset_up_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_in = -offset[\"offset_in_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n else:\n offset_mm_left = offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n sim_single.add_layer(\n PerfectBBLayer(\n cax_offset_mm=(\n long_offset,\n gplane_offset,\n ),\n bb_size_mm=bb_size_mm,\n )\n )\n if final_layers is not None:\n for layer in final_layers:\n sim_single.add_layer(layer)\n file_name = f\"WL G={gantry}, C={coll}, P={couch}; Field={field_size_mm}mm; BB={bb_size_mm}mm @ left={offset_mm_left:.2f}, in={offset_mm_in:.2f}, up={offset_mm_up:.2f}; Gantry tilt={gantry_tilt}, Gantry sag={gantry_sag}.dcm\"\n sim_single.generate_dicom(\n osp.join(dir_out, file_name),\n gantry_angle=gantry,\n coll_angle=coll,\n table_angle=couch,\n )\n file_names.append(file_name)\n return file_names", "def create_boundary_maps(params):\n batch_size = 40\n loader_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 1}\n train_loader, val_loader = \\\n init_city_loader(data_folder=params['data_folder'],\n image_size=[1024, 2048], # keeping original size\n remove_alpha=True, # removing the alpha channel\n loader_params=loader_params,\n ret_type='all') # return everything in the batch\n\n print(f'In [create_boundary_maps]: performing with data loaders of size: \\n'\n f'train_loader: {len(train_loader)} \\n'\n f'val_loader: {len(val_loader)} \\n'\n f'and batch_size of: {batch_size} \\n')\n\n for loader_name, loader in {'train_loader': train_loader, 'val_loader': val_loader}.items():\n print(f'In [create_boundary_maps]: creating for {loader_name}')\n for i_batch, batch in enumerate(loader):\n if i_batch % 1 == 0:\n print(f'Doing for the batch {i_batch}')\n\n instance_maps = batch['instance'].to(device)\n boundaries = helper.get_edges(instance_maps)\n boundary_paths = batch['boundary_path']\n # save one by one in the same location as gtFine images\n helper.save_one_by_one(boundaries, boundary_paths, save_path=None) # saving to boundary_paths\n print(f'In [create_boundary_maps]: done for {loader_name}')\n 
print('In [create_boundary_maps]: all done')", "def decode(self, img_metas, output, **kwargs):\n batch_size = len(img_metas)\n result = {}\n heatmap3d_depth_bound = np.ones(batch_size, dtype=np.float32)\n root_depth_bound = np.ones(batch_size, dtype=np.float32)\n center = np.zeros((batch_size, 2), dtype=np.float32)\n scale = np.zeros((batch_size, 2), dtype=np.float32)\n image_paths = []\n score = np.ones(batch_size, dtype=np.float32)\n if 'bbox_id' in img_metas[0]:\n bbox_ids = []\n else:\n bbox_ids = None\n for i in range(batch_size):\n heatmap3d_depth_bound[i] = img_metas[i]['heatmap3d_depth_bound']\n root_depth_bound[i] = img_metas[i]['root_depth_bound']\n center[i, :] = img_metas[i]['center']\n scale[i, :] = img_metas[i]['scale']\n image_paths.append(img_metas[i]['image_file'])\n if 'bbox_score' in img_metas[i]:\n score[i] = np.array(img_metas[i]['bbox_score']).reshape(-1)\n if bbox_ids is not None:\n bbox_ids.append(img_metas[i]['bbox_id'])\n all_boxes = np.zeros((batch_size, 6), dtype=np.float32)\n all_boxes[:, 0:2] = center[:, 0:2]\n all_boxes[:, 2:4] = scale[:, 0:2]\n all_boxes[:, 4] = np.prod(scale * 200.0, axis=1)\n all_boxes[:, 5] = score\n result['boxes'] = all_boxes\n result['image_paths'] = image_paths\n result['bbox_ids'] = bbox_ids\n heatmap3d = output[0]\n preds, maxvals = keypoints_from_heatmaps3d(heatmap3d, center, scale)\n keypoints_3d = np.zeros((batch_size, preds.shape[1], 4), dtype=np.float32)\n keypoints_3d[:, :, 0:3] = preds[:, :, 0:3]\n keypoints_3d[:, :, 3:4] = maxvals\n keypoints_3d[:, :, 2] = (keypoints_3d[:, :, 2] / self.right_hand_head.depth_size - 0.5) * heatmap3d_depth_bound[:, np.newaxis]\n result['preds'] = keypoints_3d\n result['rel_root_depth'] = (output[1] / self.root_head.heatmap_size - 0.5) * root_depth_bound\n result['hand_type'] = output[2] > 0.5\n return result", "def create_preset_images(self):\n for f in sorted(self.get_files_from_data()):\n photoInstances = {}\n for preset in self.generator.settings[\"GALLERY_PRESETS\"]:\n preset_dir = \"%s%s%s\" % (self.absolute_output_path,\n os.sep, \n preset[\"name\"])\n photoInstances[preset[\"name\"]] = Photo(self, f, preset_dir, preset)\n \n self.photos.append(photoInstances)", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def maps(offices, fixed):\n with Image(filename=BAT_B) as page, Drawing() as draw:\n for office, x, y in MAP_POSITIONS:\n 
label = door_label(offices[office], logo=False)\n if label:\n draw.composite(\"over\", x, y, label.width / 3, label.height / 3, label)\n draw(page)\n page.save(filename=\"generated_map%s.png\" % (\"_fixed\" if fixed else \"\"))", "def generate_image_info(im, params):\n im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result", "def _create_dnp3_object_map(self):\n\n feeders = self.file_dict.get(\"feeders\", [])\n measurements = list()\n capacitors = list()\n regulators = list()\n switches = list()\n solarpanels = list()\n batteries = list()\n fuses = list()\n breakers = list()\n reclosers = list()\n energyconsumers = list()\n for x in feeders:\n measurements = x.get(\"measurements\", [])\n capacitors = x.get(\"capacitors\", [])\n regulators = x.get(\"regulators\", [])\n switches = x.get(\"switches\", [])\n solarpanels = x.get(\"solarpanels\", [])\n batteries = x.get(\"batteries\", [])\n fuses = x.get(\"fuses\", [])\n breakers = x.get(\"breakers\", [])\n reclosers = x.get(\"reclosers\", [])\n energyconsumers = x.get(\"energyconsumers\", [])\n\n # Unique grouping of measurements - GroupBy Name, Type and Connectivity node\n groupByNameTypeConNode = defaultdict(list) \n for m in measurements:\n groupByNameTypeConNode[m['name']+m.get(\"measurementType\")+m.get(\"ConnectivityNode\")].append(m)\n\n # Create Net Phase DNP3 Points\n for grpM in groupByNameTypeConNode.values():\n\n if grpM[0]['MeasurementClass'] == \"Analog\" and grpM[0].get(\"measurementType\") == \"VA\":\n measurement_type = grpM[0].get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n \n\n name1 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VAR-value'\n name2 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-Watts-value'\n name3 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VA-value'\n\n description1 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VAR\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description2 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-Watts\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description3 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VA\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n # Create Each Phase DNP3 Points\n for m in measurements:\n attribute = attribute_map['regulators']['attribute']\n measurement_type = m.get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n name= m['name'] + '-' + m['phases']\n description = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + measurement_type + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + 
m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name, description, measurement_type, measurement_id)\n self.c_ai += 1\n\n if m.get(\"measurementType\") == \"VA\":\n measurement_id = m.get(\"mRID\")\n name1 = m['name'] + '-' + m['phases'] + '-VAR-value'\n name2 = m['name'] + '-' + m['phases'] + '-Watts-value'\n name3 = m['name'] + '-' + m['phases'] + '-angle'\n\n description1 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"VAR\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description2 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"Watt\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description3 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"angle\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") + \",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n\n elif m['MeasurementClass'] == \"Discrete\" and measurement_type == \"Pos\":\n if \"RatioTapChanger\" in m['name'] or \"reg\" in m[\"SimObject\"]:\n # TODO: Do we need step?\n for r in range(5, 7): # [r==4]: Step, [r==5]: LineDropR, [r==6]:LineDropX \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, attribute[r])\n self.c_ao += 1\n else:\n self.assign_val_a(\"DI\", 1, 2, self.c_di, name, description, measurement_type, measurement_id)\n self.c_di += 1\n\n for m in capacitors:\n measurement_id = m.get(\"mRID\")\n cap_attribute = attribute_map['capacitors']['attribute'] # type: List[str]\n\n for l in range(0, 4):\n # publishing attribute value for capacitors as Bianry/Analog Input points based on phase attribute\n name = m['name']\n description = \"Name:\" + m['name'] + \"ConductingEquipment_type:LinearShuntCompensator\" + \",Attribute:\" + cap_attribute[l] + \",Phase:\" + m['phases']\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, cap_attribute[l])\n self.c_ao += 1\n for p in range(0, len(m['phases'])):\n name = m['name'] + m['phases'][p]\n description = \"Name:\" + m['name'] + \",ConductingEquipment_type:LinearShuntCompensator\" + \",controlAttribute:\" + cap_attribute[p] + \",Phase:\" + m['phases'][p]\n # description = \"Capacitor, \" + m['name'] + \",\" + \"phase -\" + m['phases'][p] + \", and attribute is - \" + cap_attribute[4]\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, cap_attribute[4])\n self.c_do += 1\n\n for m in regulators:\n reg_attribute = attribute_map['regulators']['attribute']\n # bank_phase = list(m['bankPhases'])\n for n in range(0, 4):\n measurement_id = m.get(\"mRID\")\n name = m['bankName'] + '-' + m['bankPhases']\n description = \"Name:\" + m['bankName'] + \",ConductingEquipment_type:RatioTapChanger_Reg\" +\",Phase:\" + m['bankPhases'] + \",Attribute:\" + reg_attribute[n]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id[0], reg_attribute[n])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id[0], 
reg_attribute[n])\n self.c_ai += 1\n for i in range(5, 7):\n for j in range(0, len(m['bankPhases'])):\n measurement_id = m.get(\"mRID\")[j]\n name = m['tankName'][j] + '-' + m['bankPhases'][j]\n description = \"Name:\" + m['tankName'][j] + \",ConductingEquipment_type:RatioTapChanger_Reg\"+ \",Phase:\" + m['bankPhases'][j] + \",controlAttribute:\" + reg_attribute[i]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id,reg_attribute[i])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id,reg_attribute[i])\n self.c_ai += 1\n \n for m in solarpanels:\n for k in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-Watts-value'\n description = \"Solarpanel:\" + m['name'] + \",Phase:\" + m['phases'] + \",measurementID:\" + measurement_id\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n \n name1 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name2 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name2, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name3 = \"Solar\"+ m['name'] + '-' + m['phases'][k] + '-Watts-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name3, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n\t\t\t\n for m in batteries:\n for l in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = m['name'] + '-' + m['phases'][l] + '-Watts-value'\n description = \"Battery, \" + m['name'][l] + \",Phase: \" + m['phases'] + \",ConductingEquipment_type:PowerElectronicConnections\"\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description,measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n name1 = m['name'] + '-' + m['phases'][l] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description,measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n for m in switches:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][k]\n description = \"Name:\" + m[\"name\"] + \",ConductingEquipment_type:LoadBreakSwitch\" + \"Phase:\" + phase_value[k] +\",controlAttribute:\"+switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in fuses:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for l in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][l]\n description = \"Name:\" + m[\"name\"] + \",Phase:\" + phase_value[l] + \",Attribute:\" + switch_attribute + \",mRID\" + measurement_id\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in breakers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for n in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][n]\n description = \"Name: \" + m[\"name\"] + 
\",Phase:\" + phase_value[n] + \",ConductingEquipment_type:Breaker\" + \",controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n \n for m in reclosers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for i in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][i]\n description = \"Recloser, \" + m[\"name\"] + \"Phase: - \" + phase_value[i] + \",ConductingEquipment_type:Recloser\"+\"controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in energyconsumers:\n measurement_id = m.get(\"mRID\")\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name']+\"phase:\" + m['phases'][k]\n description = \"EnergyConsumer, \" + m[\"name\"] + \"Phase: \" + phase_value[k] \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"EnergyConsumer.p\")\n self.c_ao += 1\n \n name1 = m['name']+\"phase:\" + m['phases'][k] + \"control\"\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name1, description, measurement_id, \"EnergyConsumer.p\")\n self.c_do += 1\n\n return self.out_json", "def hwReflectionMap(*args, backTextureName: Union[AnyStr, bool]=\"\", bottomTextureName:\n Union[AnyStr, bool]=\"\", cubeMap: bool=False, decalMode: bool=False, enable:\n bool=False, frontTextureName: Union[AnyStr, bool]=\"\", leftTextureName:\n Union[AnyStr, bool]=\"\", rightTextureName: Union[AnyStr, bool]=\"\",\n sphereMapTextureName: Union[AnyStr, bool]=\"\", topTextureName: Union[AnyStr,\n bool]=\"\", q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def magnetic_field(date: datetime.datetime, lat, lon, alt, output_format='cartesian'):\n g = GeoMag()\n return g.GeoMag(np.array([lat, lon, alt]), date, location_format='geodetic', output_format=output_format)", 
"def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if 
self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] # Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def createImages(self, geneticInstances):\n genomes = []\n for geneticInstance in geneticInstances:\n genomes.append(geneticInstance.toGenomeRepresentation())\n generatePlantImages(genomes)\n # We now have the output pictures. 
We'll get to them using the database instances' filenames", "def createSurfaceGeo(self):\n self.surfGeo = dict()\n r = self.geoParam['CylinderLightGuideRadius']\n self.surfGeo[r] = 'LightGuide'\n #self.material = {'Moderator':None,'Detector':None,'LightGuide':None}\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n r += self.geoParam['DetectorThickness']\n self.surfGeo[r] = 'Detector'\n r += self.geoParam['DetectorSpacing']\n if (r < self.geoParam['CylinderRadius']):\n self.surfGeo[r] = 'LightGuide'\n return self.surfGeo", "def calculate_maps(self,statistic=None,titles=None,filenames=\"auto\") :\n\n if statistic is not None :\n self.statistic = statistic\n\n if isinstance(self.statistic,str) :\n self.statistic = [self.statistic]\n \n # declare array of nans to fill with maps\n self.maps = np.full([self.num_hists * len(self.statistic)] + \n list(np.shape(self.hists[0])[1:]),np.nan)\n\n if titles is not None :\n self.titles = titles\n else :\n self.titles = [str(x) for x in range(self.num_hists * len(self.statistic))]\n\n if isinstance(filenames,str) and filenames == \"auto\" :\n self.filenames = [str(x) for x in range(self.num_hists * len(self.statistic))]\n else :\n self.filenames = filenames\n\n\n mapnum = 0\n hist_inds = []\n stat_inds = []\n for i in range(len(self.statistic)) :\n for j in range(self.num_hists) :\n \n self.maps[mapnum,:,:] = calculate_map_from_hists(\n self.hists[j],self.statistic[i],self.hist_specs[j]['bin_centers'])\n\n if titles is None :\n if filenames == \"auto\" :\n self.titles[mapnum], self.filenames[mapnum] = gen_map_title(**{\n **self.hist_specs[j],\n 'statistic':self.statistic[i]},filename=True)\n else :\n self.titles[mapnum] = gen_map_title(**{\n **self.hist_specs[j],\n 'statistic':self.statistic[i]},filename=False)\n\n hist_inds = hist_inds + [j]\n stat_inds = stat_inds + [i]\n\n mapnum += 1\n\n self.num_maps = mapnum\n\n self.map_specs = {'hist' : hist_inds, 'statistic' : stat_inds}\n\n return self", "def generate_map(self):\n\n # Create main streets first\n self.create_main_streets()\n\n # Then create the commercial buildings in the center of town\n self.create_commercial_center()\n\n # Then create the neighborhoods that populate the rest of the city\n while(self.create_neighborhood()):\n pass\n\n # Clean up any invalid buildings that were created\n self.delete_inaccessible_buildings()", "def local_composition(self, outfile):\n # TODO Rewrite if I ever need this again\n radius = 3.6 * 2\n npix = 64\n #mat = np.zeros((npix,npix,npix),dtype=np.float)\n #mat = np.zeros((npix,npix,npix),dtype={'names':['col1', 'col2', 'col3'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['40', '13', '29'], 'formats':['f4','f4','f4']})\n #mat = np.zeros((npix,npix,npix),dtype={'names':['id','data'], 'formats':['f4','f4']})\n #names = ['id','data']\n #formats = ['i4',('f4','f4','f4')]\n #mat = np.zeros((npix,npix,npix),dtype=dict(names = names, formats=formats))\n #mat = np.zeros((npix,npix,npix),dtype={'40':('i4',0), '29':('f4',0), '13':('f4',0)})\n print(\"Creating matrix...\")\n mat = [[[{} for i in range(npix)] for j in range(npix)] for k in range(npix)]\n print(\"Finished creating matrix.\")\n #print(repr(mat))\n dx = self.xsize/npix\n dy = self.ysize/npix\n dz = self.zsize/npix\n for ii,i in enumerate(drange(-npix/2*dx,npix/2*dx-dx,dx)):\n print(\"On ii = {0}\".format(ii))\n for jj,j in enumerate(drange(-npix/2*dy,npix/2*dy-dy,dy)):\n for kk,k in enumerate(drange(-npix/2*dz,npix/2*dz-dz,dz)):\n atoms = 
self.get_atoms_in_cutoff( (i,j,k), radius )\n comp = {}\n for atom in atoms:\n comp[str(atom.z)] = comp.get(str(atom.z),0) + 1.0\n for key in comp:\n comp[key] /= len(atoms)\n #print(comp)\n #mat[ii][jj][kk] = copy.copy(comp)\n mat[ii][jj][kk] = comp\n of = open(outfile,'w')\n of.write('IGOR\\n')\n for atomtype in self.atomtypes:\n of.write('\\nWAVES/N=({0},{1},{2})\\t {3}\\nBEGIN\\n'.format(npix,npix,npix,'partial_comp_'+znum2sym.z2sym(atomtype)))\n for layer in mat:\n for column in layer:\n for value in column:\n try:\n of.write(\"{0} \".format(value[str(atomtype)]))\n except KeyError:\n of.write(\"{0} \".format(0.0))\n of.write(\"\\n\")\n of.write('END\\n')\n of.write('X SetScale/P x 0,1,\"\", {0}; SetScale/P y 0,1,\"\", {0}; SetScale/P z 0,1,\"\", {0}; SetScale d 0,0,\"\", {0}\\n'.format('partial_comp_'+znum2sym.z2sym(atomtype)))\n of.close()\n return mat", "def get_score_and_geo(images, labels, data_iter_type='ImageDetIter'):\n for i, image in enumerate(images):\n height, width = image.shape[1], image.shape[2]\n poly_mask = np.zeros((height, width), dtype=np.uint8)\n score_map = np.zeros((images.shape[0], height, width), dtype=np.uint8)\n geo_map = np.zeros((images.shape[0], height, width, 5), dtype=np.float32)\n if data_iter_type == 'MXDataIter':\n label = labels[i].asnumpy() # 1-D Array\n label = np.delete(label, np.where(label == -1)) # Delete all -1 padding\n c, h, w, label_width, header_length, bbox_label_width, orig_h, orig_w = label[:8]\n polys = np.reshape(label[8:], (int(len(label[8:]) / bbox_label_width), -1)) # (Num_of_polys, bbox_label_width)\n vocab_idx = polys[:, 1]\n polys = polys[:, 1:]\n\n elif data_iter_type == 'ImageDetIter':\n label = labels[i].asnumpy() # 1-D Array\n polys = np.delete(label, np.where(label[:, 0] == -1), axis=0)\n vocab_idx = polys[:, 1]\n polys = polys[:, 1:]\n\n # For each polygon (bounding box) in the label\n for poly in polys:\n poly = np.reshape(poly, (4, 2))\n poly[:, 0] = np.round(poly[:, 0] * height, 0)\n poly[:, 1] = np.round(poly[:, 1] * width, 0)\n\n poly = np.array(poly).astype(np.int32)\n # Draw polygon on the score mask (binary map)\n cv2.fillPoly(score_map[i], [poly], (1))\n # Draw polygon on the poly_mask (binary map) that will be used to create geo_map\n cv2.fillPoly(poly_mask, [poly], (1))\n\n fitted_parallelograms = []\n # For the number of sides (4) in the polygon\n for j in range(4):\n # Rotate the orientation at each iter\n p0 = poly[j]\n p1 = poly[(j + 1) % 4]\n p2 = poly[(j + 2) % 4]\n p3 = poly[(j + 3) % 4]\n\n edge = fit_line([p0[0], p1[0]], [p0[1], p1[1]])\n if point_dist_to_line(p0, p1, p2) > point_dist_to_line(p0, p1, p3):\n if edge[1] == 0:\n edge_opposite = [1, 0, -p2[0]]\n else:\n edge_opposite = [edge[0], -1, p2[1] - edge[0] * p2[0]]\n else:\n if edge[1] == 0:\n edge_opposite = [1, 0, -p3[0]]\n else:\n edge_opposite = [edge[0], -1, p3[1] - edge[0] * p3[0]]\n\n # move forward edge\n new_p0 = p0\n new_p1 = p1\n new_p2 = p2\n new_p3 = p3\n forward_edge = fit_line([p1[0], p2[0]], [p1[1], p2[1]])\n new_p2 = line_cross_point(forward_edge, edge_opposite)\n if point_dist_to_line(p1, new_p2, p0) > point_dist_to_line(p1, new_p2, p3):\n # across p0\n if forward_edge[1] == 0:\n forward_opposite = [1, 0, -p0[0]]\n else:\n forward_opposite = [forward_edge[0], -1, p0[1] - forward_edge[0] * p0[0]]\n else:\n # across p3\n if forward_edge[1] == 0:\n forward_opposite = [1, 0, -p3[0]]\n else:\n forward_opposite = [forward_edge[0], -1, p3[1] - forward_edge[0] * p3[0]]\n new_p0 = line_cross_point(forward_opposite, edge)\n new_p3 = 
line_cross_point(forward_opposite, edge_opposite)\n fitted_parallelograms.append([new_p0, new_p1, new_p2, new_p3, new_p0])\n\n # or move backward edge\n new_p0 = p0\n new_p1 = p1\n new_p2 = p2\n new_p3 = p3\n backward_edge = fit_line([p0[0], p3[0]], [p0[1], p3[1]])\n new_p3 = line_cross_point(backward_edge, edge_opposite)\n if point_dist_to_line(p0, p3, p1) > point_dist_to_line(p0, p3, p2):\n # across p1\n if backward_edge[1] == 0:\n backward_opposite = [1, 0, -p1[0]]\n else:\n backward_opposite = [backward_edge[0], -1, p1[1] - backward_edge[0] * p1[0]]\n else:\n # across p2\n if backward_edge[1] == 0:\n backward_opposite = [1, 0, -p2[0]]\n else:\n backward_opposite = [backward_edge[0], -1, p2[1] - backward_edge[0] * p2[0]]\n new_p1 = line_cross_point(backward_opposite, edge)\n new_p2 = line_cross_point(backward_opposite, edge_opposite)\n fitted_parallelograms.append([new_p0, new_p1, new_p2, new_p3, new_p0])\n\n areas = [Polygon(t).area for t in fitted_parallelograms]\n parallelogram = np.array(fitted_parallelograms[np.argmin(areas)][:-1], dtype=np.float32)\n\n # sort thie polygon\n parallelogram_coord_sum = np.sum(parallelogram, axis=1)\n min_coord_idx = np.argmin(parallelogram_coord_sum)\n parallelogram = parallelogram[\n [min_coord_idx, (min_coord_idx + 1) % 4, (min_coord_idx + 2) % 4, (min_coord_idx + 3) % 4]]\n\n rectangle = rectangle_from_parallelogram(parallelogram)\n rectangle, rotate_angle = sort_rectangle(rectangle)\n r0, r1, r2, r3 = rectangle\n\n # For all points of the polygon (bounding box), calculate geometric geo map\n Y, X = np.where(poly_mask == 1)\n for y, x in zip(Y, X):\n point = np.array([x, y], dtype=np.float32)\n # top\n geo_map[i, y, x, 0] = point_dist_to_line(r0, r1, point)\n # right\n geo_map[i, y, x, 1] = point_dist_to_line(r1, r2, point)\n # down\n geo_map[i, y, x, 2] = point_dist_to_line(r2, r3, point)\n # left\n geo_map[i, y, x, 3] = point_dist_to_line(r3, r0, point)\n # angle\n geo_map[i, y, x, 4] = rotate_angle\n\n print (geo_map.shape)\n exit()\n\n return score_map, geo_map", "def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)", "def field_on_map(basemap, x, title=None, bounds=None,\n xcolormap='RdYlGn', alpha=0.5, vmin=None, vmax=None, fig_nr=None):\n #xcolormap = RdYlGn\n #note: bacgroundmap must be cropped to same coordinates than x but resoltion\n # can be different\n r, c = np.shape(basemap[0])\n extent1 = (-0.5, c - 0.5, r - 0.5, -0.5) # extent into where x is re-scaled\n \n if not fig_nr:\n h = plt.figure()\n else:\n h = plt.figure(fig_nr)\n \n if not vmin:\n vmin = np.nanmin(x)\n if not vmax:\n vmax = np.nanmax(x)\n\n # peruskartta \n plt.imshow(basemap[0], cmap=basemap[1], alpha=0.8)\n \n plt.imshow(x, extent=extent1, cmap=xcolormap, vmin=vmin, vmax=vmax, alpha=alpha)\n plt.colorbar()\n if bounds is not None:\n plt.imshow(bounds,extent=extent1, cmap='RdYlGn')\n plt.title(title)\n return h" ]
[ "0.677067", "0.64014757", "0.62887424", "0.6082136", "0.58927894", "0.58386827", "0.58132875", "0.57924163", "0.57924163", "0.57452226", "0.5743414", "0.57319504", "0.57206607", "0.57153034", "0.563697", "0.5628693", "0.56157184", "0.5559617", "0.55475587", "0.5537074", "0.55315846", "0.55233526", "0.5520516", "0.5520516", "0.5520156", "0.5495048", "0.5467132", "0.54520327", "0.54375637", "0.5427407", "0.5417032", "0.53996146", "0.5389462", "0.53821546", "0.53800535", "0.5370813", "0.5369934", "0.53693104", "0.53401047", "0.531345", "0.5299775", "0.5286565", "0.52767205", "0.52378905", "0.5234583", "0.5227005", "0.5226921", "0.52240914", "0.5214423", "0.5212855", "0.5210246", "0.5202225", "0.51958776", "0.5175939", "0.5174771", "0.5171824", "0.5171652", "0.51551384", "0.51528305", "0.5128053", "0.51221365", "0.51206756", "0.512055", "0.5113641", "0.51108176", "0.5108291", "0.5102381", "0.5098305", "0.5085996", "0.50848734", "0.50848734", "0.5077689", "0.50748414", "0.50708026", "0.50655454", "0.5065016", "0.5061615", "0.5057281", "0.5053677", "0.50496215", "0.5049104", "0.5046051", "0.5035438", "0.5026411", "0.502454", "0.50244284", "0.5021499", "0.50205266", "0.50179625", "0.50166893", "0.5016071", "0.50129807", "0.5011339", "0.5010275", "0.5010218", "0.5009747", "0.5007844", "0.50056386", "0.5003719", "0.50019276" ]
0.7716374
0
Create link to structural image if it doesn't already exist.
def LinkAnat(self): if self.anatomical is None: return for entry in self.info.keys(): info = self.info[entry] if info.has_key('anat_link'): self.LinkFiles(info['outdir'], self.anatomical)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_HTML_a_img(link_url, image_url):\n img = '<img src=\"' + image_url + '\">'\n linked_image = create_HTML_a(link_url, img)\n return linked_image", "def make_image(self, path):\n\t\treturn None", "def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n source_dir = os.path.dirname(self.source_path)\n link = os.path.abspath(os.path.join(source_dir, link))\n return '<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (link, title, alt)", "def create_link(seconds, image_name, size):\n token = signing.dumps([str(timezone.now() + timedelta(seconds=int(seconds))), image_name, size])\n return settings.SERVER_PATH + reverse(\"image:dynamic-image\", kwargs={\"token\": token})", "def builder_will_create_target_image(self, builder, target, image_id, template, parameters):", "def image_link(self):\r\n\r\n if not self._image_link:\r\n warnings.warn(\"Seems like you are trying to pull out the image link while not having it.\", Warning, stacklevel=2)\r\n\r\n return self._image_link", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"].strip().lower() == 'homeobject':\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def _collect_img_links(self):\n raise NotImplementedError", "def create_link(self):\n if self.link_info:\n link_type = self.file.options['link_type']\n if 'node' in self.link_info:\n target_path = self.link_info['node'].full_path\n if link_type == 'string':\n # create string dataset containing link path\n #- self.file.file_pointer.create_dataset(self.full_path, data=\"h5link:/\" + target_path)\n self.file.create_dataset(self.full_path, data=\"h5link:/\" + target_path)\n elif link_type == 'hard':\n # create hard link to target. This implemented by h5py \"Softlink\". 
Not sure why.\n #- self.file.file_pointer[self.full_path] = h5py.SoftLink(target_path)\n self.file.create_softlink(self.full_path, target_path)\n else: \n raise Exception('Invalid option value for link_type (%s)' % link_type)\n elif 'extlink' in self.link_info:\n file, path = self.link_info['extlink']\n # link to external file\n if link_type == 'string':\n # create string dataset containing link path\n target_path = \"%s,%s\" % (file, path)\n #- self.file.file_pointer.create_dataset(self.full_path, data=\"h5extlink:/\" + target_path)\n self.file.create_dataset(self.full_path, data=\"h5extlink:/\" + target_path)\n elif link_type == 'hard':\n # create link to external file\n #- self.file.file_pointer[self.full_path] = h5py.ExternalLink(file,path)\n self.file.create_external_link(self.full_path, file, path) \n else:\n raise Exception('Invalid option value for link_type (%s)' % link_type)\n else:\n raise SystemError(\"** Error: invalid key in link_info %s\" % self.link_info)", "def create_specimen_image():\n\n clear_specimen_session()\n # STEP 1: UPLOAD IMAGE\n form = SpecimenImageForm()\n urlForm = ImageURLForm()\n if form.validate_on_submit():\n if request.files:\n image = request.files[\"image\"]\n # converts img to binary file (apparently)\n img = image.read()\n upload = upload_img(img)\n session[\"link\"] = upload.get(\"data\").get(\"link\")\n\n return redirect(\"/specimen/new/taxonomy\")\n elif urlForm.validate_on_submit():\n session[\"link\"] = urlForm.image.data\n\n return redirect(\"/specimen/new/taxonomy\")\n else:\n return render_template(\"newspecimen.html\", form=form, urlForm=urlForm, step=\"image\")", "def add_anchor_image(self, element, body):\n # TODO defensive coding\n src_url = element.attrib['src']\n picid, picname, picdescription, width, height = self.download_image(src_url)\n picrelid = 'rId'+str(len(self.relationships)+1)\n self.relationships.append(['http://schemas.openxmlformats.org/officeDocument/2006/relationships/image', 'media/'+picname])\n graphic = self.create_graphic_tag(width, height, picrelid, picid, picname, picdescription)\n anchor = docx.makeelement('anchor', nsprefix='wp',\n attributes={'allowOverlap':'1',\n 'behindDoc':'0',\n 'distB':'0',\n 'distL':'0',\n 'distR':'0',\n 'distT':'0',\n 'layoutInCell':'1',\n 'locked':'0',\n 'relativeHeight':'3',\n 'simplePos':'0'})\n anchor.append(docx.makeelement('simplePos', nsprefix='wp', attributes={'x':'0', 'y':'0'}))\n positionH = docx.makeelement('positionH', nsprefix='wp', attributes={'relativeFrom':'column',})\n positionH.append(docx.makeelement('posOffset', tagtext='1506220', nsprefix='wp'))\n anchor.append(positionH)\n positionV = docx.makeelement('positionV', nsprefix='wp', attributes={'relativeFrom':'paragraph',})\n positionV.append(docx.makeelement('posOffset', tagtext='0', nsprefix='wp'))\n anchor.append(positionV)\n anchor.append(docx.makeelement('extent', nsprefix='wp', attributes={'cx':str(width), 'cy':str(height)}))\n anchor.append(docx.makeelement('effectExtent', nsprefix='wp', attributes={'b':'0', 'l':'0', 'r':'0', 't':'0'}))\n anchor.append(docx.makeelement('wrapNone', nsprefix='wp'))\n anchor.append(docx.makeelement('docPr', nsprefix='wp', attributes={'id': picid, 'name': 'Picture 1', 'descr': picdescription}))\n cNvGraphicFramePr = docx.makeelement('cNvGraphicFramePr', nsprefix='wp')\n cNvGraphicFramePr.append(docx.makeelement('graphicFrameLocks', nsprefix='a', attributes={'noChangeAspect':'1',}))\n anchor.append(cNvGraphicFramePr)\n # now we can append the actual graphic\n anchor.append(graphic)\n 
drawing = docx.makeelement('drawing', nsprefix='w')\n drawing.append(anchor)\n r = docx.makeelement('r', nsprefix='w')\n r.append(docx.makeelement('rPr', nsprefix='w'))\n r.append(drawing)\n p = docx.makeelement('p', nsprefix='w')\n pPr = docx.makeelement('pPr', nsprefix='w')\n pPr.append(docx.makeelement('pStyle', nsprefix='w', attributes={'val':'style0',}))\n p.append(pPr)\n p.append(r)\n body.append(p)", "def embed_image(self, node):\r\n xlink = node.get('xlink:href')\r\n if xlink and xlink[:5] == 'data:':\r\n # No need, data alread embedded\r\n return\r\n\r\n url = urllib.parse.urlparse(xlink)\r\n href = urllib.request.url2pathname(url.path)\r\n\r\n # Primary location always the filename itself.\r\n path = self.absolute_href(href or '')\r\n\r\n # Backup directory where we can find the image\r\n if not os.path.isfile(path):\r\n path = node.get('sodipodi:absref', path)\r\n\r\n if not os.path.isfile(path):\r\n inkex.errormsg('File not found \"{}\". Unable to embed image.'.format(path))\r\n return\r\n\r\n with open(path, \"rb\") as handle:\r\n # Don't read the whole file to check the header\r\n file_type = self.get_image_type(path, handle.read(10))\r\n handle.seek(0)\r\n\r\n if file_type:\r\n # Future: Change encodestring to encodebytes when python3 only\r\n node.set('xlink:href', 'data:{};base64,{}'.format(\r\n file_type, base64.encodebytes(handle.read()).decode('ascii')))\r\n node.pop('sodipodi:absref')\r\n else:\r\n inkex.errormsg(\"%s is not of type image/png, image/jpeg, \"\r\n \"image/bmp, image/gif, image/tiff, or image/x-icon\" % path)", "def create_base_image(self, builder, template, parameters):", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"dsb\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def MakeImageURL(fname, hyperlink='openfile', **kwargs):\n prefix = 'cdb://image/'\n if not _isSupportedUriPath(fname):\n fname = 'file:///%s' % os.path.basename(fname)\n else:\n fname = fname.replace('\\\\', '/')\n if hyperlink:\n hyperlink = ' cdb:hyperlink:%s' % hyperlink\n else:\n hyperlink = ''\n return '%s%s%s' % (prefix, fname, hyperlink)", "def story_image(story):\n if story:\n image_path = 'static/image/news/{}.jpg'.format(story.pk)\n if os.path.exists(image_path):\n return 'image/news/{}.jpg'.format(story.pk)\n return 'image/placeholder-img.jpg'", "def test_create_namespaced_image_stream_tag(self):\n pass", "def get_image_url():", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"vesicle\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def correct_img_links(body_main_content, schema_name, list_name_image):\n for name_image in list_name_image:\n body_main_content = body_main_content.replace(\n \"src=\\\"\" + name_image + \"\\\"\",\n \"src=\\\"{% static \\\"schema_viewer/oxygen/\" + schema_name + \"/\" + name_image + \"\\\" %}\\\"\"\n )\n return body_main_content", "def new_image(path, attendance, data):\n try:\n return Images.objects.get_or_create(path=path.replace(IMG_FOLDER, '', 1),\n attendance=attendance, data=json.dumps(data))\n except:\n return None", "def builder_should_create_target_image(self, builder, target, image_id, template, parameters):", "def create_website_image(fname, item):\n\n public_site_files_path = os.path.abspath(\n frappe.get_site_path('public', 'files'))\n\n # Create a symbolic link and a thumbnail for the website image\n path, ext = os.path.splitext(fname)\n 
web_fname = path + '_web' + ext\n thumb_fname = path + '_thumb' + ext\n\n # Full paths to original file, web image symlink and thumbnail\n file_fpath = os.path.join(public_site_files_path, fname)\n web_fpath = os.path.join(public_site_files_path, web_fname)\n thumb_fpath = os.path.join(public_site_files_path, thumb_fname)\n\n # URLs on website for web image symlink and thumbnail\n web_url = '/' + os.path.join('files', web_fname)\n thumb_url = '/' + os.path.join('files', thumb_fname)\n\n # Create the symbolic link and create the thumbnail\n try:\n os.symlink(file_fpath, web_fpath)\n except OSError:\n if os.path.islink(web_fpath):\n os.remove(web_fpath)\n files = frappe.get_all(\n 'File', filters={'file_url': web_url})\n for file in files:\n frappe.delete_doc('File', file['name'],\n ignore_permissions=True)\n os.symlink(file_fpath, web_fpath)\n else:\n raise\n resize_image(file_fpath, out=thumb_fpath, thumbnail=True)\n\n # Document for web image\n f = frappe.get_doc({\n \"doctype\": \"File\",\n \"file_url\": web_url,\n \"file_name\": web_fname,\n \"attached_to_doctype\": \"Item\",\n \"attached_to_name\": item,\n \"attached_to_field\": None,\n \"folder\": 'Home/Attachments',\n \"file_size\": os.path.getsize(web_fpath),\n \"is_private\": 0\n })\n try:\n f.insert(ignore_permissions=True)\n except frappe.FileAlreadyAttachedException:\n # If already attached, don't attach again\n pass\n\n return web_url, thumb_url", "def fix_image_links_in_static_portlet(portal):\n\n def get_image_uid(image):\n \"\"\"Return image UID.\"\"\"\n folder = portal['imagens']\n if image in folder:\n return folder[image].UID()\n\n manager = getUtility(IPortletManager, name='plone.rightcolumn', context=portal)\n mapping = getMultiAdapter((portal, manager), IPortletAssignmentMapping)\n\n assert 'midias-sociais' in mapping\n portlet = mapping['midias-sociais']\n images = [\n 'ico-facebook.png', 'ico-twitter.png', 'ico-linkedin.png',\n 'ico-youtube.png', 'ico-flickr.png'\n ]\n for i in images:\n uid = 'resolveuid/' + get_image_uid(i)\n portlet.text = portlet.text.replace(i, uid)\n logger.debug(u'Links substituidos no portlet de midias sociais')\n\n assert 'banners' in mapping\n portlet = mapping['banners']\n image = 'acesso-a-informacao.png'\n uid = 'resolveuid/' + get_image_uid(image) + '/image_mini'\n portlet.text = portlet.text.replace(image, uid)\n logger.debug(u'Link substituido no portlet de acesso a informacao')", "def return_image(val, model_id, message_name, field_name, mime, sind):\n column_data_source = curdoc().get_model_by_name(sind)\n index = column_data_source.tags[0]\n url = \"http://{0}/image/\".format(_host) + \"---\".join([model_id, message_name, field_name, mime, sind, str(index)])\n return url", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"pedestrian\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"pcb\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def resolve_image(self, info):\n if self.image:\n self.image = info.context.build_absolute_uri(self.image.url)\n return self.image", "def image_reference(self, image_id):\n return \"\"", "def image_reference(self, image_id):\n return \"\"", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"self_annotation\":\n return os.path.basename(info[\"path\"])\n else:\n 
super(CarsAndVehiclesDataset, self).image_reference(image_id)", "def create_link(self):\n self.filename = App.get_running_app().root.ids.camera_screen.capture()\n self.url = FileSharer(self.filename).share()\n self.ids.label.text = self.url", "def image_reference(self, image_id):\n pass", "def fromlink(cls, image_link, devmode=False):\r\n\r\n response = requests.get(image_link)\r\n\r\n if response.headers['Content-Type'] not in SUPPORTED_CONTENT_TYPES:\r\n raise ContentTypeError(response.headers['Content-Type'])\r\n\r\n image_bytes_array = BytesIO(response.content)\r\n\r\n return cls(image_bytes_array, image_link, devmode)", "def add_image(self, image):\n\n # we're only for new images, no i'ds allowed\n # if u want to set an id by hand use set_image\n if image.id:\n raise o.Exception('Can not add image with id')\n\n if not image.data:\n raise o.Exception('Image must have data')\n\n if not image.source_page_url:\n raise o.Exception('Image must have source page url')\n\n # update it's stats\n image = self.populate_image_stats(image)\n\n # only add the image if we haven't seen it beforeQ\n # if we've seen it before there will be an id which\n # the set of images w/ this data and from this page share\n ids = self.rc.sinter('images:datainstance:%s' % image.shahash,\n 'images:page_ids:%s' % image.source_page_url)\n\n\n # we don't need to continue\n # we'll return back their original msg, w/o the id set\n if ids:\n print 'image already exists [%s], not setting' % ids\n return image\n\n # so the image appears to be new, good for it\n return self.set_image(image)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"balloon\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"balloon\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def hook_image_tag(self, parser, space, name):\n link = name\n caption = name\n params = {}\n\n # Parse the inner syntax, e.g. 
[[Image:src|option=val|caption]]\n separator = name.find('|')\n items = []\n if separator != -1:\n items = link.split('|')\n link = items[0]\n # If the last item contains '=', it's not a caption\n if items[-1].find('=') == -1:\n caption = items[-1]\n items = items[1:-1]\n else:\n caption = link\n items = items[1:]\n\n # parse the relevant items\n params = self._buildImageParams(items)\n img_path = self._getImagePath(link)\n\n template = jingo.env.get_template('wikiparser/hook_image.html')\n r_kwargs = {'img_path': img_path, 'caption': caption, 'params': params}\n return template.render(**r_kwargs)", "def test_create_namespaced_image_stream(self):\n pass", "def create_target_image(self, builder, target, base_image, parameters):", "def image(self, name=None):\n raise NotImplementedError", "def _build_final_image(self, image):\n raise NotImplementedError", "def generate_image(self):\n pass", "def findReferenceImage(modelfile):\n\n try:\n\n dirname = op.dirname(modelfile)\n prefixes = [getFIRSTPrefix(modelfile)]\n except ValueError:\n return None\n\n if prefixes[0].endswith('_first'):\n prefixes.append(prefixes[0][:-6])\n\n for p in prefixes:\n try:\n return fslimage.addExt(op.join(dirname, p), mustExist=True)\n except fslimage.PathError:\n continue\n\n return None", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info['source'] == 'local':\n return info['source']\n else:\n super(self.__class__).image_reference(self, image_id)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"coco\":\n return \"http://cocodataset.org/#explore?id={}\".format(info[\"id\"])\n else:\n super(CocoDataset, self).image_reference(image_id)", "def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n\n if settings.content_use_own_image:\n # Stolen from collective.opengraph\n img_size = settings.content_image_size\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field and HAS_LEADIMAGE:\n field = context.getField(IMAGE_FIELD_NAME)\n \n if field and field.get_size(context) > 0:\n if img_size:\n return u'%s/%s_%s' % (obj_url, field.getName(), img_size)\n return u'%s/%s' % (obj_url, field.getName())\n \n return SiteOpenGraphMetaViewlet.share_image(self)", "def generate_link_kml(self, d):\n return \"\"\"\\\n <NetworkLink>\n <name>%(image_filename)s</name>\n <Region>\n <Lod>\n <minLodPixels>%(minlodpixels)d</minLodPixels>\n <maxLodPixels>-1</maxLodPixels>\n </Lod>\n <LatLonAltBox>\n <north>%(north).14f</north>\n <south>%(south).14f</south>\n <east>%(east).14f</east>\n <west>%(west).14f</west>\n </LatLonAltBox>\n </Region>\n <Link>\n <href>%(link_url)s</href>\n <viewRefreshMode>onRegion</viewRefreshMode>\n </Link>\n </NetworkLink>\"\"\" % d", "def test_answer_meta_image_uses_category_image_if_no_social_image(self):\n category = baker.make(Category, category_image=self.test_image)\n page = self.page1\n page.category.add(category)\n page.save_revision()\n self.assertEqual(page.meta_image, self.test_image)", "def __init__(self, filename, hyperlink=None, width=None, height=None, kind='direct',\r\n mask='auto', lazy=1, hAlign='CENTER'):\r\n super(HyperlinkedImage, self).__init__(filename, width, height, kind, mask, lazy, hAlign=hAlign)\r\n self.hyperlink = hyperlink", "def 
builder_did_create_target_image(self, builder, target, image_id, template, parameters):", "def add_repo_url(image, repository, repositories):\n try:\n path = repositories[repository]\n path = path.strip(\"/\").replace(\"https://\", \"\").replace(\"http://\", \"\")\n image = \"/\".join([path, image])\n except KeyError:\n raise KeyError(f\"Repository {repository} not defined!\")\n return image", "def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):", "def show_image_ref():\n return get_image_ref()", "def imagesert(guidefilename, filename, url):\n\n guide = None\n with open(guidefilename, 'r') as guide_file:\n guide = json.load(guide_file)\n\n if not guide:\n return True\n\n content = {\"url\":url, \"file\":filename}\n try:\n guide['Cities'][0]['image'] = content\n except KeyError:\n return True\n\n with open(guidefilename, 'w') as guide_file:\n json.dump(guide, guide_file)\n\n return False", "def visit_image(self, node):\n self.image[\"in\"] = True\n # Image wrapped within a reference\n if self.reference:\n if self.output[-1] == self.syntax.visit_reference():\n self.output.pop()\n self.image[\"skip-reference\"] = True\n options = self.infer_image_attrs(node)\n if self.figure[\"in\"]:\n figure_options = self.figure[\"figure-options\"]\n options = {**options, **figure_options} # Figure options take precedence\n options = self.myst_options(options)\n uri = node.attributes[\"uri\"]\n self.images.append(uri)\n if self.figure[\"in\"]:\n syntax = self.syntax.visit_figure(uri, options)\n else:\n syntax = self.syntax.visit_image(uri, options)\n self.output.append(syntax)", "def assign_image(f, df, ix=0):\n if 'Image' not in df.columns:\n im = os.path.splitext(f)[0] + '.tif'\n df.insert(ix, 'Image', im)\n df['Image_Abspath'] = os.path.abspath(im)\n return df", "def test_create_namespaced_image_stream_mapping(self):\n pass", "def test_create_image(self):\n pass", "def add_image(self, fname, image_str, sid=None):\n src = \"%s/%s\" % (self.IMAGES, fname)\n if not src in self.images:\n if sid is None:\n sid = sluggify(src)\n self.opf.add_manifest(sid, src, \"image/jpeg\")\n filename = os.path.join(\"OEBPS\", self.IMAGES, fname)\n self.zip.writestr(filename, image_str)\n self.images.append(src)\n\n return \"../%s\" % src", "def create_thumbnail(self, target, format=None):", "def baseimage(self, new_image):\n images = []\n for instr in self.structure:\n if instr['instruction'] == 'FROM':\n image, _ = image_from(instr['value'])\n if image is not None:\n images.append(image)\n if not images:\n raise RuntimeError('No stage defined to set base image on')\n images[-1] = new_image\n self.parent_images = images", "def add_header_image(self, element, body):\n # TODO defensive coding\n src_url = element.attrib['src']\n urltool = getToolByName(self.context, \"portal_url\")\n portal = urltool.getPortalObject()\n base_url = portal.absolute_url()\n #url = base_url + '/' + src_url\n docx.log.info('Header Image base_url: %s', base_url)\n docx.log.info('Header Image src_url: %s', src_url)\n url = src_url\n if url.startswith(base_url):\n url = url[len(base_url)+1:]\n media_path = self.working_folder + '/word/media'\n if not os.path.exists(media_path):\n os.makedirs(media_path)\n self.image_count += 1\n picid = str(self.image_count)\n url_parts = url.split('/')\n picname = url_parts[-1]\n picdescription = 'The header image'\n file_object = open(media_path + '/' + picname, 'w')\n docx.log.info('Header Image url: %s', url)\n image_string = subrequest(unquote(url)).getBody()\n file_object.write(image_string)\n 
file_object.close()\n picrelid = 'rId'+str(len(self.relationships)+1)\n # TODO this should be moved to a separate method\n rels_content = types = etree.fromstring('<Relationships xmlns=\"http://schemas.openxmlformats.org/package/2006/relationships\"></Relationships>')\n rels_content.append(makeelement('Relationship', nsprefix=None,\n attributes={'Id':picrelid,\n 'Type':'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',\n 'Target':'media/%s' % picname}))\n rels_path = self.working_folder + '/word/_rels'\n if not os.path.exists(rels_path):\n os.makedirs(rels_path)\n # TODO hard code header name for now\n file_object = open(rels_path + '/header.xml.rels', 'w')\n file_object.write(etree.tostring(rels_content))\n file_object.close()\n self.relationships.append(['http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',\n 'media/'+picname])\n # TODO hard code the content_tpe entry for now\n self.content_types_list['/word/media/%s' % picname] = 'image/jpeg'\n # TODO hard code dimensions for now\n width = 7560310\n height = 1378585\n graphic = self.create_graphic_tag(width, height, picrelid, picid, picname, picdescription)\n # This needs to be in an anchor rather than a framelocks\n # TODO atrbibs shouldn't have a namespace\n anchor = docx.makeelement('anchor', nsprefix='wp',\n attributes={'allowOverlap':'1',\n 'behindDoc':'1',\n 'distB':'0',\n 'distL':'0',\n 'distR':'0',\n 'distT':'0',\n 'layoutInCell':'1',\n 'locked':'0',\n 'relativeHeight':'12',\n 'simplePos':'0'})\n anchor.append(docx.makeelement('simplePos', nsprefix='wp', attributes={'x':'0', 'y':'0'}))\n positionH = docx.makeelement('positionH', nsprefix='wp', attributes={'relativeFrom':'character',})\n positionH.append(docx.makeelement('posOffset', tagtext='-1143000', nsprefix='wp'))\n anchor.append(positionH)\n positionV = docx.makeelement('positionV', nsprefix='wp', attributes={'relativeFrom':'line',})\n positionV.append(docx.makeelement('posOffset', tagtext='+457200', nsprefix='wp'))\n anchor.append(positionV)\n anchor.append(docx.makeelement('extent', nsprefix='wp', attributes={'cx':str(width), 'cy':str(height)}))\n anchor.append(docx.makeelement('effectExtent', nsprefix='wp', attributes={'b':'0', 'l':'0', 'r':'0', 't':'0'}))\n anchor.append(docx.makeelement('wrapNone', nsprefix='wp'))\n anchor.append(docx.makeelement('docPr', nsprefix='wp', attributes={'id': picid, 'name': 'Picture 1', 'descr': picdescription}))\n cNvGraphicFramePr = docx.makeelement('cNvGraphicFramePr', nsprefix='wp')\n cNvGraphicFramePr.append(docx.makeelement('graphicFrameLocks', nsprefix='a', attributes={'noChangeAspect':'1',}))\n anchor.append(cNvGraphicFramePr)\n # now we can append the actual graphic\n anchor.append(graphic)\n drawing = docx.makeelement('drawing', nsprefix='w')\n drawing.append(anchor)\n r = docx.makeelement('r', nsprefix='w')\n r.append(drawing)\n p = docx.makeelement('p', nsprefix='w')\n p.append(r)\n body.append(p)", "def url_for(**options):\n\n url_parts = get_url_parts(**options)\n image_hash = hashlib.md5(b(options[\"image_url\"])).hexdigest()\n url_parts.append(image_hash)\n\n return \"/\".join(url_parts)", "def add_link (self, src, dst):\n raise NotImplementedError", "def resolve_image(image):\n resolved = image\n if resolved.startswith(\"file:\"):\n return load_image_from_file(resolved[5:])\n if \":\" not in resolved:\n resolved = \"neo4j:\" + image\n if resolved.endswith(\"!\"):\n force = True\n resolved = resolved[:-1]\n else:\n force = False\n if resolved == \"neo4j:snapshot\":\n return 
pull_snapshot(\"community\", force)\n elif resolved in (\"neo4j:snapshot-enterprise\",\n \"neo4j-enterprise:snapshot\"):\n return pull_snapshot(\"enterprise\", force)\n else:\n return resolved", "def create_image(image_url, owner, permission=\"PRIVATE\"):\n\n image = Image(image_url=image_url,\n owner=owner,\n permission=permission)\n \n db.session.add(image)\n db.session.commit()\n return image", "def test_replace_namespaced_image_stream(self):\n pass", "def _format_image(image, tags):\n text = \", \".join(md.pre(tag) for tag in tags)\n dest = _to_dockerfile_url(image)\n return md.item(md.link(text, dest))", "def get_image_link(self):\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all(\"b\")[1].text\n return image_tag['src'], image_name\n\n # image = td.find_all('img')\n # print(image)\n # if image is not None:\n # return urljoin(self.base_url, image['src'])", "def add_image(self, image):\r\n metadata = self.collection.find_one( { \"_id\": image.identifier } )\r\n if metadata:\r\n raise ImageFactoryException(\"Image %s already managed, use image_with_id() and save_image()\" % (image.identifier))\r\n\r\n image.persistent_manager = self\r\n basename = self.storage_path + '/' + str(image.identifier)\r\n body_path = basename + BODY_EXT\r\n image.data = body_path\r\n try:\r\n if not os.path.isfile(body_path):\r\n open(body_path, 'w').close()\r\n self.log.debug('Created file %s' % body_path)\r\n except IOError as e:\r\n self.log.debug('Exception caught: %s' % e)\r\n\r\n self._save_image(image)", "def image_reference(self, image_id):\r\n info = self.image_info[image_id]\r\n if info[\"source\"] == \"shapes\":\r\n return info[\"shapes\"]\r\n else:\r\n super(self.__class__).image_reference(self, image_id)", "def image_reference(self, image_id):\r\n info = self.image_info[image_id]\r\n if info[\"source\"] == \"shapes\":\r\n return info[\"shapes\"]\r\n else:\r\n super(self.__class__).image_reference(self, image_id)", "def image_reference(self, image_id):\r\n info = self.image_info[image_id]\r\n if info[\"source\"] == \"shapes\":\r\n return info[\"shapes\"]\r\n else:\r\n super(self.__class__).image_reference(self, image_id)", "def source_image_link(self, image_id):\n return self.image_info[image_id][\"path\"]", "def source_image_link(self, image_id):\n return self.image_info[image_id][\"path\"]", "def _constructInstance(self, container, id, *args, **kw):\n file, title = None, ''\n id = container.manage_addProduct['OFSP'].manage_addImage(id, file, title)\n return container.get(id, None)", "def add_image(context, person):\n args = context.message.content.split(\" \")\n\n parsed_url = urllib.parse.urlparse(args[1])\n host = parsed_url.hostname.split('.')[0]\n media_type = args[1].split('.')[-1]\n\n if not valid_host(host) and not valid_media_type(media_type): # Tests for imgur image URL\n return context.send('Invalid URL, try again.')\n\n collection = db['people']\n # Prevent duplicate inputs\n collection.update_one({'image_url': args[1]}, {'$set': {'person': person}}, upsert=True)\n return context.send(f'Added to the `{person}` collection')", "def createLink(self, source, destination):\n log(\"creating link\")\n\n if \"flix\" in source:\n return \"%s\" % +OSUtils.createLink(source, destination)\n return \"0\"", "def make(item:dict):\n main_image = item[\"item_json\"][\"Item\"][\"mediumImageUrls\"][0][\"imageUrl\"]\n new_main_image = rak_image_mainpulation.sz10(main_image_url=main_image)\n embedVar = discord.Embed(title=item[\"item_name\"], 
description=item[\"price\"], color=0x00ff00)\n embedVar.set_image(url=new_main_image)\n embedVar.add_field(name=\"Link\", value=item[\"item_url\"], inline=False)\n return embedVar", "def test_replace_namespaced_image_stream_tag(self):\n pass", "def create_img(\n stem: str,\n img_type: str,\n file_type: str,\n file_path: str,\n ref_img: str = None,\n args: dict[str, T.Any] = {},\n) -> None:\n\n if ref_img:\n tif_img = tf.TiffFile(ref_img)\n args[\"shape\"] = tif_img.pages[0].shape[:2]\n\n if img_type == \"label\":\n if file_type == \"visium\":\n visium_label(stem, file_path, **args)\n elif file_type == \"merscope\":\n merscope_label(stem, file_path, **args)\n elif file_type == \"xenium\":\n xenium_label(stem, file_path, **args)\n elif img_type == \"raw\":\n if file_type == \"merscope\":\n merscope_raw(stem, file_path, **args)", "def share_image(self):\n portal_state = getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n \n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IFbShareSettings, check=False)\n if settings.image_to_share==u'site_logo':\n portal = self.portal_state.portal()\n logoName = portal.restrictedTraverse('base_properties').logoName\n return \"%s/%s\" % (portal_state.portal_url(), logoName)\n \n share_image_view = getMultiAdapter((portal_state.portal(), self.request),\n name=u'collective.fbshare.default_image')\n if share_image_view.data():\n return \"%s/@@collective.fbshare.default_image\" % portal_state.portal_url()", "def get_resumable_create_media_link(self):\n return self.get_link(RESUMABLE_CREATE_MEDIA_LINK_REL)", "def add_image_to_database(full_image_url, image_name, image_source):\r\n\r\n logging.debug('add_image_to_database({}, {}, {})'.format(full_image_url, image_name, image_source))\r\n\r\n dir_path = os.path.join(os.environ['LOCALAPPDATA'],'WarietyWallpaperImages')\r\n os.makedirs(dir_path, exist_ok=True)\r\n db_file = os.path.join(dir_path,'wariety.db')\r\n conn = sqlite3.connect(db_file)\r\n c = conn.cursor()\r\n\r\n # Insert a row of data\r\n c.execute(\"\"\"INSERT INTO wallpapers (iurl, iname, isource)\r\n VALUES (?,?,?)\"\"\", (full_image_url, image_name, image_source))\r\n \r\n # Save (commit) the changes\r\n conn.commit()\r\n\r\n # Close connection\r\n conn.close()", "def test_create_namespaced_image_stream_import(self):\n pass", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"shapes\":\n return info[\"shapes\"]\n else:\n super(self.__class__).image_reference(self, image_id)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"shapes\":\n return info[\"shapes\"]\n else:\n super(self.__class__).image_reference(self, image_id)", "def create(self, name, image, command, **kwargs):\n return", "def image_link_collector(self, count):\n pass", "def saveImages(self):\n if self.imageLink and not self.image:\n f= open('imageLink.txt','wb')\n f.write(bytes(str(self.imageLink), 'UTF-8'))\n f.close()\n req = urllib3.urlopen(self.imageLink, headers={'User-Agent': 'Mozilla/5.0'})\n result = urllib3.urlopen(req)\n self.image.save(os.path.basename(self.imageLink),ContentFile(result.read()))\n print (\"Image saved\")\n self.save()", "def singularity_build(self, path, image):\n Client.build(os.path.join(\n path, 'singularity.def'\n ), self.generate_image_name(image))", "def modify_image_url(image_url, type=''):\n parsed_uri = urlparse(image_url)\n if parsed_uri.scheme == 'https' or parsed_uri.scheme == 'http':\n pass\n elif image_url 
== '':\n image_url = '/media/default_' + type + '.jpg'\n else:\n image_url = '/media/' + image_url\n return image_url", "def add_image(self, image_name, version, image_hash):\n raise NotImplementedError()", "def add_sglink (self, src, dst):\n raise NotImplementedError", "def add_link(self, id_img, id_usr, distance=0, angle=0):\n if not self.destroyed :\n est_dedans = False\n for link in self.links:\n if link.linked_to_animal(id_img):\n est_dedans = True\n tab = [link.id_usr, self.createur.identifier]\n for fusionneur in self.fusionneurs:\n tab.append(fusionneur.identifier)\n self.parent.logger.write(\"destroy_link\", id_usr, tab)\n self.links.remove(link)\n if not est_dedans:\n self.links.append(Link(id_img, id_usr, distance, angle))\n tab = [self.createur.identifier]\n for fusionneur in self.fusionneurs:\n tab.append(fusionneur.identifier)\n self.parent.logger.write(\"create_link\", id_usr, tab)", "def upload_image(instance, image):\n today = datetime.today()\n return 'gallery/{model}/{year}/{month}/{day}/{image}'.format(\n model=instance._meta.model_name,\n year=today.year, month=today.month,\n day=today.day, image=image,\n )", "def add_image(self, image, mode='normal', state='on'):\n raise NotImplementedError", "def test_replace_image(self):\n pass", "def _add_image(self, image):\n document = self._control.document()\n name = str(image.cacheKey())\n document.addResource(QtGui.QTextDocument.ImageResource,\n QtCore.QUrl(name), image)\n format = QtGui.QTextImageFormat()\n format.setName(name)\n return format", "def test_patch_namespaced_image_stream_tag(self):\n pass" ]
[ "0.6233887", "0.60792506", "0.60782003", "0.5984427", "0.58839905", "0.58724886", "0.5832461", "0.58206445", "0.580554", "0.5745005", "0.5738044", "0.5731455", "0.5714038", "0.5617503", "0.560202", "0.5583627", "0.55811983", "0.5542545", "0.55367535", "0.5529259", "0.5487097", "0.5471151", "0.5470766", "0.5467708", "0.5466955", "0.5458971", "0.5438806", "0.54302204", "0.5424887", "0.5424887", "0.5392435", "0.5392204", "0.53753984", "0.5373603", "0.536812", "0.53511", "0.53511", "0.53506535", "0.53329366", "0.53124785", "0.5309328", "0.53028196", "0.52945465", "0.5278945", "0.5273507", "0.5259075", "0.52589154", "0.52540386", "0.52526706", "0.52507627", "0.52442443", "0.5243321", "0.52340806", "0.5230577", "0.52226675", "0.5221387", "0.52113205", "0.5207084", "0.5195384", "0.51908517", "0.51692224", "0.51523286", "0.51482445", "0.51389706", "0.51310265", "0.5129263", "0.51280826", "0.5125917", "0.51205975", "0.5117349", "0.5116824", "0.51061946", "0.51061946", "0.51061946", "0.51032", "0.51032", "0.51002985", "0.5090818", "0.5088468", "0.50760263", "0.50749135", "0.5073382", "0.50717354", "0.5070557", "0.5068959", "0.5065029", "0.505477", "0.505477", "0.50546294", "0.50405645", "0.5022111", "0.49948636", "0.49928004", "0.4992572", "0.49776164", "0.49775514", "0.49586323", "0.49559897", "0.49512032", "0.49449113", "0.4944414" ]
0.0
-1
Create links to BRIK, HEAD, and .nii files.
def LinkFiles(self, srcdir, target):
    if '+orig' in target:
        tgt_prefix = target.replace('.BRIK','')
        tgt_prefix = tgt_prefix.replace('.HEAD','')
        linkfiles = ['%s.HEAD'%tgt_prefix, '%s.BRIK' %tgt_prefix]
    else:
        linkfiles = [target]
    for linkfile in linkfiles:
        linkname = '%s/%s' % (srcdir, os.path.basename(linkfile))
        rel_linkdir = abspath_to_relpath(os.path.dirname(target), srcdir)
        rel_linkfile = '%s/%s' % (rel_linkdir, os.path.basename(linkfile))
        if not os.path.exists(linkname) and not os.path.islink(linkname):
            cmd = 'cd %s && ln -s %s %s' % (srcdir, rel_linkfile, linkname)
            self.ExecCmd(cmd)
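For readability, a minimal standalone sketch of the same idea: expand an AFNI '+orig' target into its .HEAD/.BRIK pair (or keep a single .nii file) and create relative symlinks, using os.symlink instead of shelling out to 'ln -s'. The function name and the use of os.path.relpath are illustrative assumptions, not part of the dataset entry above.

import os

def link_afni_files(srcdir, target):
    # Illustrative sketch; mirrors the logic of the entry above but avoids spawning a shell.
    if '+orig' in target:
        # An AFNI +orig dataset is stored as a .HEAD/.BRIK pair sharing one prefix.
        prefix = target.replace('.BRIK', '').replace('.HEAD', '')
        linkfiles = [prefix + '.HEAD', prefix + '.BRIK']
    else:
        linkfiles = [target]  # e.g. a single .nii file
    for linkfile in linkfiles:
        linkname = os.path.join(srcdir, os.path.basename(linkfile))
        # Point the link at a path relative to srcdir so the tree stays relocatable.
        rel_linkfile = os.path.relpath(linkfile, srcdir)
        if not os.path.exists(linkname) and not os.path.islink(linkname):
            os.symlink(rel_linkfile, linkname)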
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_links(self):\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))", "def makeLinks(self):\n self.deleteIndexFileIfExists()\n _fileNames = self.getHTMLFileNames()\n _msgPart1 = \"<a href=\\\"\"\n _msgPart2 = \"\\\" target=\\\"loadHTMLResults\\\">\"\n _msgPart3 = \"</a><br>\"\n _link = \"\"\n for _fileName in _fileNames:\n _origFileName = _fileName\n _linkName = _fileName.split('.')[0]\n _createAnchorTag = (_msgPart1+str(_origFileName)+_msgPart2+str(_linkName)+_msgPart3)\n _link = _link + _createAnchorTag\n return _link", "def make_links(self):\n for filepath in list(self):\n self.make_link(filepath)", "def _generate_links(self):\n index = 0\n links = \"\"\n for ch in self.text:\n if ch == '[':\n links += \"(^\"\n elif ch == ']':\n links += \")$|\"\n index += 1\n elif links[-1:] != '|' and links != \"\":\n links += ch\n self.links = compile(links[:-1].lower())", "def __add_gitlinks(self, gitlinks):\n for sha1, path in gitlinks:\n if sha1 == p4gf_const.NULL_COMMIT_SHA1:\n self.__append(\"D {}\\n\".format(path))\n else:\n self.__append(\"M 160000 {0} {1}\\n\".format(sha1, path))", "def update_symlinks(n):\n\tif n > 0: return\n\tsymlink_dir = sc.text_image_symlink_dir.absolute()\n\tfor tpi, info in sorted(index.items(), key=lambda t: t[0]):\n\t\tsymlink = symlink_dir / info['url']\n\t\tif symlink.is_symlink():\n\t\t\tif symlink.resolve() == info['file']:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tsymlink.unlink()\n\t\tif not symlink.parent.exists():\n\t\t\tsymlink.parent.mkdir(parents=True)\n\t\tsymlink.symlink_to(info['file'])", "def gen_links(text):\n return []", "def make_franny_symlinks(src_dirs, out_dir):\n\n for path, dirs, files in chain.from_iterable(os.walk(path)\n for path in src_dirs):\n print('Looking in %s' % path)\n for sta in ['NS12', 'NS13', 'NS14']:\n for filename in fnmatch.filter(files, '*.%s*' % sta):\n net = filename.split('.')[-7]\n chan = filename.split('.')[-4]\n if chan[-1] == 'N':\n new_chan = 'EH1'\n elif chan[-1] == 'E':\n new_chan = 'EH2'\n else:\n continue\n mseed_nm = filename.split('/')[-1]\n new_mseed = string.replace(mseed_nm, chan, new_chan)\n old_path = os.path.join(path, filename)\n new_path = '%s/%s/%s/%s.D/%s' % (out_dir, net,\n sta, new_chan, new_mseed)\n\n print('Creating symlink for file %s at %s'\n % (old_path, new_path))\n spwd = '*blackmore89'\n cmnd = 'sudo -S ln %s %s' % (old_path, new_path)\n os.system('echo %s | %s' % (spwd, cmnd))\n return", "def makeBackrefLink(self, info, g_links, i):\n atts, content, infoid, link = '', '', '', ''\n if 'def' in info:\n link = info['def']['link']\n backlink_type = link or g_links\n i_ = self.encode_high(i)\n allow_inc = i not in self.syms\n i_ = int(i_)\n\n if backlink_type == \"!\":\n return ''\n elif backlink_type == '^':\n return \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n info['refids'][0], i\n )\n else:\n result = []\n for refid in info['refids']:\n i_entity = self.decode_high(i_)\n sup = \"\"\"<sup><a href=\"#noteref%s\">%s</a></sup>\"\"\" % (\n refid, i_entity\n )\n if allow_inc:\n i_ += 1\n result.append(sup)\n result = ' '.join(result)\n return result", "def create_home_directory_symbolic_links():\n file_paths = (\n path\n for path in repo_home.rglob(\"*\")\n if path.is_file() and not path.is_symlink()\n )\n\n for file_path in file_paths:\n sym_link_path = translate_home_path(file_path)\n\n if sym_link_path.is_file() and not sym_link_path.is_symlink():\n 
backup_file(sym_link_path)\n sym_link_path.unlink()\n\n if sym_link_path.is_symlink():\n sym_link_path.unlink()\n\n print(f\"Creating Symlink: {sym_link_path} -> {file_path}\")\n sym_link_path.symlink_to(file_path)", "def test_apiLinking(self):\n version = \"1.2.3\"\n input, output = self.getArbitraryLoreInputAndOutput(version)\n self.howtoDir.child(\"one.xhtml\").setContent(input)\n\n self.builder.build(version, self.howtoDir, self.howtoDir,\n self.templateFile, \"scheme:apilinks/%s.ext\")\n out = self.howtoDir.child('one.html')\n self.assertIn(\n '<a href=\"scheme:apilinks/foobar.ext\" title=\"foobar\">foobar</a>',\n out.getContent())", "def fix_links():\n pass", "def download_brick_catalog(brick):\n urls = {1: 'http://archive.stsci.edu/pub/hlsp/phat/brick01/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12058-m31-b01_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 2: 'http://archive.stsci.edu/pub/hlsp/phat/brick02/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12073-m31-b02_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 3: 'http://archive.stsci.edu/pub/hlsp/phat/brick03/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12109-m31-b03_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 4: 'http://archive.stsci.edu/pub/hlsp/phat/brick04/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12107-m31-b04_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 5: 'http://archive.stsci.edu/pub/hlsp/phat/brick05/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12074-m31-b05_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 6: 'http://archive.stsci.edu/pub/hlsp/phat/brick06/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12105-m31-b06_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 7: 'http://archive.stsci.edu/pub/hlsp/phat/brick07/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12113-m31-b07_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 8: 'http://archive.stsci.edu/pub/hlsp/phat/brick08/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12075-m31-b08_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 9: 'http://archive.stsci.edu/pub/hlsp/phat/brick09/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12057-m31-b09_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 10: 'http://archive.stsci.edu/pub/hlsp/phat/brick10/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12111-m31-b10_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 11: 'http://archive.stsci.edu/pub/hlsp/phat/brick11/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12115-m31-b11_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 12: 'http://archive.stsci.edu/pub/hlsp/phat/brick12/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12071-m31-b12_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 13: 'http://archive.stsci.edu/pub/hlsp/phat/brick13/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12114-m31-b13_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 14: 'http://archive.stsci.edu/pub/hlsp/phat/brick14/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12072-m31-b14_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 15: 'http://archive.stsci.edu/pub/hlsp/phat/brick15/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12056-m31-b15_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 16: 'http://archive.stsci.edu/pub/hlsp/phat/brick16/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12106-m31-b16_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 17: 'http://archive.stsci.edu/pub/hlsp/phat/brick17/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12059-m31-b17_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 18: 
'http://archive.stsci.edu/pub/hlsp/phat/brick18/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12108-m31-b18_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 19: 'http://archive.stsci.edu/pub/hlsp/phat/brick19/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12110-m31-b19_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 20: 'http://archive.stsci.edu/pub/hlsp/phat/brick20/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12112-m31-b20_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 21: 'http://archive.stsci.edu/pub/hlsp/phat/brick21/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12055-m31-b21_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 22: 'http://archive.stsci.edu/pub/hlsp/phat/brick22/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12076-m31-b22_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits', # NOQA\n 23: 'http://archive.stsci.edu/pub/hlsp/phat/brick23/hlsp_phat_hst_wfc3-uvis-acs-wfc-wfc3-ir_12070-m31-b23_f275w-f336w-f475w-f814w-f110w-f160w_v2_st.fits'} # NOQA\n url = urls[brick]\n output_path = os.path.join(os.getenv('PHATV2DATA'), os.path.basename(url))\n print \"Downloading {url}\".format(url=url)\n cmd = 'wget -c -nc -q -O {output} {input}'.format(output=output_path,\n input=url)\n print \"Started at\", datetime.utcnow()\n if not os.path.exists(output_path):\n subprocess.call(cmd, shell=True)\n print \"Ended at \", datetime.utcnow()", "def makelinks(links, sp1, sp2):\n sp1_links = []\n sp2_links = []\n sp1_chrom = []\n sp2_chrom = []\n f = open(\"circos.{}-{}.links.txt\".format(sp1, sp2), 'w')\n with open(links, 'r') as link:\n for line in link:\n x = line.strip().split()\n species = x[0].split(\".\")[0]\n chrom = x[0].split(\".\")[1]\n orient = x[3]\n size = int(x[4])\n align_l = int(x[2])\n align_s = int(x[1])\n if orient == \"+\":\n start = align_s\n end = start + align_l\n elif orient == \"-\":\n start = size - align_s\n end = start - align_l\n else:\n print(\"\\nNo Direction indicated\".format(line))\n if species == sp1:\n sp1_links.append(\"{} {} {}\".format(chrom, start, end))\n sp1_chrom.append(chrom)\n elif species == sp2:\n sp2_links.append(\"{} {} {}\".format(chrom, start, end))\n sp2_chrom.append(chrom)\n [f.write(\"{} {}\\n\".format(i, j)) for i, j in zip(sp1_links, sp2_links)]\n f.close()\n\n return(sp1_chrom, sp2_chrom)", "def test_all_notebooks_linked():\n assert _public_nb_dir.is_dir()\n assert _linked_nb_dir.is_dir()\n linked_nbs = [f for f in listdir(_linked_nb_dir) if f.endswith(\".ipynb\")]\n new_links = []\n for f in listdir(_public_nb_dir):\n if not f.endswith(\".ipynb\"):\n continue\n linked_name = get_symlink_name(f)\n if linked_name not in linked_nbs:\n cwd = os.getcwd()\n os.chdir(str(_linked_nb_dir))\n os.symlink(\n f\"../../../../jupyter_notebooks/{f}\",\n linked_name,\n target_is_directory=False,\n )\n os.chdir(cwd)\n new_links.append(str(_linked_nb_dir / linked_name))\n if new_links:\n ll = \"\\n\".join(new_links)\n pytest.fail(f\"Please commit the following notebook symlinks:\\n{ll}\")", "def create_links(self, name):\n for target, linknames in self._link_map.iteritems():\n for linkname in linknames:\n self._api.path.mock_copy_paths(target, linkname)\n self._api.python(\n name,\n self._resource,\n args = [\n '--link-json',\n self._api.json.input({str(target) : linkname\n for target, linkname in self._link_map.iteritems()\n }),\n ],\n infra_step=True)", "def test_create_symlink_file(self):\n pass", "def _post_src_install_soname_symlinks(mysettings, out):\n\n\timage_dir = mysettings[\"D\"]\n\tneeded_filename = 
os.path.join(mysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\"build-info\", \"NEEDED.ELF.2\")\n\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(needed_filename,\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tlines = f.readlines()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\t\treturn\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = \"\"\n\tf = None\n\ttry:\n\t\tf = io.open(_unicode_encode(os.path.join(\n\t\t\tmysettings[\"PORTAGE_BUILDDIR\"],\n\t\t\t\"build-info\", \"QA_SONAME_NO_SYMLINK\"),\n\t\t\tencoding=_encodings['fs'], errors='strict'),\n\t\t\tmode='r', encoding=_encodings['repo.content'],\n\t\t\terrors='replace')\n\t\tqa_no_symlink = f.read()\n\texcept IOError as e:\n\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\traise\n\tfinally:\n\t\tif f is not None:\n\t\t\tf.close()\n\n\tqa_no_symlink = qa_no_symlink.split()\n\tif qa_no_symlink:\n\t\tif len(qa_no_symlink) > 1:\n\t\t\tqa_no_symlink = \"|\".join(\"(%s)\" % x for x in qa_no_symlink)\n\t\t\tqa_no_symlink = \"^(%s)$\" % qa_no_symlink\n\t\telse:\n\t\t\tqa_no_symlink = \"^%s$\" % qa_no_symlink[0]\n\t\tqa_no_symlink = re.compile(qa_no_symlink)\n\n\tlibpaths = set(portage.util.getlibpaths(\n\t\tmysettings[\"ROOT\"], env=mysettings))\n\tlibpath_inodes = set()\n\tfor libpath in libpaths:\n\t\tlibdir = os.path.join(mysettings[\"ROOT\"], libpath.lstrip(os.sep))\n\t\ttry:\n\t\t\ts = os.stat(libdir)\n\t\texcept OSError:\n\t\t\tcontinue\n\t\telse:\n\t\t\tlibpath_inodes.add((s.st_dev, s.st_ino))\n\n\tis_libdir_cache = {}\n\n\tdef is_libdir(obj_parent):\n\t\ttry:\n\t\t\treturn is_libdir_cache[obj_parent]\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\trval = False\n\t\tif obj_parent in libpaths:\n\t\t\trval = True\n\t\telse:\n\t\t\tparent_path = os.path.join(mysettings[\"ROOT\"],\n\t\t\t\tobj_parent.lstrip(os.sep))\n\t\t\ttry:\n\t\t\t\ts = os.stat(parent_path)\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif (s.st_dev, s.st_ino) in libpath_inodes:\n\t\t\t\t\trval = True\n\n\t\tis_libdir_cache[obj_parent] = rval\n\t\treturn rval\n\n\tmissing_symlinks = []\n\n\t# Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.\n\tfor l in lines:\n\t\tl = l.rstrip(\"\\n\")\n\t\tif not l:\n\t\t\tcontinue\n\t\tfields = l.split(\";\")\n\t\tif len(fields) < 5:\n\t\t\tportage.util.writemsg_level(_(\"\\nWrong number of fields \" \\\n\t\t\t\t\"in %s: %s\\n\\n\") % (needed_filename, l),\n\t\t\t\tlevel=logging.ERROR, noiselevel=-1)\n\t\t\tcontinue\n\n\t\tobj, soname = fields[1:3]\n\t\tif not soname:\n\t\t\tcontinue\n\t\tif not is_libdir(os.path.dirname(obj)):\n\t\t\tcontinue\n\t\tif qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:\n\t\t\tcontinue\n\n\t\tobj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))\n\t\tsym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)\n\t\ttry:\n\t\t\tos.lstat(sym_file_path)\n\t\texcept OSError as e:\n\t\t\tif e.errno not in (errno.ENOENT, errno.ESTALE):\n\t\t\t\traise\n\t\telse:\n\t\t\tcontinue\n\n\t\tmissing_symlinks.append((obj, soname))\n\n\tif not missing_symlinks:\n\t\treturn\n\n\tqa_msg = [\"QA Notice: Missing soname symlink(s):\"]\n\tqa_msg.append(\"\")\n\tqa_msg.extend(\"\\t%s -> %s\" % (os.path.join(\n\t\tos.path.dirname(obj).lstrip(os.sep), soname),\n\t\tos.path.basename(obj))\n\t\tfor obj, soname in missing_symlinks)\n\tqa_msg.append(\"\")\n\tfor line in qa_msg:\n\t\teqawarn(line, key=mysettings.mycpv, out=out)", 
"def write_downloaded_links():\n global downloaded_links_fn\n text_file = open(downloaded_links_fn,\"w\")\n for link in downloaded_links.items():\n text_file.write(link[0] + \"\\n\")\n text_file.close()", "def make_bibtex(self):\n\n\t\t# bib = requests.request('GET', 'http://dx.doi.org/' + self.doi, ", "def _open_output_files(self):\n self.links_outfile = open(self.opts.links_outfile, 'wb')", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Referrering Links' % SITE_NAME\n f.FEED_META['feed.tagline'] = \\\n 'New referring links from Apache access.log on %s' % SITE_NAME\n \n # Load up tail of access log, parse, and filter\n new_lines = bookmark_tailgrep(ACCESS_LOG, max_initial_lines=100000)\n all_events = parse_access_log(new_lines)\n events = [ x for x in all_events if event_filter(x) ]\n \n # Scan through latest events for new referrers\n referrers_seen = shelve.open(REFER_SEEN)\n new_referrers = []\n for evt in events:\n k = '%(referrer)s -> %(path)s' % evt\n if not referrers_seen.has_key(k):\n referrers_seen[k] = 1\n new_referrers.append( (evt['referrer'], evt['path']) )\n referrers_seen.close()\n \n # If there were new referrers found, insert a new entry.\n if len(new_referrers) > 0:\n \n # Build a list of hyperlinks for referrers\n links_out = [\n LINK_TMPL % {\n 'SITE_ROOT' : SITE_ROOT,\n 'referrer' : x[0],\n 'path' : x[1],\n }\n for x in new_referrers\n ]\n \n # Build a summary for this entry.\n summary = SUMMARY_TMPL % { \n 'count' : len(new_referrers), \n 'links' : \"\\n\".join(links_out)\n }\n \n # Construct and append a new entry\n entry = FeedEntryDict({\n 'title' : '%s new referrers' % len(new_referrers),\n 'link' : '',\n 'summary' : summary\n })\n f.append_entry(entry)\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())", "def link_resources(ctx):\n\n for resource in RESOURCES:\n\n command = \"ln -s -r -f -T {res}/{resource} {proj}/{resource}\".format(\n res=RESOURCE_DIR,\n proj=PROJECT_DIR,\n resource=resource)\n\n print(\"Running\")\n print(command)\n print(\"-----------------------------\")\n ctx.run(command)", "def get_all_books_page_links(raw_page_rip):\n nt = {}\n ot = {}\n OTIDS = []\n soup = raw_page_rip.soup\n if not os.path.exists(data_store):\n os.mkdir(data_store)\n \n nt_soup = soup.find(\"td\", class_=\"NT\")\n ot1 = soup.find(\"td\", class_=\"OT1\")\n ot2 = soup.find(\"td\", class_=\"OT2\")\n \n for each in nt_soup.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n href = each.get(\"href\")\n name = each.text\n\n idd = re.search(r'\\d{5}', href).group(0)\n nt[name] = [domain + href, idd]\n \n with open(os.path.join(data_store, \"new_test.json\"), \"w+\") as wh:\n json.dump(nt, wh)\n\n for each in ot1.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n \n href = each.get(\"href\")\n name = each.text\n idd = re.search(r'\\d{5}', href).group(0)\n \n if idd in OTIDS:\n ot[domain + href][0] = name + \" or \" + ot[domain + href][0]\n else:\n ot[domain + href] = [name, idd]\n OTIDS.append(idd)\n \n for each in ot2.find_all(\"a\", href=True):\n if 'class=\"b\"' in str(each):\n \n href = each.get(\"href\")\n name = each.text\n idd = re.search(r'\\d{5}', href).group(0)\n \n if idd in OTIDS:\n ot[domain + href][0] = name 
+ \" or \" + ot[domain + href][0]\n else:\n ot[domain + href] = [name, idd]\n OTIDS.append(idd)\n \n rev_old = {value[0] : [key, value[1]] for key, value in ot.items()}\n with open(os.path.join(data_store, \"old_test.json\"), \"w+\") as wh:\n json.dump(rev_old, wh)", "def generate_links(wiki, page, tags):\n wiki.generate_tag_links(page, tags)", "def create_links(list_of_paths, dest_dir, print_cfg_ipol=False):\n ms = False\n for i, f in enumerate(list_of_paths):\n\n if isinstance(f, tuple): # we have the ms image\n # tif ms\n ms = True\n symlink_p(f[1], os.path.join(dest_dir, 'im_ms_%02d.tif' % (i+1)))\n\n # preview ms\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f[1]), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # enhance contrast\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp)\n else:\n print('MS PREVIEW not found for %s' % f[1], file=sys.stderr)\n f = f[0] # the path to ms preview is not needed anymore\n\n # pan preview (if no ms preview)\n if not os.path.isfile(os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1))):\n tmp = copy_file_matching_pathname('PREVIEW_*.JPG', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'prv_%02d.jpg' % (i+1)))\n # os.system(\"/home/carlo/code/s2p/bin/qauto %s %s\" % (tmp, tmp))\n else:\n print('PAN PREVIEW not found for %s' % f, file=sys.stderr)\n\n # dim\n tmp = copy_file_matching_pathname('DIM_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'dim_%02d.xml' % (i+1)))\n\n # rpc\n tmp = copy_file_matching_pathname('RPC_*.XML', os.path.dirname(f), dest_dir)\n if tmp:\n symlink_p(tmp, os.path.join(dest_dir, 'rpc_%02d.xml' % (i+1)))\n\n # tif panchro\n symlink_p(f, os.path.join(dest_dir, 'im_panchro_%02d.tif' % (i+1)))\n\n # dzi 8 bits\n dzi8_found = False\n dzi8 = '%s_8BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files8 = '%s_8BITS_files' % f[:-8]\n if os.path.isfile(dzi8) and os.path.isdir(files8):\n symlink_p(dzi8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d.dzi' % (i+1)))\n symlink_p(files8, os.path.join(dest_dir, 'im_panchro_8BITS_%02d_files' % (i+1)))\n dzi8_found = True\n\n # dzi 16 bits\n dzi16_found = False\n dzi16 = '%s_16BITS.dzi' % f[:-8] # remove extension '.JP2.TIF' (8 chars)\n files16 = '%s_16BITS_files' % f[:-8]\n if os.path.isfile(dzi16) and os.path.isdir(files16):\n symlink_p(dzi16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d.dzi' % (i+1)))\n symlink_p(files16, os.path.join(dest_dir, 'im_panchro_16BITS_%02d_files' % (i+1)))\n dzi16_found = True\n\n # print warning if neither 8bit nor 16bit dzi was found\n if (not dzi8_found) and (not dzi16_found):\n print('WARNING: no dzi file found for img %s' % f, file=sys.stderr)\n\n if print_cfg_ipol:\n print_cfg.main(dest_dir, len(list_of_paths), ms)", "def connect_links(base_url, extensions, wikidir, body):\n if base_url.endswith(\"/\"):\n base_url = base_url[:-1]\n\n i = 0\n body2 = []\n\n for match in WIKILINK.finditer(body):\n body2.append(body[i:match.span(0)[0]])\n \n text = match.group(1)\n\n if \"|\" in text:\n topic, desc = text.split(\"|\")\n topic = topic.strip()\n else:\n topic, desc = (text, text)\n\n fn = os.path.join(wikidir, topic)\n\n ext = tools.what_ext(extensions, fn)\n if not ext:\n body2.append(match.group(0))\n i = match.span(0)[1]\n continue\n\n body2.append(\"<a href=\\\"%s/%s/%s\\\">%s</a>\" % \\\n (base_url, TRIGGER, topic, desc))\n i = match.span(0)[1]\n\n body2.append(body[i:])\n return \"\".join(body2)", 
"def _collect_img_links(self):\n raise NotImplementedError", "def link_dihedra(self, verbose: bool = ...) -> None:\n ...", "def generate_readme_url(self, dn):\n\n df_curation = dn.curation_dict\n\n # Preferred citation\n single_str_citation = df_curation['item']['citation']\n\n # handle period in author list. Assume no period in dataset title\n str_list = list([single_str_citation.split('):')[0] + '). '])\n str_list += [str_row + '.' for str_row in single_str_citation.split('):')[1].split('. ')]\n\n citation_list = [content for content in str_list[0:-2]]\n citation_list.append(f\"{str_list[-2]} {str_list[-1]}\")\n citation_list = ' <br> '.join(citation_list)\n\n # summary\n figshare_description = df_curation['item']['description']\n\n query_str_dict = {'article_id': dn.name_dict['article_id'],\n 'curation_id': dn.name_dict['curation_id'],\n 'title': dn.name_dict['title'],\n 'depositor_name': dn.name_dict['simplify_fullName'],\n 'preferred_citation': citation_list,\n 'license': df_curation['item']['license']['name'],\n 'summary': figshare_description}\n # doi\n if not df_curation['item']['doi']: # empty case\n query_str_dict['doi'] = f\"https://doi.org/10.25422/azu.data.{dn.name_dict['article_id']}\"\n else:\n query_str_dict['doi'] = f\"https://doi.org/{df_curation['item']['doi']}\"\n\n # links\n if not df_curation['item']['references']: # not empty case\n links = \" <br> \".join(df_curation['item']['references'])\n query_str_dict['links'] = links\n\n # query_str_encode = str(query_str_dict).encode('base64', 'strict')\n q_eed = base64.urlsafe_b64encode(json.dumps(query_str_dict).encode()).decode()\n\n full_url = f\"{self.dict['generate_url']}{self.readme_survey_id}?\" + \\\n 'Q_EED=' + q_eed\n\n return full_url", "def create_readme(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"create_readme\")\n os.chdir(case_dict[\"archive_temp_dir\"])\n\n fname = open(\"README.archive\", \"w\")\n fname.write(\"Archived metadata is available for this case at URL:\\n\")\n fname.write(case_dict[\"base_expdb_url\"])\n fname.close()", "def test_path05(self):\n index_file = os.path.join(self.test_input, 'index.html')\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n</body>\n</html>\n\"\"\")\n\n for _info in file2wsb.run(self.test_input, self.test_output, no_preserve_filename=True):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_item, = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, f'{id_item}.html'),\n })", "def replace_local_hyperlinks(\n text,\n base_url=\"https://github.com/project-rig/nengo_spinnaker/blob/master/\"\n ):\n def get_new_url(url):\n return base_url + url[2:]\n\n # Deal with anonymous URLS\n for match in re.finditer(r\"^__ (?P<url>\\./.*)\", text, 
re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\"^__ {}\".format(orig_url),\n \"__ {}\".format(url), text, flags=re.MULTILINE)\n\n # Deal with named URLS\n for match in re.finditer(r\"^\\.\\. _(?P<identifier>[^:]*): (?P<url>\\./.*)\",\n text, re.MULTILINE):\n identifier = match.groupdict()[\"identifier\"]\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = re.sub(\n \"^\\.\\. _{}: {}\".format(identifier, orig_url),\n \".. _{}: {}\".format(identifier, url),\n text, flags=re.MULTILINE)\n\n # Deal with image URLS\n for match in re.finditer(r\"^\\.\\. image:: (?P<url>\\./.*)\",\n text, re.MULTILINE):\n orig_url = match.groupdict()[\"url\"]\n url = get_new_url(orig_url)\n\n text = text.replace(\".. image:: {}\".format(orig_url),\n \".. image:: {}\".format(url))\n\n return text", "def formatArrayHyperlink(txt, lnk, filename):\n if oapackage.oahelper.oaIsBinary(filename):\n ss = e.a(txt + e.small(\" (binary)\"), href=lnk, class_=\"binarylink\")\n else:\n ss = e.a(txt, href=lnk, class_=\"normal\")\n return ss", "def cleanup_links(path, inspect_links=False):\n with open(path) as f:\n text = f.read()\n\n# if 'BokehJS does not appear to have successfully loaded' in text:\n# for k, v in BOKEH_REPLACEMENTS.items():\n# text = text.replace(k, v)\n\n text = component_links(text, path)\n soup = BeautifulSoup(text, features=\"html.parser\")\n for a in soup.findAll('a'):\n href = a.get('href', '')\n if '.ipynb' in href and 'http' not in href:\n # for k, v in LINK_REPLACEMENTS.items():\n # href = href.replace(k, v)\n a['href'] = href.replace('.ipynb', '.html')\n\n # check to make sure that path exists, if not, try un-numbered version\n try_path = os.path.join(os.path.dirname(path), a['href'])\n if not os.path.exists(try_path):\n num_name = os.path.basename(try_path)\n name = re.split(r\"^\\d+( |-|_)\", num_name)[-1]\n new_path = try_path.replace(num_name, name)\n if os.path.exists(new_path):\n a['href'] = os.path.relpath(new_path, os.path.dirname(path))\n else:\n also_tried = 'Also tried: {}'.format(name) if name != num_name else ''\n warnings.warn('Found missing link {} in: {}. {}'.format(a['href'], path, also_tried))\n\n if inspect_links and 'http' in a['href']:\n print(a['href'])\n for img in soup.findAll('img'):\n src = img.get('src', '')\n if 'http' not in src and 'assets' in src:\n try_path = os.path.join(os.path.dirname(path), src)\n if not os.path.exists(try_path):\n also_tried = os.path.join('..', src)\n if os.path.exists(os.path.join(os.path.dirname(path), also_tried)):\n img['src'] = also_tried\n else:\n warnings.warn('Found reference to missing image {} in: {}. 
Also tried: {}'.format(src, path, also_tried))\n with open(path, 'w') as f:\n f.write(str(soup))", "def write_package_index(self, package, files):\n self.logger.info('writing index for %s', package)\n pkg_dir = self.output_path / 'simple' / package\n mkdir_override_symlink(pkg_dir)\n with tempfile.NamedTemporaryFile(mode='w', dir=str(pkg_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Links for {}'.format(package))\n ),\n tag.body(\n tag.h1('Links for {}'.format(package)),\n ((tag.a(\n f.filename,\n href='{f.filename}#sha256={f.filehash}'.format(f=f), # noqa: E501\n rel='internal'), tag.br())\n for f in files)\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name, str(pkg_dir / 'index.html'))\n try:\n # Workaround for #20: after constructing the index for a\n # package attempt to symlink the \"canonicalized\" package\n # name to the actual package directory. The reasons for\n # doing things this way are rather complex...\n #\n # The older package name must exist for the benefit of\n # older versions of pip. If the symlink already exists *or\n # is a directory* we ignore it. Yes, it's possible to have\n # two packages which both have the same canonicalized name,\n # and for each to have different contents. I don't quite\n # know how PyPI handle this but their XML and JSON APIs\n # already include such situations (in a small number of\n # cases). This setup is designed to create canonicalized\n # links where possible but not to clobber \"real\" packages\n # if they exist.\n #\n # What about new packages that want to take the place of a\n # canonicalized symlink? We (and TransferState.commit)\n # handle that by removing the symlink and making a\n # directory in its place.\n canon_dir = pkg_dir.with_name(canonicalize_name(pkg_dir.name)) # noqa: E501\n canon_dir.symlink_to(pkg_dir.name)\n except FileExistsError:\n pass", "def do_links(root0):\n\n _log.info(f\"Doing links on {root0}\")\n for root, dirs, files in os.walk(root0):\n for name in files:\n fullname = pathlib.Path(root, name)\n last = findlast(fullname, tocheck)\n _log.debug(f\"Checking: {fullname} found {last}\")\n if last is not None:\n fullname.unlink()\n oldname = last / fullname\n # Need to do these relative to where the final is, but note\n # that `Path.relative_to` does not allow '.' 
as a common path\n # prefix, so we need to use `os.path.relpath` instead.\n relpath = os.path.relpath(oldname, start=fullname.parent)\n if name.endswith((\".htm\", \".html\")):\n # make an html redirect.\n _log.info(f\"Rewriting HTML: {fullname} in {last}\")\n with fullname.open(\"w\") as fout:\n st = html_redirect.format(\n newurl=relpath,\n canonical=oldname,\n )\n fout.write(st)\n else:\n # soft link\n _log.info(f\"Linking {fullname} to {oldname}\")\n fullname.symlink_to(relpath)", "def test_path04(self):\n index_file = os.path.join(self.test_input, 'index.html')\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n</body>\n</html>\n\"\"\")\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_item, = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}/index.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n })", "def build_links(self, db):\r\n\t\tprint \"Building links' connections.\"\r\n\t\tconn = sqlite3.connect(db)\r\n\t\tcur = conn.cursor()\r\n\t\tconn.text_factory = str\r\n\t\tdbname = db[:-3]\r\n\t\tsql = \"select url from %s\" % dbname\r\n\t\turls = [ url[0] for url in cur.execute(sql).fetchall()]\r\n\t\t\r\n\t\turlids = self.urls2ids(urls)\r\n\t\tfrom_urls = dict([(urlid,[]) for urlid in urlids])\r\n\t\tto_urls = dict([(urlid,[]) for urlid in urlids])\r\n\r\n\t\tprogress = ProgressMeter(total=len(urls))\r\n\t\tfor (cnt, url) in enumerate(urls):\r\n\t\t\turlid = self.get_urlid(url)\r\n\t\t\tp = MyHTMLParser(url)\r\n\t\t\tsql = \"select content from %s where url='%s'\" % (dbname, url)\r\n\t\t\tcontent = cur.execute(sql).fetchone()[0]\r\n\t\t\ttry: p.feed(content)\r\n\t\t\texcept:\tferrmsg('Error: feed error in %s.' 
% url, 'Rank')\r\n\t\t\tto_urls[urlid] = self.urls2ids(p.htm_urls())\r\n\t\t\tfor lid in to_urls[urlid]:\r\n\t\t\t\tif lid not in from_urls.keys():\r\n\t\t\t\t\tcontinue\r\n\t\t\t\telse:\r\n\t\t\t\t\tfrom_urls[lid].append(urlid)\r\n\t\t\t# update the progress\r\n\t\t\tif (cnt % REFRESH_CNT) == 0 or cnt == progress.total-1:\r\n\t\t\t\tprogress.update(cnt+1)\r\n\t\tself.url_ids = urlids\r\n\t\tself.from_ids = from_urls\r\n\t\tself.to_ids = to_urls", "def link_fastqs(fastq_in, fastq_out):\n if not os.path.exists(os.path.dirname(fastq_out)):\n os.mkdir(os.path.dirname(fastq_out))\n if not os.path.exists(fastq_out):\n os.symlink(fastq_in, fastq_out) \n\n \n\n #88888888888888888888888888888888888888888888888888\n #\n # R e a d t r i m m i n g\n #\n #88888888888888888888888888888888888888888888888888", "def update_links(self):\n for a in self.book.xpath(\"//a[@href]\"):\n href = a.xpath(\"@href\")[0]\n index_list = a.xpath(\"@data-index\")\n \n ### If there is no data-index it is assumed link comes from initial book landing page (the index page)\n if index_list == []:\n index = self.manager.get_page_index(\"index.html\")\n else:\n index = index_list[0]\n \n ### Fix people who are bad at links\n if href.startswith(\"www.\"):\n href = \"https://\" + href\n a.set(\"href\", href)\n \n ## Correct for ambiguity (Naive assumption that this error only occours on index page)\n if href == \"./\":\n href = \"index.html\"\n \n if not href:\n return None\n \n href = self.manager.convert_link(href, index)\n a.set(\"href\", href)", "def iter_links(self):", "def gen_www_rcs(self, names_list):\n f = open('%s/www.rc' % (self.www_classic_dir), 'w')\n f.write('%s\\n' % (self.images_dir))\n for name in names_list:\n f.write('%s\\n' % (name))\n f.close \n \n f = open('%s/www.rc' % (self.www_20_dir), 'w')\n f.write('%s\\n' % (self.images_dir))\n for name in names_list:\n f.write('%s\\n' % (name))\n f.close", "def ln_files_from_SMRTLink_job(self):\n log.info(\"make soft links from smrtlink job\")\n # Make a link of smrtlink dir\n lnabs(self.smrtlink_job_dir, op.join(self.root_dir, op.basename(self.sl_job.root_dir)))\n\n # Make a link of consensus isoforms fa, hq|lq isoforms fa|fq, isoseq_flnc.fasta\n lnabs(src=self.sl_job.hq_isoforms_fa, dst=self.hq_isoforms_fa)\n lnabs(src=self.sl_job.hq_isoforms_fq, dst=self.hq_isoforms_fq)\n lnabs(src=self.sl_job.lq_isoforms_fa, dst=self.lq_isoforms_fa)\n lnabs(src=self.sl_job.lq_isoforms_fq, dst=self.lq_isoforms_fq)\n\n if op.exists(self.sl_job.ccs_xml):\n consolidate_xml(self.sl_job.ccs_xml, self.ccs_fa)\n else:\n raise IOError(\"Could neither find {}\".format(self.sl_job.ccs_xml))\n\n self.sl_job.export_unpolished_fa(unpolished_fa=self.consensus_isoforms_fa)", "def export(self, dir):\n docs = {}\n cre, standard = None, None\n cres_written = {}\n\n # internal links are Group/HigherLevelCRE -> CRE\n for link in self.__get_internal_links():\n group = link[0]\n cre = link[1]\n type = link[2]\n grp = None\n # when cres link to each other it's a two way link\n # so handle cre1(group) -> cre2 link first\n if group.name in docs.keys():\n grp = docs[group.name]\n else:\n grp = CREfromDB(group)\n grp.add_link(cre_defs.Link(ltype=type, document=CREfromDB(cre)))\n docs[group.name] = grp\n\n # then handle cre2 -> cre1 link\n if cre.name in docs.keys():\n c = docs[cre.name]\n else:\n c = CREfromDB(cre)\n docs[cre.name] = c\n # this cannot be grp, grp already has a link to cre2\n c.add_link(cre_defs.Link(ltype=type, document=CREfromDB(group)))\n\n # external links are CRE -> standard\n for link 
in self.__get_external_links():\n internal_doc = link[0]\n standard = link[1]\n type = link[2]\n cr = None\n grp = None\n if internal_doc.name in docs.keys():\n cr = docs[internal_doc.name]\n else:\n cr = CREfromDB(internal_doc)\n if len(standard.name) != 0:\n cr.add_link(\n cre_defs.Link(\n ltype=type, document=StandardFromDB(standard))\n )\n docs[cr.name] = cr\n\n # unlinked standards last\n for ustandard in self.__get_unlinked_standards():\n ustand = StandardFromDB(ustandard)\n docs[\"%s-%s:%s:%s\" % (ustand.name, ustand.section,\n ustand.subsection, ustand.version)] = ustand\n\n for _, doc in docs.items():\n title = doc.name.replace(\"/\", \"-\") + \".yaml\"\n file.writeToDisk(\n file_title=title,\n file_content=yaml.safe_dump(doc.todict()),\n cres_loc=dir,\n )\n return docs.values()", "def help_links():\n return [\n {\"text\": \"Python Reference\", \"url\": PY_DOCS},\n {\"text\": \"IPython Reference\", \"url\": IPYTHON_DOCS},\n {\n \"text\": \"Robot Framework: User Guide\",\n \"url\": ujoin(ROBOT_DOCS, \"RobotFrameworkUserGuide.html\"),\n },\n *[\n {\n \"text\": f\"{lib} — Robot Framework\",\n \"url\": ujoin(ROBOT_DOCS, \"libraries\", f\"{lib}.html\"),\n }\n for lib in sorted(STDLIBS)\n if lib not in HAS_NO_DOC\n ],\n ]", "def _write_driver_graph_urls(self):\n try:\n driver_jobid = os.environ['LSB_JOBID']\n except KeyError:\n pass\n else:\n driver_rtm_url = construct_rtm_url(driver_jobid)\n driver_host = socket.gethostname()\n logger.info(f\"Driver LSB_JOBID is: {driver_jobid}\")\n logger.info(f\"Driver host is: {driver_host}\")\n logger.info(f\"Driver RTM graphs: {driver_rtm_url}\")\n\n start_timestamp = get_job_submit_time()\n ganglia_url = construct_ganglia_link(driver_host, start_timestamp)\n\n hostgraph_url_path = 'graph-links.txt'\n with open(hostgraph_url_path, 'a') as f:\n header = f\"=== Client RTM/Ganglia graphs ({socket.gethostname()}) ===\"\n f.write(header + \"\\n\")\n f.write(\"=\"*len(header) + \"\\n\")\n f.write(f\" {driver_rtm_url}\\n\")\n f.write(f\" {ganglia_url}\\n\\n\")", "def create_link(self):\n if self.link_info:\n link_type = self.file.options['link_type']\n if 'node' in self.link_info:\n target_path = self.link_info['node'].full_path\n if link_type == 'string':\n # create string dataset containing link path\n #- self.file.file_pointer.create_dataset(self.full_path, data=\"h5link:/\" + target_path)\n self.file.create_dataset(self.full_path, data=\"h5link:/\" + target_path)\n elif link_type == 'hard':\n # create hard link to target. This implemented by h5py \"Softlink\". 
Not sure why.\n #- self.file.file_pointer[self.full_path] = h5py.SoftLink(target_path)\n self.file.create_softlink(self.full_path, target_path)\n else: \n raise Exception('Invalid option value for link_type (%s)' % link_type)\n elif 'extlink' in self.link_info:\n file, path = self.link_info['extlink']\n # link to external file\n if link_type == 'string':\n # create string dataset containing link path\n target_path = \"%s,%s\" % (file, path)\n #- self.file.file_pointer.create_dataset(self.full_path, data=\"h5extlink:/\" + target_path)\n self.file.create_dataset(self.full_path, data=\"h5extlink:/\" + target_path)\n elif link_type == 'hard':\n # create link to external file\n #- self.file.file_pointer[self.full_path] = h5py.ExternalLink(file,path)\n self.file.create_external_link(self.full_path, file, path) \n else:\n raise Exception('Invalid option value for link_type (%s)' % link_type)\n else:\n raise SystemError(\"** Error: invalid key in link_info %s\" % self.link_info)", "def create_readme(histfile, vb):\n\tme = \"Utils.create_readme: \"\n\treadmefile = os.path.dirname(histfile)+\"/README.txt\"\n\ttry:\n\t\tassert os.path.isfile(readmefile)\n\texcept AssertionError:\n\t\tnow = str(datetime.now().strftime(\"%Y-%m-%d %H.%M\"))\n\t\tcommit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])\n\t\theader = \"Time:\\t\"+now+\"\\nCommit hash:\\t\"+commit+\"\\n\\n\"\n\t\twith open(readmefile,\"w\") as f:\n\t\t\tf.write(header)\n\t\tif vb: print me+\"Created readme file \"+readmefile\n\treturn", "def _make_doi_clickable(link):\n return f\"https://doi.org/{link}\"", "def transform_github_links(app, doctree, fromdocname):\n\n try:\n target_format = app.builder.link_suffix\n except AttributeError:\n # if the builder has no link_suffix, then no need to modify\n # the current links.\n return\n\n source_suffix = app.config.source_suffix\n # Links are either absolute against the repository or relative to\n # the current document's directory. Note that this is not\n # necessarily app.srcdir, which is the documentation root\n # directory. Instead rely on 'source' attribute of doctree to\n # identify the path of the file providing the current doctree\n try:\n doc_path = doctree.attributes['source']\n doc_dir = os.path.dirname(doc_path)\n except KeyError:\n # some doctrees added by other libraries through dynamic\n # generation do not have a source file. 
Assume paths are\n # relative to the repo.\n doc_dir = \"\"\n\n for node in doctree.traverse(nodes.reference):\n if 'refuri' not in node:\n continue\n if node['refuri'].startswith('http'):\n continue\n\n try:\n link, anchor = node['refuri'].split('#', 1)\n anchor = '#' + anchor\n except ValueError:\n link = node['refuri']\n anchor = ''\n\n if link is None:\n continue\n\n # Replace the suffix with the correct target format file ending,\n # but only if the link ends with both the correct source suffix\n # and refers to a local file.\n for src_suffix in source_suffix:\n if link.endswith(src_suffix):\n # absolute paths are considered relative to repo\n if link.startswith(\"/\"):\n basepath = \"\"\n # relative paths are against the current doctree source path\n else:\n basepath = doc_dir\n if os.path.exists(os.path.join(basepath, link)):\n node['refuri'] = (link[:-len(source_suffix)] + target_format +\n anchor)", "def __links(self, fichier):\n f = fichier\n\n # un lien comprend une source et une destination\n src = self.files[f]['rcsdirname']\n dst = self.files[f]['rcslinkname']\n\n # on verifie si le lien existe et si il est valide\n lexist = self.files[f]['rcslinkexist']\n lok = self.files[f]['rcslinkisok']\n\n if f not in self.skips:\n if lexist:\n self.info('[lnk] %s, %s-->%s existe' % (f, src, dst))\n\n if lok:\n if (src, dst) not in self.linksok:\n self.info('[lnk] %s, %s-->%s lien valide' % (f, src, dst))\n self.linksok.append((src, dst))\n else:\n if (src, dst) not in self.linksko:\n self.error('[lnk] %s, %s-->%s lien non valide' % (f, src, dst))\n self.linksko.append((src, dst))\n else:\n if (src, dst) not in self.linksko:\n self.warn('[lnk] %s, %s-->%s innexistant' % (f, src, dst))\n self.linksko.append((src, dst))\n return None", "def test_path03(self):\n index_file = os.path.join(self.test_input, 'folder1#中文', 'folder2', 'mypage.html')\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n</body>\n</html>\n\"\"\")\n\n for _info in file2wsb.run(self.test_input, self.test_output, no_preserve_filename=True):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_folder1, id_folder2, id_item = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_folder1: {\n 'title': 'folder1#中文',\n 'type': 'folder',\n 'create': id_folder1,\n 'modify': id_folder1,\n },\n id_folder2: {\n 'title': 'folder2',\n 'type': 'folder',\n 'create': id_folder2,\n 'modify': id_folder2,\n },\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_folder1,\n ],\n id_folder1: [\n id_folder2,\n ],\n id_folder2: [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, f'{id_item}.html'),\n })", "def test_supporting_folder03(self):\n content = \"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n 
data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n<img src=\"index.files/picture.bmp\">\n</body>\n</html>\n\"\"\"\n index_file = os.path.join(self.test_input, 'index.html')\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(content)\n img_file = os.path.join(self.test_input, 'index.files', 'picture.bmp')\n os.makedirs(os.path.dirname(img_file), exist_ok=True)\n with open(img_file, 'wb') as fh:\n fh.write(b'dummy')\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_item, = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}/index.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n os.path.join(self.test_output, id_item, 'index.files'),\n os.path.join(self.test_output, id_item, 'index.files', 'picture.bmp'),\n })\n with open(os.path.join(self.test_output, id_item, 'index.html'), encoding='UTF-8') as fh:\n self.assertEqual(fh.read(), content)", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def _create_index_file(\n root_dir, location, image_files, video_files, dirs, force_no_processing=False):\n # Put together HTML as a list of the lines we'll want to include\n # Issue #2 exists to do this better than HTML in-code\n header_text = 'imageMe: {0} [{1} image(s)] [{2} video(s)]'.format(\n location, str(len(image_files)), str(len(video_files))\n )\n html = [\n '<!DOCTYPE html>',\n '<html>',\n ' <head>',\n ' <title>imageMe</title>'\n ' <style>',\n ' html, body {margin: 0; padding: 0;}',\n ' .table {align: center;}',\n ' .content {',\n ' padding: 3em;',\n ' padding-left: 4em;',\n ' padding-right: 4em;',\n ' }',\n ' .image {max-width: 100%; border-radius: 0.3em;}',\n ' td {width: ' + str(100.0 / args.column) + '%;}',\n ' </style>',\n ' </head>',\n ' <body>',\n ' <div class=\"content\">',\n ' <h2 class=\"header\">' + header_text + '</h2>'\n ]\n\n # Populate the present subdirectories - this includes '..' 
unless we're at\n # the top level\n directories = []\n if root_dir != location:\n directories = ['..']\n directories += dirs\n if len(directories) > 0:\n html.append('<hr>')\n # For each subdirectory, include a link to its index file\n for directory in directories:\n link = directory + '/' + args.index_file_name\n html += [\n ' <h3>',\n ' <a href=\"' + link + '\">' + directory + '</a>',\n ' </h3>'\n ]\n\n files = sorted(image_files + video_files)\n if args.separate_image_and_video:\n files = image_files + [None] + video_files\n\n # Populate the gallery table\n if files:\n # Counter to cycle down through table rows\n table_column_count = 1\n html += ['<hr>', '<table>']\n\n # For each file, potentially create a new <tr> and create a new <td>\n for file in files:\n if table_column_count == 1:\n html.append('<tr>')\n\n if file in video_files:\n html += [\n '<td>',\n ' <video controls preload width=\"100%\">',\n ' <source src=\"' + file + '\">',\n ' Your browser does not support HTML5 video.'\n ' </video>',\n '</td>'\n ]\n\n if file in image_files:\n img_src = _get_thumbnail_src_from_file(\n location, file, force_no_processing\n )\n link_target = _get_image_link_target_from_file(\n location, file, force_no_processing\n )\n html += [\n '<td>',\n ' <a href=\"' + link_target + '\">',\n ' <img class=\"image\" src=\"' + img_src + '\">',\n ' </a>',\n '</td>'\n ]\n\n if table_column_count == args.column or file == None:\n table_column_count = 0\n html.append('</tr>')\n\n table_column_count += 1\n\n if table_column_count != 1:\n html += ['</tr>']\n html += ['</table>']\n\n html += [\n ' </div>',\n ' </body>',\n '</html>'\n ]\n\n # Actually create the file, now we've put together the HTML content\n index_file_path = _get_index_file_path(location)\n print('Creating index file %s' % index_file_path)\n index_file = open(index_file_path, 'w')\n index_file.write('\\n'.join(html))\n index_file.close()\n\n # Return the path for cleaning up later\n return index_file_path", "def make_link_to(self, index, caption):\n \n # index is an int\n return '<a href=\"/log/'+str(index)+'\"> '+caption+' '+str(index)+'</a>'", "def _add_branch(wit_path, name, head):\n\n with open(os.path.join(wit_path, '.wit', 'references.txt'), 'a') as data:\n data.write(''.join(f'\\n{name}={head}'))", "def generate_cnx_uris(book_id):\n nodes = list(get_book_nodes(book_id))\n book_node = nodes[0]\n\n short_book_id = book_node['short_id']\n\n for node in nodes[1:]: # skip the book\n # Non-versioned URIs\n yield f\"/contents/{book_id}:{node['id']}/{node['slug']}\"\n yield f\"/contents/{book_id}:{node['short_id']}/{node['slug']}\"\n yield f\"/contents/{book_node['short_id']}:{node['id']}/{node['slug']}\"\n yield f\"/contents/{book_node['short_id']}:{node['short_id']}/{node['slug']}\"\n # Partial versioned URIs\n yield f\"/contents/{book_id}@1.1:{node['id']}/{node['slug']}\"\n yield f\"/contents/{book_id}@2.99:{node['short_id']}/{node['slug']}\"\n yield f\"/contents/{book_node['short_id']}@15.123:{node['id']}/{node['slug']}\"\n yield f\"/contents/{book_node['short_id']}@0.0:{node['short_id']}/{node['slug']}\"\n # Fully versioned URIs\n yield f\"/contents/{book_id}@1.1:{node['id']}@2/{node['slug']}\"\n yield f\"/contents/{book_id}@2.99:{node['short_id']}@0/{node['slug']}\"\n yield f\"/contents/{book_node['short_id']}@15.123:{node['id']}@999/{node['slug']}\"\n yield f\"/contents/{book_node['short_id']}@0.0:{node['short_id']}@654321/{node['slug']}\"", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", 
self._hash_updir(commit), commit) + \"/\"", "def _make_lib_file_symbolic_links(self):\n so_file_dict = {\n 'rpmio': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'rpmio/.libs',\n 'require': True,\n },\n 'rpm': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'lib/.libs',\n 'require': True,\n },\n 'rpmbuild': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'build/.libs',\n 'require': True,\n },\n 'rpmsign': {\n 'sym_src_dir': self.rpm.lib_dir,\n 'sym_dst_dir': 'sign/.libs',\n },\n }\n\n self._update_sym_src_dirs_conditionally(so_file_dict)\n\n for name in so_file_dict:\n so_dict = so_file_dict[name]\n pattern = 'lib{0}.so*'.format(name)\n so_files = Cmd.find(so_dict['sym_src_dir'], pattern)\n if not so_files:\n is_required = so_dict.get('require', False)\n if not is_required:\n message_format = (\n \"Skip creating symbolic link of \"\n \"not existing so file '{0}'\"\n )\n Log.debug(message_format.format(name))\n continue\n\n message = 'so file pattern {0} not found at {1}'.format(\n pattern, so_dict['sym_src_dir']\n )\n raise InstallError(message)\n sym_dst_dir = os.path.abspath('../{0}'.format(\n so_dict['sym_dst_dir']))\n if not os.path.isdir(sym_dst_dir):\n Cmd.mkdir_p(sym_dst_dir)\n\n cmd = 'ln -sf {0} {1}/lib{2}.so'.format(so_files[0],\n sym_dst_dir,\n name)\n Cmd.sh_e(cmd)", "def make_tags(self):\n\n if self.debug:\n print 'Running make_tags()'\n\n for root, dirs, files in os.walk(self.content_folder):\n basefolder = os.path.basename(root)\n\n # If the option self.strip_dot_files is defined,\n # the dotfiles/dotfolders will be excluded.\n if self.strip_dot_files:\n dirs[:] = [d for d in dirs if not d.startswith('.')]\n files[:] = [f for f in files if not f.startswith('.')]\n\n if self.tag_indicator in basefolder:\n self._make_symlink(root)\n\n for file in files:\n # This line checks for the tag indicator and that the file\n # is not being included from the folder that contains the\n # symlinks that will be created with this loop.\n if self.tag_indicator in file and not root.startswith(\n self.tags_folder):\n\n if self.debug:\n print 'tag: %s (from %s)' % (file, root)\n full_path = '%s/%s' % (root, file)\n self._make_symlink(full_path)", "def reportLink(self, citName, end1, end2):\n assert citName and end1 and end2\n osh = ObjectStateHolder(citName)\n osh.setAttribute(\"link_end1\", end1)\n osh.setAttribute(\"link_end2\", end2)\n return osh", "def test_path02(self):\n index_file = os.path.join(self.test_input, 'folder1#中文', 'folder2', 'mypage.html')\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n</body>\n</html>\n\"\"\")\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_folder1, id_folder2, id_item = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_folder1: {\n 'title': 'folder1#中文',\n 'type': 'folder',\n 'create': id_folder1,\n 'modify': id_folder1,\n },\n id_folder2: {\n 'title': 'folder2',\n 'type': 'folder',\n 'create': id_folder2,\n 'modify': id_folder2,\n },\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}/index.html',\n 'create': '20200101000000000',\n 'modify': 
'20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_folder1,\n ],\n id_folder1: [\n id_folder2,\n ],\n id_folder2: [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n os.path.join(self.test_output, id_item, 'mypage.html'),\n })", "def link(address):", "def lnh(src, dst):\n os.link(src, dst)", "def mklinkto(self, oldname):\n error.checked_call(os.link, str(oldname), str(self))", "def create_taxon_link(rank: str, name: str, do_print: bool, same_page: bool = False, path: str = \"\",\n include_rank: bool = True) -> str:\n start_tag, end_tag = rank_tags(rank)\n if same_page:\n x = \"\"\n else:\n x = path + init_data().syst_url\n if include_rank:\n rc = rank.capitalize() + \" \"\n else:\n rc = \"\"\n return \"<a href=\\\"\" + rel_link_prefix(do_print, x + \"#\") + \\\n \"{0}_{1}\\\">{2}{3}{1}{4}</a>\".format(rank, name, rc, start_tag, end_tag)", "def cli_createLinko():\n\n info = ('Creates a linkograph from an (inverse) labeling json'\n ' and an ontology json.')\n\n parser = argparse.ArgumentParser(description=info)\n parser.add_argument('labeling', metavar='LABELING.json',\n nargs=1,\n help='the inverse labeling json file.')\n\n parser.add_argument('ontology', metavar='ONTOLOGY.json',\n nargs=1,\n help='the json of ontology.')\n\n parser.add_argument('-o', '--out', metavar='OUTPUT_FILE',\n help='the linkograph as a json')\n\n args = parser.parse_args()\n\n outfile = None\n if args.out:\n outfile = args.out\n\n # Load the json files.\n with open(args.labeling[0], 'r') as invLabelingFile:\n invLabeling = json.load(invLabelingFile)\n with open(args.ontology[0], 'r') as ontologyFile:\n ontology = json.load(ontologyFile)\n linko = createLinko(invLabeling, ontology)\n\n if outfile:\n writeLinkoJson(linko, outfile)\n else:\n print(linko)", "def add_bilink(self, nodeport_a, nodeport_b, bilink):", "def gather_headlines(urls):\n pass", "def main(argv):\n if len(argv) == 0 or argv == None:\n print(HELP_TXT)\n sys.exit(2)\n try:\n opts, args = getopt.getopt(argv,\"dhmb:\",[\"debug\", \"help\", \"markdown\", \"biblio=\"])\n except getopt.GetoptError:\n print(HELP_TXT)\n sys.exit(2)\n\n testing = False\n markdown = False\n bibtex_filename = \"library.bib\"\n \n for opt, arg in opts:\n if opt in ('-h', '--help'):\n print(HELP_TXT)\n sys.exit()\n elif opt in ('-d', '--debug'):\n testing = True\n print(\"Fetching of DOI and printing metadata, but not adding to bibtex or creating markdown.\")\n elif opt in ('-b', '--biblio'):\n bibtex_filename = arg\n elif opt in ('-m', '--markdown'):\n markdown = True\n print(\"Writing entry to bibtex file but not creating a markdown file.\")\n\n if len(args) == 0:\n print(\"Error: At least one DOI must be provided.\")\n sys.exit(2)\n\n for doi in args:\n #Get the metadata from crossref.org\n res = doi2json(doi)\n \n #extract\n meta = extract_metadata(res)\n \n #check if reference exists\n citation_found = True\n letters = list(string.ascii_lowercase)\n try:\n with open(bibtex_filename, 'r') as fobj:\n text = fobj.read().strip()\n tmp_key = meta['citationkey']\n for letter in letters:\n if tmp_key in text:\n tmp_key = meta['citationkey'] + letter\n else:\n meta['citationkey'] = tmp_key\n break\n except FileNotFoundError:\n pass\n\n #Then write if not testing\n if testing:\n 
print(\"DOI RESULTS:\")\n for k,v in res.items():\n if type(v) is list:\n print(k + ': ' + str(v) + '\\n')\n else:\n print(k + ': \"' + str(v) + '\"\\n')\n print(\"PARSED METADATA:\")\n for k,v in meta.items():\n if type(v) is list:\n print(k + ': ' + str(v) + '\\n')\n else:\n print(k + ': \"' + str(v) + '\"\\n')\n else:\n if not os.path.isfile(bibtex_filename):\n print(\"Bibtex file {} does not exist, creating it.\".format(bibtex_filename))\n\n #Add the metadata to the bibtex reference\n with open(bibtex_filename, \"a+\") as f:\n s = \",\".join([\n \"\\n@article{\" + meta['citationkey'],\n \"\\nauthor = {\" + \" and \".join(meta['authors']) + \"}\",\n \"\\ndoi = {\" + meta['doi'] + \"}\",\n \"\\nissn = {\" + meta['issn'] + \"}\",\n \"\\njournal = {\" + meta['container'] + \"}\",\n \"\\npublisher = {\" + meta['publisher'] + \"}\",\n \"\\nnumber = {\" + meta['issue'] + \"}\",\n \"\\npages = {\" + meta['pages'] + \"}\",\n \"\\ntitle = {\" + meta['source-title'] + \"}\",\n \"\\nurl = {\" + meta['link'] + \"}\",\n \"\\nvolume = {\" + meta['volume'] + \"}\",\n \"\\nyear = {\" + meta['year'] + \"}\\n}\\n\\n\"\n ])\n f.write(s)\n \n print(\"reference {} from {} added to {}!\\n\".format(meta['citationkey'], meta['container'], bibtex_filename))\n \n # create the markdown notes file\n if markdown:\n subject_dir = re.sub(r'\\([^)]*\\)', '', meta['subjects'][0]).strip().lower().replace(\" \", \"_\").replace(\",\",\"\")\n md_dir = \"/\".join([HOME_DIR, REPO_DIR, MD_DIR, subject_dir])\n os.mkdir(md_dir) if os.path.isdir(md_dir) == False else None\n filename = \"/\".join([md_dir, meta['citationkey'] + \".md\"])\n with open(filename, \"w\") as f:\n f.write(\"---\\nlayout: mathpost\\n\")\n for k,v in meta.items():\n try:\n if type(v) is list:\n f.write(k + ': ' + str(v) + '\\n')\n else:\n f.write(k + ': \"' + str(v) + '\"\\n')\n except UnicodeEncodeError as e:\n print(\"Unicode Error. Some character(s) may be wrong.\")\n print(meta.keys())\n print(repr(k) + \": \" + repr(v))\n print(e)\n \n f.write(\"---\\n\\n\")\n citation = \"**Citation:** \" \\\n + meta['citation-authors'] + ' \"' \\\n + meta['source-title'] + '\". ' \\\n + '*' + meta['container'] + '*'\n citation = citation + \" \" + meta['volume'] if meta['volume'] != '' else citation\n citation = citation + \", no. \" + meta['issue'] if meta['issue'] != '' else citation\n citation = citation + \" (\" + meta['year'] + \")\" if meta['year'] != '' else citation\n citation = citation + \": \" + meta['pages'] if meta['pages'] != '' else citation\n \n f.write(citation + \". [[Paper link](\" + meta['link'] + \")]\")\n \n #add the reference to the \"reading_list.md\" file\n with open(\"/\".join([HOME_DIR, REPO_DIR, MD_DIR, \"reading_list.md\"]), \"a\") as f:\n f.write(\"* [ ] **\" + meta['citationkey'] \\\n + \"**: (\" \\\n + re.sub(r'\\([^)]*\\)', '', meta['subjects'][0]).strip() \\\n + \") \" + meta['link'] + \"\\n\")\n \n print(\"reference {} added in {}!\\n\".format(meta['citationkey'], md_dir))", "def setUp(self):\n self.decorated_internal_link = \"\"\"\\item\n Then see the {[}{[}C Gotchas wiki page\\\\textbar{}C Programming, Part 3:\n Common Gotchas{]}{]}.\"\"\"\n\n self.raw_internal_link = \"\"\"As already discussed in {[}{[}Synchronization, Part 3: Working with\nMutexes And Semaphores{]}{]}, there are critical parts of our code that\ncan only be executed by one thread at a time. 
We describe this\nrequirement as `mutual exclusion'; only one thread (or process) may have\naccess to the shared resource.\"\"\"", "def getreferingobjs(idfindex, idfobject):\n idf, edges = eppystuff.an_idfedges(idfindex)\n refobjs = idfobject.getreferingobjs() \n keys = [refobj.key for refobj in refobjs] \n objnames = [refobj.obj[1] for refobj in refobjs] \n idfkeys = idf_helpers.idfobjectkeys(idf)\n keysobjsindexes = [(idfkeys.index(refobj.key.upper()), \n idf.idfobjects[refobj.key.upper()].index(refobj))\n for refobj in refobjs] \n urls = [\"../../%s/%s\" % (idfkey, objkey) \n for idfkey, objkey in keysobjsindexes]\n urllinks = ['<a href=%s>%s</a>' % (url, name) \n for url, name in zip(urls, objnames)]\n lines = [\"%s->%s\" % (refobj.key, urllink) \n for refobj, urllink in zip(refobjs, urllinks)]\n return ', '.join(lines)", "def fix_git_symlinked(src, dst):\n # if running from WC there should be a 'doc' dir sibling to nikola package\n if not should_fix_git_symlinked():\n return\n # probabbly in a WC, so symlinks should be fixed\n for root, dirs, files in os.walk(dst):\n for name in files:\n filename = os.path.join(root, name)\n\n # detect if symlinked\n try:\n if not (2 < os.path.getsize(filename) < 500):\n continue\n # which encoding uses a git symlink marker ? betting on default\n with open(filename, 'r') as f:\n text = f.read()\n if text[0] != '.':\n # de facto hint to skip binary files and exclude.meta\n continue\n except Exception:\n # probably encoding: content binary or encoding not defalt,\n # also in py2.6 it can be path encoding\n continue\n dst_dir_relpath = os.path.dirname(os.path.relpath(filename, dst))\n path = os.path.normpath(os.path.join(src, dst_dir_relpath, text))\n if not os.path.exists(path):\n continue\n # most probably it is a git symlinked file\n\n # copy original content to filename\n shutil.copy(path, filename)", "def test_path01(self):\n index_file = os.path.join(self.test_input, 'folder1#中文', 'folder2', 'folder_data', 'index.html')\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n</body>\n</html>\n\"\"\")\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_folder1, id_folder2, id_item = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_folder1: {\n 'title': 'folder1#中文',\n 'type': 'folder',\n 'create': id_folder1,\n 'modify': id_folder1,\n },\n id_folder2: {\n 'title': 'folder2',\n 'type': 'folder',\n 'create': id_folder2,\n 'modify': id_folder2,\n },\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}/index.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_folder1,\n ],\n id_folder1: [\n id_folder2,\n ],\n id_folder2: [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n })", "def links(iati_import, activity, project, 
activities_globals):\n imported_links = []\n changes = []\n\n for website in activity.findall('activity-website'):\n url = get_text(website, activities_globals['version'])\n\n # Skip RSR links\n if url and 'rsr.akvo.org' in url:\n continue\n\n link, created = get_model('rsr', 'link').objects.get_or_create(\n project=project,\n url=url\n )\n\n if created:\n changes.append(u'added link (id: %s): %s' % (str(link.pk), link))\n\n imported_links.append(link)\n\n for doc_link in activity.findall(\"document-link[@format='application/http']\"):\n url = ''\n caption = ''\n\n if 'url' in doc_link.attrib.keys():\n url = doc_link.attrib['url']\n\n # Skip RSR links\n if url and 'rsr.akvo.org' in url:\n continue\n\n title_element = doc_link.find('title')\n if not title_element is None:\n caption = get_text(title_element, activities_globals['version'])\n if len(caption) > 50:\n add_log(iati_import, 'link_caption', 'caption is too long (50 characters allowed)',\n project, IatiImportLog.VALUE_PARTLY_SAVED)\n caption = caption[:50]\n\n link, created = get_model('rsr', 'link').objects.get_or_create(\n project=project,\n url=url,\n caption=caption\n )\n\n if created:\n changes.append(u'added link (id: %s): %s' % (str(link.pk), link))\n\n imported_links.append(link)\n\n for link in project.links.all():\n if not link in imported_links:\n changes.append(u'deleted link (id: %s): %s' %\n (str(link.pk),\n link.__unicode__()))\n link.delete()\n\n return changes", "def doLink(self):\n self.log.info('Starting TabLinker for all sheets in workbook')\n \n for n in range(self.rb.nsheets) :\n self.log.info('Starting with sheet {0}'.format(n))\n self.r_sheet = self.rb.sheet_by_index(n)\n self.w_sheet = self.wb.get_sheet(n)\n \n self.rowns, self.colns = self.getValidRowsCols()\n \n self.sheet_qname = urllib.quote(re.sub('\\s','_',self.r_sheet.name))\n self.log.info('Base for QName generator set to: {0}'.format(self.sheet_qname))\n \n self.log.debug('Starting parser')\n self.parseSheet()", "def createLink(self):\n \n if( self.useLink ):\n trymakedir( self.parent.installPath + \"/\" + self.alias )\n\n os.chdir( self.parent.installPath + \"/\" + self.alias )\n \n # check for already existing symlinks or dirs \n if( os.path.islink( self.version )):\n os.unlink( self.version )\n elif( os.path.isdir( self.version )):\n self.abort( \"could not create link to [ \" + self.linkPath + \" ]\\nin [ \" \\\n + os.path.basename( self.installPath ) + \" ]!!!\" )\n\n os.symlink( self.linkPath , self.version )\n print \"+ Linking \" + self.parent.installPath + \"/\" + self.alias + \"/\" + self.version \\\n + \" -> \" + self.linkPath", "def generateLink(folder, filename):\n if not folder.endswith('/'):\n folder += \"/\"\n\n return folder + os.path.basename(filename)", "def add_visible_links(self, tree: nodes.document, show_urls: str = 'inline') -> None:\n\n def make_footnote_ref(doc: nodes.document, label: str) -> nodes.footnote_reference:\n \"\"\"Create a footnote_reference node with children\"\"\"\n footnote_ref = nodes.footnote_reference('[#]_')\n footnote_ref.append(nodes.Text(label))\n doc.note_autofootnote_ref(footnote_ref)\n return footnote_ref\n\n def make_footnote(doc: nodes.document, label: str, uri: str) -> nodes.footnote:\n \"\"\"Create a footnote node with children\"\"\"\n footnote = nodes.footnote(uri)\n para = nodes.paragraph()\n para.append(nodes.Text(uri))\n footnote.append(para)\n footnote.insert(0, nodes.label('', label))\n doc.note_autofootnote(footnote)\n return footnote\n\n def footnote_spot(tree: nodes.document) -> 
tuple[Element, int]:\n \"\"\"Find or create a spot to place footnotes.\n\n The function returns the tuple (parent, index).\"\"\"\n # The code uses the following heuristic:\n # a) place them after the last existing footnote\n # b) place them after an (empty) Footnotes rubric\n # c) create an empty Footnotes rubric at the end of the document\n fns = list(tree.findall(nodes.footnote))\n if fns:\n fn = fns[-1]\n return fn.parent, fn.parent.index(fn) + 1\n for node in tree.findall(nodes.rubric):\n if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:\n return node.parent, node.parent.index(node) + 1\n doc = next(tree.findall(nodes.document))\n rub = nodes.rubric()\n rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))\n doc.append(rub)\n return doc, doc.index(rub) + 1\n\n if show_urls == 'no':\n return\n if show_urls == 'footnote':\n doc = next(tree.findall(nodes.document))\n fn_spot, fn_idx = footnote_spot(tree)\n nr = 1\n for node in list(tree.findall(nodes.reference)):\n uri = node.get('refuri', '')\n if uri.startswith(('http:', 'https:', 'ftp:')) and uri not in node.astext():\n idx = node.parent.index(node) + 1\n if show_urls == 'inline':\n uri = self.link_target_template % {'uri': uri}\n link = nodes.inline(uri, uri)\n link['classes'].append(self.css_link_target_class)\n node.parent.insert(idx, link)\n elif show_urls == 'footnote':\n label = FOOTNOTE_LABEL_TEMPLATE % nr\n nr += 1\n footnote_ref = make_footnote_ref(doc, label)\n node.parent.insert(idx, footnote_ref)\n footnote = make_footnote(doc, label, uri)\n fn_spot.insert(fn_idx, footnote)\n footnote_ref['refid'] = footnote['ids'][0]\n footnote.add_backref(footnote_ref['ids'][0])\n fn_idx += 1", "def MakeImageURL(fname, hyperlink='openfile', **kwargs):\n prefix = 'cdb://image/'\n if not _isSupportedUriPath(fname):\n fname = 'file:///%s' % os.path.basename(fname)\n else:\n fname = fname.replace('\\\\', '/')\n if hyperlink:\n hyperlink = ' cdb:hyperlink:%s' % hyperlink\n else:\n hyperlink = ''\n return '%s%s%s' % (prefix, fname, hyperlink)", "def test_download_links():\n\n # dir to download data to\n out_dir = 'test/download_data'\n\n # remove out_dir if it already exists and make a new one\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.system('mkdir -p %s'%out_dir)\n\n # List of all available fits\n fit_names = surfinBH.fits_collection.keys()\n for name in fit_names:\n surfinBH.DownloadData(name=name, data_dir=out_dir)\n\n # allow for both naming formats surfinBH7dq2 and NRSur7dq4Remnant\n if 'surfinBH' in name:\n name_tag = name.split('surfinBH')[-1]\n else:\n name_tag = name.split('NRSur')[-1].split('Remnant')[0]\n\n # check that it has the right name\n assert(os.path.isfile('%s/fit_%s.h5'%(out_dir, name_tag)))\n # check that the fit_name matches with the name in the attributes\n # of h5 file.\n h5file = h5py.File('%s/fit_%s.h5'%(out_dir, name_tag), 'r')\n assert(name_tag == h5file.attrs['name'].decode('utf-8'))\n h5file.close()", "def test_supporting_folder02(self):\n index_file = os.path.join(self.test_input, 'mypage.html')\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n<img src=\"mypage_files/picture.bmp\">\n</body>\n</html>\n\"\"\")\n img_file = os.path.join(self.test_input, 'mypage_files', 'picture.bmp')\n 
os.makedirs(os.path.dirname(img_file), exist_ok=True)\n with open(img_file, 'wb') as fh:\n fh.write(b'dummy')\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_item, = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}/index.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n os.path.join(self.test_output, id_item, 'mypage.html'),\n os.path.join(self.test_output, id_item, 'mypage_files'),\n os.path.join(self.test_output, id_item, 'mypage_files', 'picture.bmp'),\n })", "def link(self, fname):\n return fname", "def LinkAnat(self):\n\n if self.anatomical is None:\n return\n for entry in self.info.keys():\n info = self.info[entry]\n if info.has_key('anat_link'):\n self.LinkFiles(info['outdir'], self.anatomical)", "def _link(filename, existing_filename):\n CreateHardLinkW(filename, existing_filename, 0)", "def make_link(path):\r\n tryFile = path.replace('\\\\', '/')\r\n\r\n if os.path.isabs(tryFile) and os.path.isfile(tryFile):\r\n (folder, filename) = os.path.split(tryFile)\r\n (base, ext) = os.path.splitext(filename)\r\n app = get_app()\r\n\r\n editable = {'controllers': '.py', 'models': '.py', 'views': '.html'}\r\n for key in editable.keys():\r\n check_extension = folder.endswith(\"%s/%s\" % (app,key))\r\n if ext.lower() == editable[key] and check_extension:\r\n return A('\"' + tryFile + '\"',\r\n _href=URL(r=request,\r\n f='edit/%s/%s/%s' % (app, key, filename))).xml()\r\n return ''", "def repos():\n print(\"\\nThe following repos are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def getNewsIconURL(newsBrain):", "def link(ctx, note1, note2):\n directory = ctx.obj[\"config\"][\"owner\"][\"dir\"]\n\n note1, note2 = Note(directory, note1), Note(directory, note2)\n\n if note1.filename == note2.filename:\n Utils.display_error(\n \"Cannot create a link between a note and itself.\", \"yellow\")\n\n with open(note1.path, \"a\") as file:\n file.write(\"[{}]({})\\n\".format(note2.filename, note2.filename))\n\n with open(note2.path, \"a\") as file:\n file.write(\"[{}]({})\\n\".format(note1.filename, note1.filename))\n\n click.secho(\"Success! 
{} <-> {}\".format(note1.filename,\n note2.filename), fg=\"green\")", "def run(self):\n logging.debug('Relinking All Programs')\n\n dst = pakit.conf.CONFIG.path_to('link')\n walk_and_unlink_all(dst, pakit.conf.CONFIG.path_to('prefix'))\n\n for _, recipe in pakit.recipe.RDB:\n walk_and_link(recipe.install_dir, dst)", "def test_supporting_folder01(self):\n index_file = os.path.join(self.test_input, 'mypage.html')\n with open(index_file, 'w', encoding='UTF-8') as fh:\n fh.write(\"\"\"\\\n<!DOCTYPE html>\n<html\n data-scrapbook-create=\"20200101000000000\"\n data-scrapbook-modify=\"20200101000000000\"\n data-scrapbook-source=\"http://example.com\">\n<head>\n<meta charset=\"UTF-8\">\n<title>MyTitle 中文</title>\n</head>\n<body>\npage content\n<img src=\"mypage.files/picture.bmp\">\n</body>\n</html>\n\"\"\")\n img_file = os.path.join(self.test_input, 'mypage.files', 'picture.bmp')\n os.makedirs(os.path.dirname(img_file), exist_ok=True)\n with open(img_file, 'wb') as fh:\n fh.write(b'dummy')\n\n for _info in file2wsb.run(self.test_input, self.test_output):\n pass\n\n book = Host(self.test_output).books['']\n book.load_meta_files()\n book.load_toc_files()\n\n id_item, = book.meta.keys()\n self.assertDictEqual(book.meta, {\n id_item: {\n 'title': 'MyTitle 中文',\n 'type': '',\n 'index': f'{id_item}/index.html',\n 'create': '20200101000000000',\n 'modify': '20200101000000000',\n 'source': 'http://example.com',\n 'icon': '',\n 'comment': '',\n },\n })\n self.assertDictEqual(book.toc, {\n 'root': [\n id_item,\n ],\n })\n self.assertEqual(set(glob.iglob(os.path.join(self.test_output, '**'), recursive=True)), {\n os.path.join(self.test_output, ''),\n os.path.join(self.test_output, id_item),\n os.path.join(self.test_output, id_item, 'index.html'),\n os.path.join(self.test_output, id_item, 'mypage.html'),\n os.path.join(self.test_output, id_item, 'mypage.files'),\n os.path.join(self.test_output, id_item, 'mypage.files', 'picture.bmp'),\n })", "def generate_index_page(index_links,\r\n index_fp,\r\n order=[_index_headers['run_summary']]):\r\n # get containing directory for index_fp\r\n top_level_dir = split(split(index_fp)[0])[1]\r\n index_page_header = get_index_page_header()\r\n index_lines = [index_page_header]\r\n d = {}\r\n for e in index_links:\r\n try:\r\n d[e[2]].append((e[0], e[1]))\r\n except KeyError:\r\n d[e[2]] = [(e[0], e[1])]\r\n index_lines.append('<table border=1>\\n')\r\n\r\n # Determine the order the data should be presented in. 
This should be\r\n # the order that the user requested, followed by any categories that\r\n # the user didn't include in the order parameter.\r\n ordered_table_entries = order + [k for k in d if k not in order]\r\n for k in ordered_table_entries:\r\n v = d[k]\r\n index_lines.append(\r\n '<tr colspan=2 align=center bgcolor=#e8e8e8><td colspan=2 align=center>%s</td></tr>\\n' % k)\r\n for description, path in v:\r\n # if path starts with top_level_dir, replace it\r\n # with ./\r\n path = re.sub('^.*%s\\/' % top_level_dir, './', path)\r\n index_lines.append(\r\n '<tr>%s</tr>\\n' %\r\n format_index_link(\r\n description,\r\n path))\r\n index_lines.append('</table>\\n')\r\n\r\n index_page_footer = get_index_page_footer()\r\n index_lines.append(index_page_footer)\r\n\r\n open(index_fp, 'w').write(''.join(index_lines))", "def createLinko(inverseLabeling, ontology):\n\n # Remove any labels that are empty.\n inverseLabeling = {key: inverseLabeling[key] for key in inverseLabeling\n if len(inverseLabeling[key])>0}\n\n # It might be more robust to search for the maximum value.\n #size = sum(map(len, labels.values()))\n\n # Find the maximum value listed in the labels.\n # Note: this assumes that every command has a label.\n size = max(map(max, inverseLabeling.values())) + 1\n\n linko = Linkograph([(set(), set(), set()) for n in range(size)])\n\n newLabels = set()\n\n # Put in the labels.\n for l in inverseLabeling:\n newLabels.add(l)\n for n in inverseLabeling[l]:\n linko[n][0].add(l)\n\n newLabels = newLabels.union(ontology.keys())\n linko.labels = sorted(list(newLabels))\n\n # If there are no rules or no labels, then return\n # the empty linkograph.\n if not ontology or not len(inverseLabeling):\n return linko\n\n # Loop through each edge in the rules.\n for initialLabel in ontology:\n # Get the index list for the initial label.\n #initialIndecies = labels[initialLabel]\n initialIndecies = inverseLabeling.get(initialLabel)\n\n if initialIndecies is None:\n continue\n\n # Add the indecies to the linkograph\n # for n in initialIndecies:\n # linko[n][0].append(initialLabel)\n\n for terminalLabel in ontology[initialLabel]:\n # Get the index list for the target label\n terminalIndecies = inverseLabeling.get(terminalLabel)\n\n if terminalIndecies is None:\n continue\n\n # iterate through the terminal indecies\n # as long as the terminal index is more than\n # the initiali ndex, add the terminal index\n # to the forelinks of the initial index and\n # add the initial index to the backlinks\n # of the terminal index.\n for teIndex in terminalIndecies[::-1]:\n for inIndex in initialIndecies:\n # RRM: JB put this mod in--I am disabling it because it generates a lot of visibility\n #if(inIndex==15):\n # print(\"We're here!\")\n if teIndex > inIndex:\n # Add the forelink\n linko[inIndex][2].add(teIndex)\n # Add in the backlink\n linko[teIndex][1].add(inIndex)\n else:\n break\n return linko", "def _construct_links_of_interest(self):\n if self.opts.selected_links:\n links, num_links = self.parse_selected_links(\n self.opts.selected_links)\n elif (self.opts.selected_terms or\n self.opts.selected_terms_with_all):\n if self.opts.selected_terms:\n selected_terms_file_name = self.opts.selected_terms\n else:\n selected_terms_file_name = \\\n self.opts.selected_terms_with_all\n logger.info(\"Parsing selected terms file %s.\" %\n selected_terms_file_name)\n selected_terms_file = open(\n selected_terms_file_name, 'rb')\n num_selected_terms = convutils.count_lines(\n selected_terms_file)\n selected_terms = \\\n 
parsers.parse_selected_terms_file(\n selected_terms_file)\n if self.opts.selected_terms:\n num_links = self._calc_num_links_selected_terms(\n num_selected_terms)\n links = self.make_selected_terms_links(\n selected_terms)\n else:\n num_links = self._calc_num_links_selected_with_all(\n num_selected_terms)\n links = self.make_selected_terms_links_with_all(\n selected_terms, self.annotations_dict)\n else:\n links, num_links = self.make_all_possible_links(\n self.annotations_dict)\n\n self.links, self.num_links = links, num_links", "def download_reference_files(job, inputs, samples):\n # Create dictionary to store FileStoreIDs of shared input files\n shared_ids = {}\n urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),\n ('pac', inputs.pac), ('sa', inputs.sa)]\n # Alt file is optional and can only be provided, not generated\n if inputs.alt:\n urls.append(('alt', inputs.alt))\n # Download reference\n download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G') # Human genomes are typically ~3G\n job.addChild(download_ref)\n shared_ids['ref'] = download_ref.rv()\n # If FAI is provided, download it. Otherwise, generate it\n if inputs.fai:\n shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()\n else:\n faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())\n shared_ids['fai'] = download_ref.addChild(faidx).rv()\n # If all BWA index files are provided, download them. Otherwise, generate them\n if all(x[1] for x in urls):\n for name, url in urls:\n shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()\n else:\n job.fileStore.logToMaster('BWA index files not provided, creating now')\n bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())\n download_ref.addChild(bwa_index)\n for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):\n shared_ids[name] = bwa_index.rv(x)\n\n # Map_job distributes one sample in samples to the downlaod_sample_and_align function\n job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids)", "def __init__(self, root, api, symlink_resource):\n assert root and isinstance(root, config_types.Path)\n self._root = root\n self._api = api\n self._resource = symlink_resource\n # dict[Path]list(Path): Maps target to a list of linknames.\n self._link_map = {}", "def main(file_paths: Optional[List[Path]] = None):\n if file_paths:\n for file_path in file_paths:\n move_and_symlink_file(file_path)\n\n else:\n create_home_directories()\n create_home_directory_symbolic_links()" ]
[ "0.667778", "0.59682673", "0.5959984", "0.567664", "0.5635121", "0.55980814", "0.553239", "0.5522011", "0.54892784", "0.54057527", "0.5380412", "0.5370104", "0.53354234", "0.5332412", "0.5303601", "0.5300853", "0.52821183", "0.5264791", "0.5261789", "0.5261702", "0.5261481", "0.52487564", "0.5235796", "0.5218422", "0.5204191", "0.5178898", "0.51704174", "0.5159474", "0.5132064", "0.51179516", "0.5116074", "0.51089877", "0.51064813", "0.5094619", "0.5080394", "0.50787836", "0.5049362", "0.5046367", "0.5042509", "0.5027774", "0.50242555", "0.5017812", "0.5013695", "0.5005159", "0.4999806", "0.49783576", "0.49768645", "0.49767098", "0.4971203", "0.49614248", "0.49609274", "0.49533442", "0.49467978", "0.49400333", "0.4939618", "0.4934148", "0.4930864", "0.49273595", "0.49267495", "0.49208876", "0.4903156", "0.49021822", "0.48957697", "0.48955992", "0.48907614", "0.48906985", "0.48875234", "0.48863828", "0.4885732", "0.48851874", "0.48844758", "0.4879374", "0.4877377", "0.48666996", "0.4865436", "0.48651674", "0.4859743", "0.48578945", "0.48562524", "0.48535162", "0.48485562", "0.48480752", "0.48446932", "0.48431122", "0.483623", "0.48358473", "0.48349944", "0.4834168", "0.48339757", "0.48261175", "0.48248306", "0.48180375", "0.48175034", "0.48145443", "0.479913", "0.4798447", "0.47964942", "0.47945935", "0.47883186", "0.47851285" ]
0.6162017
1
Extract the initial EPIs stored in dicom format.
def ExtractFirstEpi(self):
    for entry in self.info:
        if self.info[entry]['type'] == 'first_epi':
            epiname = self.info[entry]['imgfile']
            cmd = 'convert_file %s -f0 %s %s %s' % \
                  (self.flip_opts, entry,epiname, self.info[entry]['filetype'])
            fname = '%s%s' % (epiname, self.info[entry]['suffix'])
            self.CheckExec(cmd, [fname])
            self.info[entry]['imgfile'] = fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dicom_load():\n # Identify folders with EPI data\n dirs = [i for i in os.listdir(dcm_dir) if os.path.isdir(os.path.join(dcm_dir, i))]\n d_cnt = 0\n for d in dirs:\n dcm_file = os.path.join(dcm_dir,d,os.listdir(os.path.join(dcm_dir,d))[0])\n try:\n dcm_data = pydicom.dcmread(dcm_file)\n except:\n pass\n else:\n # If data is EPI then get start time, etc\n if 'EPI' in dcm_data.ImageType:\n dcm_dict[d_cnt] = {}\n dcm_dict[d_cnt]['dcm_file'] = dcm_file\n dcm_dict[d_cnt]['task_name'] = dcm_data.SeriesDescription\n dcm_dict[d_cnt]['task_name'] = dcm_dict[d_cnt]['task_name'].replace('_','-')\n date = dcm_data.SeriesDate\n start = dcm_data.SeriesTime\n start_time = '%s-%s-%s %s:%s:%s'%(date[0:4],date[4:6],date[6:],start[0:2],start[2:4],start[4:])\n dcm_dict[d_cnt]['start_time'] = datetime.fromisoformat(start_time)\n dcm_dict[d_cnt]['run_length'] = dcm_data[0x0019,0x105a].value/1000\n dcm_dict[d_cnt]['end_time'] = dcm_dict[d_cnt]['start_time'] + timedelta(milliseconds=dcm_dict[d_cnt]['run_length'])\n d_cnt = d_cnt+1", "def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. 
Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = 
'*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def _get_dmi(cls, data):\n\t\tdata['pdi'] = cls._get_pdi(data, 14)\n\t\tdata['mdi'] = cls._get_mdi(data, 14)\n\t\tdata['dx'] = cls._get_dx(data, 14)\n\t\tdata['adx'] = data['dx_6_ema']\n\t\tdata['adxr'] = data['adx_6_ema']\n\t\treturn data", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def extractSeriesInfo(self, inputdir):\n self.m_status.SetLabelText(\"Detecting DICOM data ... please wait\")\n allfiles = [y for x in walk(inputdir) for y in iglob(join(x[0], '*.IMA'))]\n self.controller.parseDicom(self, allfiles)\n # n = 1\n # for filename in allfiles:\n # try:\n # if not self.db.hasFile(filename):\n # dcm = dicom.read_file(filename)\n # updatemsg = \"Detecting DICOM data ... %d of %d\" % (n, len(allfiles))\n # self.m_status.SetLabelText(updatemsg)\n # n += 1\n #\n # # Check DICOM header info\n # series_num = str(dcm.SeriesInstanceUID)\n # uuid = self.generateuid(series_num)\n # imagetype = str(dcm.ImageType[2])\n # dicomdata = {'uuid': uuid,\n # 'patientid': str(dcm.PatientID),\n # 'patientname': str(dcm.PatientName),\n # 'seriesnum': series_num,\n # 'sequence': str(dcm.SequenceName),\n # 'protocol': str(dcm.ProtocolName),\n # 'imagetype': imagetype\n # }\n #\n # if not self.db.hasUuid(uuid):\n # self.db.addDicomdata(dicomdata)\n # if not self.db.hasFile(filename):\n # self.db.addDicomfile(uuid, filename)\n # except InvalidDicomError:\n # print(\"Not DICOM - skipping: \", filename)\n # continue\n # Load for selection\n # Columns: Toggle Select\n # Text PatientID\n # Text Sequence\n # Text Protocol\n # Text Image Type\n # Text Num Files\n # Text Series ID\n\n # for suid in db.getNewUuids():\n # numfiles = db.getNumberFiles(suid)\n # self.m_dataViewListCtrl1.AppendItem(\n # [True, self.controller.db.getDicomdata(suid, 'patientname'),\n # self.controller.db.getDicomdata(suid, 'sequence'),\n # self.controller.db.getDicomdata(suid, 'protocol'),\n # self.controller.db.getDicomdata(suid, 'imagetype'), str(numfiles),\n # self.controller.db.getDicomdata(suid, 'seriesnum')])\n #\n # msg = \"Total Series loaded: %d\" % self.m_dataViewListCtrl1.GetItemCount()\n # self.m_status.SetLabelText(msg)", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = 
self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def as_exons(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # parse data in the AbgpGeneLocusDir\n self.parseinputgff()\n self.rungetorf()\n # we need abgp_geneconfirmation.geneconfirmation first!\n geneconfirmation( { self._create_auto_key(): self.input } )\n\n # get only the CDS-type of tracks that define the coding sequence\n genecdstracks = filtergffs4fmethod( self._obtain_gene_gff(), GFF_CDS_FMETHOD ) \n\n if len(genecdstracks) == 1:\n # deal with SingleExonOnOrf -> TSS + donor\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n return [ SingleExonOnOrf(tss,genecdstracks[-1][4],orf,gff={}) ]\n\n elif len(genecdstracks) == 0:\n # no tracks !?\n return []\n elif not self.input['orfid-genestructure']:\n # not mappable on Orfs / or no genestructure provided\n return []\n else:\n # list with exons,introns to return \n exons = []\n introns = []\n exonsandintrons = []\n\n # deal with FirstExonOnOrf -> TSS + donor\n try:\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n except:\n print self.input.keys(), self.input['proteinfref']\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n donor = self._gene_cds_track_2_donor( genecdstracks[0], orf )\n donor.phase = ( genecdstracks[0][4]-genecdstracks[0][3]-1 ) % 3\n exons.append( FirstExonOnOrf(tss,donor,orf,gff={}) )\n exonsandintrons.append( exons[-1] )\n\n # deal with internal ExonOnOrf(s): -> acceptor + donor\n for pos in range(1,len(genecdstracks)-1):\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][pos])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[pos], orf )\n accep.phase = exons[-1].donor.phase\n donor = self._gene_cds_track_2_donor( genecdstracks[pos], orf )\n donor.phase = ( genecdstracks[pos][4]-genecdstracks[pos][3]-1+accep.phase ) % 3\n exons.append( ExonOnOrf(accep,donor,orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf, exons[-2].orf,\n exons[-1].acceptor, exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # deal with FinalExonOnOrf -> acceptor + StopCodon\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][-1])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[-1], orf )\n accep.phase = exons[-1].donor.phase\n exons.append( FinalExonOnOrf(accep,genecdstracks[-1][4],orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf,exons[-2].orf,\n exons[-1].acceptor,exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # return list of 
exons&introns\n return exonsandintrons", "def read_icd(self):\n wiki = wikipediaapi.Wikipedia('en') # may as well declare this here so I don't need to call it every query\n supplemental_articles = []\n with open(ICD10_DESC_PATH, 'r') as f:\n current_family = [] # list of lists of descriptions within the current family (3 letter code = family)\n current_parent = None # Most recent 3 letter code seen\n for line in tqdm(f.readlines(), desc=\"ICD10 Lines Processed\"):\n\n code = line[6:14].strip().lower()\n description = simple_clean(line[77:])\n self.code2desc[code] = description.split()\n\n if len(code) == PARENT_CODE_LENGTH: # found a parent\n # query web if set params to True\n wiki_result = self.query_wikipedia(wiki, description) if self.query else []\n pubmed_result = self.query_pubmed(description) if self.query else []\n\n # store results\n if wiki_result:\n supplemental_articles.extend(wiki_result)\n if pubmed_result:\n supplemental_articles.extend(pubmed_result)\n\n # update metrics using current family\n self.process_family_frequencies(current_parent, current_family)\n current_family = []\n current_parent = code\n current_family.append(description.split())\n self.n_desc += 1\n\n # process the last family\n self.process_family_frequencies(current_parent, current_family)\n # go through all the articles we found, preprocess, and add to self.data\n self.data.extend(self.process_articles(supplemental_articles))\n\n # lastly calculate tf and idf over all descriptions (not including articles here) for use in weighting later\n self.n_words = log10(self.n_words)\n self.n_desc = log10(self.n_words)\n self.word2tf = {word: log10(count) - self.n_words for word, count in self.word2tf.items()}\n self.word2df = {word: count - self.n_desc for word, count in self.word2df.items()}\n self.dump()", "def extract(args):\n prism.extract.run(\n input_fp=args.input,\n output_fp=args.output,\n depth_cutoff=args.depth_cutoff,\n num_cpg_cutoff=args.num_cpg_cutoff,\n prepend_chr=args.prepend_chr,\n paired=args.paired,\n verbose=args.verbose,\n )", "def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes", "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi 
= self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def disease_descriptors(civic_did8):\n return [civic_did8]", "def get_ephemeris(rundate, sat_name):\n file_key = \"slr_ephemeris\"\n ephemeris_data = get_satellite_vars(sat_name)\n provider_list = config.tech.prediction_providers.list\n # Find the latest version of the observation file\n versions = config.files.glob_variable(file_key, \"version\", r\"\\d+\", file_vars=ephemeris_data)\n\n try:\n ephemeris_data[\"version\"] = sorted(versions)[-1]\n providers = config.files.glob_variable(file_key, \"provider\", r\"\\w+\", file_vars=ephemeris_data)\n for provider in provider_list:\n if provider in providers:\n ephemeris_data[\"provider\"] = provider\n break\n else:\n log.fatal(f\"No valid provider found: {', '.join(providers)}\")\n except IndexError:\n log.info(\"No ephemeris data found\")\n log.info(f\"Download manually from https://cddis.nasa.gov/archive/slr/cpf_predicts/{rundate.year}/{sat_name}\")\n log.fatal(f\"Please save missing file as 
'{config.files.path(file_key)}' !\")\n eph_parser = parsers.parse_key(file_key, file_vars=ephemeris_data)\n eph = calculate_initial_values(eph_parser.as_dict(), rundate)\n\n return eph", "def extract_core_ids(self):\n path2folder = 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[self.radii[0]] + '/'\n analysis_files = [dir for dir in os.listdir(path2folder) if dir.startswith('Matrix-analysis-IP_')]\n analysis_file = path2folder + analysis_files[0] #work for 1 component system\n with open(analysis_file, 'r') as fid:\n my_file = yaml.load(fid, Loader=yaml.FullLoader)\n self.core_ids = list(my_file.keys())\n self.mol_name = analysis_files[0].split('_')[1].split('.')[0]\n\n\n print('coreids', self.core_ids)", "def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None", "def make_pmodel_energies():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n data = pmodel.load_data(\"test_data/protein_load/traj/traj_test.xtc\")\n heps, dheps = pmodel.get_potentials_epsilon(data)\n\n true_energies = np.loadtxt(\"test_data/protein_load/traj/energy_gaussian_test.dat\")\n\n return pmodel, data, heps, dheps, true_energies", "def 
dicom_cli():", "def test_getAbstract(self):\n cases = {\n self.test_eac + 'NE00001.xml':'NE00001',\n self.test_eac + 'NE00100.xml':'NE00100',\n self.test_eac + 'NE00200.xml':'NE00200',\n self.test_eac + 'NE00600.xml':'NE00600',\n }\n for case in cases:\n doc = EacCpf.EacCpf(case, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n self.assertNotEqual(doc, None)\n abstract = doc.getAbstract()\n self.assertNotEqual(abstract, None)", "def extract_all_lazy():\n\n\t#Construct filepaths: Data COMP_INFO_1\n\tdata_ci1_name = \"DATA_2016_COMP_INFO_1.csv\"\n\tdata_ci1_fullname = os.path.join(files_location, data_ci1_name)\n\t#Data COMP_INFO_2\n\tdata_ci2_name = \"DATA_2016_COMP_INFO_2.csv\"\n\tdata_ci2_fullname = os.path.join(files_location, data_ci2_name)\n\t#Data PROPERTY INFO\n\tdata_pi_name = \"DATA_2016_PROPERTY_INFO_ST.csv\"\n\tdata_pi_fullname = os.path.join(files_location, data_pi_name)\n\t#Data General Info\n\tdata_gi_name = \"DATA_2016_GENERAL_INFO.csv\"\n\tdata_gi_fullname = os.path.join(files_location, data_gi_name)\n\n\t#Read & Process COMP_INFO\n\tdata_ci1 = pd.read_csv(data_ci1_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\tdata_ci2 = pd.read_csv(data_ci2_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\n\tdata_ci = data_ci1.append(data_ci2)\n\tdata_ci['QUESTION'] = data_ci['QUESTION'].replace(constants.ci_mapping)\n\t# Take only the survey questions mapped\n\tdata_ci = data_ci[data_ci['QUESTION'].isin(constants.ci_mapping.values())]\n\tdata_ci = data_ci.set_index(['PROPERTY_CODE','PROPERTY_NAME','JOB_CODE','POSITION'])\n\tdata_ci = data_ci.pivot(columns=\"QUESTION\")\n\tdata_ci.columns = [\"_\".join(pair) for pair in data_ci.columns]\n\tdata_ci = data_ci.reset_index()\n\n\t#Read & Process Property Info data\n\tdata_pi = pd.read_csv(data_pi_fullname, usecols = constants.keep_columns_PI, encoding='ISO-8859-1')\n\t#survey_type_transformed = transform.surveytype_categorical(data_pi)\n\t#data_pi = pd.merge(data_pi, survey_type_transformed, on=['PROPERTY_CODE'])\n\n\t#Read & Process General Info\n\tdata_gi = pd.read_csv(data_gi_fullname, skiprows = 2, usecols = constants.keep_columns_GI, encoding='ISO-8859-1')\n\tdata_gi['QUESTION'] = data_gi['QUESTION'].replace(constants.gi_mapping)\n\t# Take onl the survey questions mapped\n\tdata_gi = data_gi[data_gi['QUESTION'].isin(constants.gi_mapping.values())]\n\tdata_gi = data_gi.set_index(['PROPERTY_CODE','PROPERTY_NAME'])\n\tdata_gi = data_gi.pivot(columns=\"QUESTION\")\n\tdata_gi.columns = [\"_\".join(pair) for pair in data_gi.columns]\n\tdata_gi = data_gi.reset_index()\n\n\t#This frame needs to be reworked\n\td_ci = pd.merge(data_gi, data_pi, on = ['PROPERTY_CODE','PROPERTY_NAME'])\n\td_ci = pd.merge(d_ci, data_ci, on = ['PROPERTY_CODE','PROPERTY_NAME'],suffixes= ['_ci','_gi'])\n\n\t#Observations by Dimensions to determine top X markets\n\t#Can this be in a better position?\n\td_ci = d_ci[~(d_ci['PROPERTY_NAME'].isin(constants.del_rows_property_name))]\n\td_ci['POSITION'] = d_ci['POSITION'].astype(str)\n\n\tpayload = {}\n\tpayload['gi'] = data_gi\n\tpayload['pi'] = data_pi\n\tpayload['ci'] = data_ci\n\tpayload['d_ci'] = d_ci\n\n\treturn payload", "def extract(src_dir,feat_file,ivectors_dir,num_gselect):\n os.system(\"./extract_ivectors.sh --num-gselect \"+str(num_gselect)+ \" \" + src_dir + \" \" + feat_file + \" \" + ivectors_dir)\n keys=[]\n ivectors=np.empty((0,0))\n for key,mat in 
kaldi_io.read_vec_flt_scp(ivectors_dir+'/ivector.scp'):\n if ivectors.shape[1] != mat.shape[0]:\n ivectors=ivectors.reshape((0,mat.shape[0]))\n ivectors=np.vstack((ivectors,mat))\n keys.append(key)\n\n ivectors=np.asarray(ivectors)\n keys=np.asarray(keys)\n return ivectors,keys", "def extract_mediapackage_endpoints(mp_client, mp_channel_id_list):\n emp_endpoint_list = {}\n for channel in mp_channel_id_list:\n emp_endpoint_list[str(channel)] = []\n response = mp_client.list_origin_endpoints()\n for endpoint in response['OriginEndpoints']:\n if str(endpoint[\"ChannelId\"]) in mp_channel_id_list:\n emp_endpoint_list[str(endpoint[\"ChannelId\"])].append(str(endpoint['Id']))\n return emp_endpoint_list", "def __init__(self, epics_only=False, *args, **kwargs):\n self._kwargs = {}\n self._detectors = {}\n self._det_list = [] \n self._det_aliases = {}\n self._psplots = {}\n self._event_functions = {}\n self._source_attrs = []\n self._evt_time_last = (0,0)\n self.ievent = 0\n self._reloadOnLoadRun = False\n self._reloadOnNextEvent = False\n self.psana_cfg_dict = {}\n self._default_module_path = ''\n\n# self._user_attrs = {}\n# self._histograms = {}\n \n for key in kwargs:\n self._kwargs[key] = kwargs[key] \n if key in self._exp_defaults:\n setattr(self,key,kwargs[key])\n print 'setting ',key, kwargs[key]\n\n self._device_config = read_device_config(**kwargs)\n self._device_sets = self._device_config['device_sets'] \n self._device_types = self._device_config['device_types'] \n\n for det in self._device_sets:\n if 'det' in self._device_sets[det]:\n if ('detName' in self._device_sets[det]['det'] or\n 'typeName' in self._device_sets[det]['det']):\n self._det_list.append(det)\n if 'det_key' in self._device_sets[det]['det']:\n det_key = self._device_sets[det]['det']['det_key']\n self._det_aliases[det_key] = det \n else:\n pass\n \n# if 'pvs' in self._device_sets[det]:\n# for attr in self._device_sets[det]['pvs']:\n# pvbase = self._device_sets[det]['pvs'][attr]['base']\n# alias = '_'.join([det,attr])\n# self.add_pv(pvbase, alias)\n\n self.set_exp_defaults(**kwargs)\n if not self._kwargs.get('noload'):\n self.data_source = self.get_data_source(**kwargs)\n print 'Data Source = ', self.data_source\n else:\n self.data_source = None\n\n if not self.data_source:\n self._kwargs['noload'] = True\n else:\n kwargs['run'] = self.run\n\n# if self._kwargs.get('noload') or self.live:\n# if self._kwargs.get('epics_live'):\n# self.set_kwargs(ami=True)\n \n if self._kwargs.get('ami'):\n print 'loading ami'\n self.load_ami(**kwargs)\n\n if not self._kwargs.get('noload'):\n print 'loading run'\n self.load_run(*args, **kwargs)\n self._no_epicsStore = False\n \n print 'Instrument = ', self.instrument\n\n if self._kwargs.get('epics_live'): # and self._kwargs.get('epics_file'):\n print 'loading epics'\n self.load_epicsLive(**kwargs)\n\n if self.ds and self.live:\n self.next_event()\n \n if self.ds and self._reloadOnNextEvent:\n self.next_event()\n \n if not self.ds:\n self._no_epicsStore = True\n self._no_evtData = True\n for det in self._device_sets:\n if 'pvs' in self._device_sets[det]:\n print 'Adding epics ',det\n self.add_detector(det)", "def list_all_ephemerides_files(self) -> Dict:\n ephs = self.list_result_ephemerides_files()\n while 'nextPageToken' in ephs:\n next_page_token = ephs['nextPageToken']\n _, e = self.list_result_ephemerides_files(page_token=next_page_token)\n ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])\n return ephs", "def _get_eps_xml(self):\n format_path = 
os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def gene_descriptors(civic_gid19):\n return [civic_gid19]", "def decode(self):\n # Extract all the experiments\n\n # Map of imageset/scan pairs\n imagesets = {}\n\n # For every experiment, use the given input to create\n # a sensible experiment.\n el = ExperimentList()\n for eobj in self._obj[\"experiment\"]:\n\n # Get the models\n identifier = eobj.get(\"identifier\", \"\")\n beam = self._lookup_model(\"beam\", eobj)\n detector = self._lookup_model(\"detector\", eobj)\n goniometer = self._lookup_model(\"goniometer\", eobj)\n scan = self._lookup_model(\"scan\", eobj)\n crystal = self._lookup_model(\"crystal\", eobj)\n profile = self._lookup_model(\"profile\", eobj)\n scaling_model = self._lookup_model(\"scaling_model\", eobj)\n\n key = (eobj.get(\"imageset\"), eobj.get(\"scan\"))\n\n imageset = None\n try:\n imageset = imagesets[key] # type: ImageSet\n except KeyError:\n # This imageset hasn't been loaded yet - create it\n imageset_data = self._lookup_model(\"imageset\", eobj)\n\n # Create the imageset from the input data\n if imageset_data is not None:\n if \"params\" in imageset_data:\n format_kwargs = imageset_data[\"params\"]\n else:\n format_kwargs = {}\n\n # Load the external lookup data\n mask_filename, mask = self._load_pickle_path(imageset_data, \"mask\")\n gain_filename, gain = self._load_pickle_path(imageset_data, \"gain\")\n pedestal_filename, pedestal = self._load_pickle_path(\n imageset_data, \"pedestal\"\n )\n dx_filename, dx = self._load_pickle_path(imageset_data, \"dx\")\n dy_filename, dy = self._load_pickle_path(imageset_data, \"dy\")\n\n if imageset_data[\"__id__\"] == \"ImageSet\":\n imageset = self._make_stills(\n imageset_data, format_kwargs=format_kwargs\n )\n elif imageset_data[\"__id__\"] == \"ImageGrid\":\n imageset = self._make_grid(\n imageset_data, format_kwargs=format_kwargs\n )\n elif (\n imageset_data[\"__id__\"] == \"ImageSequence\"\n or imageset_data[\"__id__\"] == \"ImageSweep\"\n ):\n imageset = self._make_sequence(\n imageset_data,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n format_kwargs=format_kwargs,\n )\n elif imageset_data[\"__id__\"] == \"MemImageSet\":\n imageset = self._make_mem_imageset(imageset_data)\n else:\n raise RuntimeError(\"Unknown imageset type\")\n\n if imageset is not None:\n # Set the external lookup\n if mask is None:\n mask = ImageBool()\n else:\n mask = ImageBool(mask)\n if gain is None:\n gain = ImageDouble()\n else:\n gain = ImageDouble(gain)\n if pedestal is None:\n pedestal = ImageDouble()\n else:\n pedestal = ImageDouble(pedestal)\n if dx is None:\n dx = ImageDouble()\n else:\n dx = ImageDouble(dx)\n if dy is None:\n dy = ImageDouble()\n else:\n dy = ImageDouble(dy)\n\n if not 
imageset.external_lookup.mask.data.empty():\n if not mask.empty():\n mask = tuple(m.data() for m in mask)\n for m1, m2 in zip(\n mask, imageset.external_lookup.mask.data\n ):\n m1 &= m2.data()\n imageset.external_lookup.mask.data = ImageBool(mask)\n else:\n imageset.external_lookup.mask.data = mask\n imageset.external_lookup.mask.filename = mask_filename\n imageset.external_lookup.gain.data = gain\n imageset.external_lookup.gain.filename = gain_filename\n imageset.external_lookup.pedestal.data = pedestal\n imageset.external_lookup.pedestal.filename = pedestal_filename\n imageset.external_lookup.dx.data = dx\n imageset.external_lookup.dx.filename = dx_filename\n imageset.external_lookup.dy.data = dy\n imageset.external_lookup.dy.filename = dy_filename\n\n # Update the imageset models\n if isinstance(imageset, ImageSequence):\n imageset.set_beam(beam)\n imageset.set_detector(detector)\n imageset.set_goniometer(goniometer)\n imageset.set_scan(scan)\n elif isinstance(imageset, (ImageSet, ImageGrid)):\n for i in range(len(imageset)):\n imageset.set_beam(beam, i)\n imageset.set_detector(detector, i)\n imageset.set_goniometer(goniometer, i)\n imageset.set_scan(scan, i)\n\n imageset.update_detector_px_mm_data()\n\n # Add the imageset to the dict - even if empty - as this will\n # prevent a duplicated attempt at reconstruction\n imagesets[key] = imageset\n\n # Append the experiment\n el.append(\n Experiment(\n imageset=imageset,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n crystal=crystal,\n profile=profile,\n scaling_model=scaling_model,\n identifier=identifier,\n )\n )\n\n # Return the experiment list\n return el", "def generate_initial_siaf_aperture_definitions(instrument):\n siaf_detector_layout = iando.read.read_siaf_detector_layout()\n\n prd_siaf = pysiaf.Siaf(instrument)\n siaf_definitions = Table()\n\n\n for attribute_name in 'AperName AperType XDetRef YDetRef XSciSize YSciSize XSciRef YSciRef'.split():\n siaf_definitions[attribute_name] = [getattr(prd_siaf[aperture_name], attribute_name) for aperture_name in prd_siaf.apertures]\n\n parent_apertures = [None]*len(siaf_definitions)\n dependency_type = [None]*len(siaf_definitions)\n\n if instrument == 'NIRISS':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n if aperture_name != 'NIS_CEN':\n parent_apertures[i] = 'NIS_CEN'\n if '_OSS' in aperture_name:\n dependency_type[i] = 'oss_default'\n elif aperture_name != 'NIS_CEN':\n dependency_type[i] = 'default'\n\n elif instrument == 'FGS':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n if aperture_name not in ['FGS1_FULL', 'FGS2_FULL']:\n parent_apertures[i] = '{}_FULL'.format(aperture_name.split('_')[0])\n else:\n dependency_type[i] = 'master'\n if '_OSS' in aperture_name:\n dependency_type[i] = 'oss_default'\n elif aperture_name not in ['FGS1_FULL', 'FGS2_FULL']:\n dependency_type[i] = 'default'\n\n elif instrument == 'NIRSpec':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n if siaf_definitions['AperType'][i] == 'TRANSFORM':\n continue\n\n if '_OSS' in aperture_name:\n dependency_type[i] = 'oss_default'\n parent_apertures[i] = aperture_name.split('_OSS')[0]\n elif 'NRS_IFU' in aperture_name:\n parent_apertures[i] = 'NRS1_FULL'\n dependency_type[i] = 'default'\n elif aperture_name in ['NRS_S200B1_SLIT']:\n parent_apertures[i] = 'NRS2_FULL'\n dependency_type[i] = 'default'\n elif aperture_name in 'NRS_S200A1_SLIT NRS_S200A2_SLIT NRS_S400A1_SLIT NRS_S1600A1_SLIT NRS_FULL_IFU'.split():\n parent_apertures[i] = 
'NRS1_FULL'\n dependency_type[i] = 'default'\n elif ('_MSA1' in aperture_name) or ('_MSA2' in aperture_name):\n parent_apertures[i] = 'NRS2_FULL'\n dependency_type[i] = 'default'\n elif ('_MSA3' in aperture_name) or ('_MSA4' in aperture_name):\n parent_apertures[i] = 'NRS1_FULL'\n dependency_type[i] = 'default'\n elif aperture_name in ['NRS1_FP1MIMF']:\n parent_apertures[i] = 'NRS_S1600A1_SLIT'\n dependency_type[i] = 'FP1MIMF'\n elif 'MIMF' in aperture_name:\n parent_apertures[i] = aperture_name.split('_')[0]+'_FULL'\n dependency_type[i] = 'default'\n\n\n # if aperture_name not 'NIS_CEN':\n # parent_apertures[i] = 'NIS_CEN'\n # elif aperture_name != 'NIS_CEN':\n # dependency_type[i] = 'default'\n\n elif instrument == 'NIRCam':\n for i,aperture_name in enumerate(siaf_definitions['AperName']):\n\n # Master apertures\n if aperture_name in siaf_detector_layout['AperName']:\n dependency_type[i] = 'master'\n\n elif siaf_definitions['AperType'][i] in ['SUBARRAY', 'FULLSCA', 'ROI']:\n if 'MASK' in aperture_name:\n # Coronagraphic apertures with wedge offset\n dependency_type[i] = 'wedge'\n elif 'DHSPIL_WEDGES' in aperture_name:\n dependency_type[i] = 'dhspil_wedge'\n else:\n dependency_type[i] = 'default'\n sca_name = aperture_name[0:5]\n parent_apertures[i] = '{}_FULL'.format(sca_name)\n\n # OSS apertures\n elif siaf_definitions['AperType'][i] in ['OSS']:\n dependency_type[i] = 'default'\n parent_apertures[i] = aperture_name.split('_OSS')[0]\n\n elif (siaf_definitions['AperType'][i] in ['SLIT', 'COMPOUND']) and ('GRISM' in aperture_name) and ('WFSS' in aperture_name):\n dependency_type[i] = 'grism_wfss'\n sca_name = aperture_name.split('_')[0]\n # sca_name = aperture_name[0:5]\n parent_apertures[i] = '{}_FULL'.format(sca_name)\n\n # elif 'MASK' in aperture_name:\n # dependency_type[i] = 'wedge'\n # sca_name = aperture_name[0:5]\n # parent_apertures[i] = '{}_FULL'.format(sca_name)\n\n\n\n elif aperture_name in 'NRCALL_FULL NRCAS_FULL NRCBS_FULL'.split():\n dependency_type[i] = 'nircam_compound'\n if aperture_name == 'NRCALL_FULL':\n parent_apertures[i] = '; '.join(['NRCA1_FULL', 'NRCB2_FULL', 'NRCB1_FULL', 'NRCA2_FULL'])\n elif aperture_name == 'NRCAS_FULL':\n parent_apertures[i] = '; '.join(['NRCA1_FULL', 'NRCA3_FULL', 'NRCA4_FULL', 'NRCA2_FULL'])\n elif aperture_name == 'NRCBS_FULL':\n parent_apertures[i] = '; '.join(['NRCB4_FULL', 'NRCB2_FULL', 'NRCB1_FULL', 'NRCB3_FULL'])\n\n\n\n siaf_definitions['parent_apertures'] = parent_apertures\n siaf_definitions['dependency_type'] = dependency_type\n\n siaf_definitions.pprint()\n\n\n siaf_definitions_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_aperture_definition.txt'.format(instrument.lower()))\n\n comments = []\n comments.append('{} aperture definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains all the necessary aperture information to generate the full SIAF given the necessary reference files (focal plane alignment, distortion) and auxiliary information (DDC mapping, wedge offsets, ...)')\n comments.append('This file also defines the order in which the apertures are presented.')\n comments.append('')\n comments.append('Originally based on {}.'.format(JWST_PRD_VERSION))\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('{}'.format(username))\n comments.append('')\n siaf_definitions.meta['comments'] = comments\n siaf_definitions.write(siaf_definitions_file_name, format='ascii.fixed_width', delimiter=',',\n 
delimiter_pad=' ', bookend=False)", "def _get_data(ch_decim=1):\n # Read evoked\n evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0))\n evoked.info[\"bads\"] = [\"MEG 2443\"]\n with evoked.info._unlock():\n evoked.info[\"lowpass\"] = 16 # fake for decim\n evoked.decimate(12)\n evoked.crop(0.0, 0.3)\n picks = mne.pick_types(evoked.info, meg=True, eeg=False)\n picks = picks[::ch_decim]\n evoked.pick_channels([evoked.ch_names[pick] for pick in picks])\n evoked.info.normalize_proj()\n\n noise_cov = mne.read_cov(fname_cov)\n noise_cov[\"projs\"] = []\n noise_cov = regularize(noise_cov, evoked.info, rank=\"full\", proj=False)\n return evoked, noise_cov", "def get_dicom_info(paths, index_col=None, verbose=False):\n meta_info = []\n paths = tqdm_notebook(paths, leave=False) if verbose else paths\n for path in paths:\n first_slice = dicom.read_file(os.path.join(path, os.listdir(path)[0]))\n\n if hasattr(first_slice, 'PatientAge'):\n patient_age = str(first_slice.PatientAge)\n else:\n patient_age = ''\n\n if hasattr(first_slice, 'PatientSex'):\n patient_sex = str(first_slice.PatientSex)\n else:\n patient_sex = ''\n\n locations = []\n for name in os.listdir(path):\n slice_path = os.path.join(path, name)\n dicom_slice = dicom.read_file(slice_path, stop_before_pixels=True)\n locations.append(float(dicom_slice.SliceLocation))\n\n steps_z = np.diff(np.sort(np.array(locations)))\n spacing_z = np.min(steps_z)\n info_dict = {\n \"UniformSpacing\": np.allclose(steps_z, spacing_z),\n 'MinSpacingZ': np.min(steps_z),\n 'MaxSpacingZ': np.max(steps_z),\n 'SliceThickness': float(first_slice.SliceThickness),\n 'SpacingZ': spacing_z,\n 'SpacingY': float(first_slice.PixelSpacing[0]),\n 'SpacingX': float(first_slice.PixelSpacing[1]),\n 'StudyID': str(first_slice.StudyID),\n 'ConvolutionKernel': str(first_slice.ConvolutionKernel),\n 'FilterType': str(first_slice.FilterType),\n 'WindowWidth': str(first_slice.WindowWidth),\n 'WindowCenter': str(first_slice.WindowCenter),\n 'PatientAge': patient_age,\n 'PatientSex': patient_sex,\n 'AccessionNumber': str(first_slice.AccessionNumber),\n 'PatientID': str(first_slice.PatientID),\n 'Rows': int(first_slice.Rows),\n 'Columns': int(first_slice.Columns),\n 'NumSlices': len(os.listdir(path)),\n 'ScanID': os.path.basename(path),\n 'Index': str(first_slice.AccessionNumber) + '_' + os.path.basename(path),\n 'ScanPath': path\n }\n meta_info.append(info_dict)\n return pd.DataFrame(meta_info) if index_col is None else pd.DataFrame(meta_info).set_index(index_col)", "def get_product_with_editions_list ( self ) :\n tile_list = []\n stmt = \"select p.name from sdb_productedition e, sdb_product p where e.sys003 = 4319 and e.sys001 = p.id\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n tile_list.append(str(row[0]))\n return tile_list", "def protein_delins(egfr_context):\n params = {\n \"id\": \"normalize.variation:NP_001333827.1%3Ap.Leu747_Thr751delinsPro\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.eDMXxJw9shlSKF3znIg5abniGoyJ3GQ4\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.eDMXxJw9shlSKF3znIg5abniGoyJ3GQ4\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.Mm8duqYDJyel5ZnwScnxLyGH1i9lcl3T\",\n \"interval\": {\n \"end\": {\"value\": 751, \"type\": \"Number\"},\n \"start\": {\"value\": 746, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.vyo55F6mA6n2LgN4cagcdRzOuh38V4mE\",\n \"type\": \"SequenceLocation\"\n 
},\n \"state\": {\n \"sequence\": \"P\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"protein\",\n \"structural_type\": \"SO:1000032\",\n \"vrs_ref_allele_seq\": \"LREAT\",\n \"gene_context\": egfr_context\n }\n return VariationDescriptor(**params)", "def _get_econt_info(self, out_log):\n f = open_general(out_log)\n tmptxt = f.readlines()\n f.close()\n econt = {}\n itmp = search_string('[read_energy] number of energy points', tmptxt)\n if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])\n itmp = search_string('energies and weights are:', tmptxt)\n if itmp>=0:\n tmp = []\n for ie in range(econt['Nepts']):\n tmpline = tmptxt[itmp+4+ie].split()[1:]\n tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])\n tmp = array(tmp)\n econt['epts'] = tmp[:,:2]\n econt['weights'] = tmp[:,2:]\n econt['emin'] = tmp[0,0]\n return econt", "def extract_extended_pdfs(pdfs: Union[Iterable[ZfitPDF], ZfitPDF]) -> List[ZfitPDF]:\n from ..models.functor import BaseFunctor\n\n pdfs = convert_to_container(pdfs)\n indep_pdfs = []\n\n for pdf in pdfs:\n if not pdf.is_extended:\n continue\n elif isinstance(pdf, BaseFunctor):\n if all(pdf.pdfs_extended):\n indep_pdfs.extend(extract_extended_pdfs(pdfs=pdf.pdfs))\n elif not any(pdf.pdfs_extended):\n indep_pdfs.append(pdf)\n else:\n assert False, \"Should not reach this point, wrong assumptions. Please report bug.\"\n else: # extended, but not a functor\n indep_pdfs.append(pdf)\n\n return indep_pdfs", "def read_cine_protocol(series_dicom_header):\n assert len(series_dicom_header.StudyInstanceUID.unique()) == 1, 'Trying to read dicoms from multiple studies!'\n assert len(series_dicom_header.SpacingBetweenSlices.unique()) == 1\n\n SpacingBetweenSlices = list(series_dicom_header.SpacingBetweenSlices)[0]\n SliceLocations = series_dicom_header.SliceLocation.unique()\n number_of_slices = len(SliceLocations) \n\n phases_per_slice = [len(series_dicom_header[series_dicom_header.SliceLocation==SliceLocation].InstanceNumber) \n for SliceLocation in series_dicom_header.SliceLocation.unique()]\n number_of_phases = phases_per_slice[0]\n\n if len(np.unique(phases_per_slice)) != 1:\n warnings.warn('Number of phases is variable across slice locations! 
Could be real or error, check!.')\n return None\n \n print('Found cine study with (number_of_slices, number_of_phases)', number_of_slices, number_of_phases)\n pixel_array = pydicom.read_file(series_dicom_header.iloc[0].FileName).pixel_array\n \n sax_4D = np.zeros((pixel_array.shape +(number_of_slices, number_of_phases)), dtype=pixel_array.dtype)\n \n dicom_4D_paths = {}\n for SliceIndex, SliceLocation in enumerate(sorted(SliceLocations)):\n slice_header = series_dicom_header[series_dicom_header.SliceLocation==SliceLocation]\n dicom_4D_paths[SliceIndex] = []\n for InstanceIndex, InstanceNumber in enumerate(sorted(slice_header.InstanceNumber)):\n DicomFileName = slice_header[slice_header.InstanceNumber==InstanceNumber].FileName.item()\n dicom = pydicom.read_file(DicomFileName)\n sax_4D[:,:,SliceIndex,InstanceIndex] += dicom.pixel_array\n\n dicom_4D_paths[SliceIndex] += [DicomFileName]\n\n affine = read_affine(series_dicom_header.iloc[series_dicom_header.SliceLocation.argmin()])\n\n sax_4D = nib.Nifti1Image(sax_4D, affine=affine), \n sax_4D.SpacingBetweenSlices = SpacingBetweenSlices\n\n return sax_4D, dicom_4D_paths", "def parse_episode(filename):\n print_info('Extracting episode from {0}'.format(filename))\n for regex in EPISODE_NUM_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_ep = m.group('Episode').lower()\n print_info('Extracted episode: {0}'.format(extracted_ep))\n\n if '-' in extracted_ep:\n print_info('Multiple Episodes found')\n tokens = extracted_ep.split('-e')\n first_token = tokens[0]\n last_token = tokens[len(tokens)-1]\n return parse_episode(first_token) + '-' + parse_episode(last_token)\n else:\n ep_num = int(extracted_ep)\n if ep_num is not None and ep_num > 0:\n print_info('Episode might be: {0}'.format(ep_num))\n return 'E' + format_num(ep_num)\n\n return None", "def test_get_many(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND072023.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(54)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND072023_recov.yml', RESOURCE_PATH)", "def _get_components(\n design_unit: oechem.OEDesignUnit\n ) -> Tuple[oechem.OEGraphMol(), oechem.OEGraphMol(), oechem.OEGraphMol()]:\n from openeye import oechem\n\n protein, solvent, ligand = oechem.OEGraphMol(), oechem.OEGraphMol(), oechem.OEGraphMol()\n\n logging.debug(\"Extracting molecular components ...\")\n design_unit.GetProtein(protein)\n design_unit.GetSolvent(solvent)\n design_unit.GetLigand(ligand)\n\n # delete protein atoms with no name (found in prepared protein of 4ll0)\n for atom in protein.GetAtoms():\n if not atom.GetName().strip():\n logging.debug(\"Deleting unknown atom ...\")\n protein.DeleteAtom(atom)\n\n # perceive residues to remove artifacts of other design units in the sequence of the protein\n # preserve certain properties to assure correct behavior of the pipeline,\n # e.g. 
deletion of chains in OEKLIFSKinaseApoFeaturizer._process_kinase_domain method\n preserved_info = (\n oechem.OEPreserveResInfo_ResidueNumber\n | oechem.OEPreserveResInfo_ResidueName\n | oechem.OEPreserveResInfo_AtomName\n | oechem.OEPreserveResInfo_ChainID\n | oechem.OEPreserveResInfo_HetAtom\n | oechem.OEPreserveResInfo_InsertCode\n | oechem.OEPreserveResInfo_AlternateLocation\n )\n oechem.OEPerceiveResidues(protein, preserved_info)\n oechem.OEPerceiveResidues(solvent, preserved_info)\n oechem.OEPerceiveResidues(ligand)\n\n logging.debug(\n \"Number of component atoms: \" +\n f\"Protein - {protein.NumAtoms()}, \" +\n f\"Solvent - {solvent.NumAtoms()}, \" +\n f\"Ligand - {ligand.NumAtoms()}.\"\n )\n return protein, solvent, ligand", "def read_SimCenter_EDP_input(input_path, EDP_kinds=('PID','PFA'), \n units = dict(PID=1., PFA=1.),\n verbose=False):\n \n # initialize the data container\n data = {}\n\n # read the collection of EDP inputs...\n # the read_csv method in pandas is sufficiently versatile to handle the\n # tabular format of dakota\n EDP_raw = pd.read_csv(input_path, sep='\\s+', header=0,\n index_col='%eval_id')\n # set the index to be zero-based\n EDP_raw.index = EDP_raw.index - 1\n\n # search the header for EDP information\n for column in EDP_raw.columns:\n for kind in EDP_kinds:\n if kind in column:\n\n if kind not in data.keys():\n data.update({kind: []})\n\n # extract info about the location, direction, and scenario\n info = column.split('-')\n \n # get the scale factor to perform unit conversion\n f_unit = units[kind]\n \n # store the data\n data[kind].append(dict(\n raw_data=(EDP_raw[column].values * f_unit).tolist(),\n location=info[2],\n direction=info[3],\n scenario_id=info[0]\n ))\n\n if verbose: pp.pprint(data)\n\n return data", "def Abstracts(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('abstracts', default)\n return [HEP.SourcedValueObject(i) for i in tmp]", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def extract_energies(self):\n path2save = 'Analysis/energies.pkl'\n #check, if I have to extract them, or they are already extracted. This the latter case, load them.\n if os.path.exists(path2save):\n print(\"extraction of the polarizaion has already been done. 
Loading polarizations from from pkl\")\n # TODO delete to check if exists above and do load without doing\n with open('Analysis/energies.pkl', 'rb') as fid:\n [self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols] \\\n = pickle.load(fid)\n else:\n print('Energies are being extracting and will be saved to pkl')\n for i, radius in enumerate(self.radii):\n self.E_sd_plus[radius] = {}\n self.E_sd_0[radius] = {}\n self.E_sd_minus[radius] = {}\n\n self.E_sum_env_plus[radius] = {}\n self.E_sum_env_0[radius] = {}\n self.E_sum_env_minus[radius] = {}\n\n self.V0_plus[radius] = {}\n self.V0_0[radius] = {}\n self.V0_minus[radius] = {}\n\n self.E_env_plus[radius] = {}\n self.E_env_0[radius] = {}\n self.E_env_minus[radius] = {}\n\n self.V_env_plus[radius] = {}\n self.V_env_0[radius] = {}\n self.V_env_minus[radius] = {}\n\n self.n_mols[radius] = {}\n\n for j, core_id in enumerate(self.core_ids):\n #path2file_ip = \\\n # 'Analysis/' + self.dict_radii_folder_IP[radius] + '/Matrix-analysis-IP_' \\\n # + self.mol_name + '-Mol_' + str(core_id) + '_C_1.yml'\n\n path2file_ip = \\\n 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[radius]\\\n + '/Matrix-analysis-IP_' + self.mol_name + '.yml' # new\n path2file_ea = \\\n 'Analysis/EA_by_radius/' + self.dict_radii_folder_EA[radius]\\\n + '/Matrix-analysis-EA_' + self.mol_name + '.yml'\n\n # IP. Charged states: \"+\" and \"0\"\n with open(path2file_ip) as fid:\n ip_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n with open(path2file_ea) as fid:\n ea_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n\n\n # number of mols extraction\n self.n_mols[radius][core_id] = len(ip_dict[int(core_id)]['energies'])\n\n # sd extraction. 
E_sd = E_0 + V_0\n self.E_sd_plus[radius][core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged'] #new\n self.E_sd_0[radius][core_id] = ip_dict[core_id]['energies'][int(core_id)]['total_e_uncharged']\n self.E_sd_minus[radius][core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged']\n # E_0\n self.E0_plus[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n self.E0_0[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_uncharged_vacuum']\n self.E0_minus[core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n # # E_0_vacuum\n # self.E0_plus_vacuum[core_id] =\n # self.E0_0_vacuum[core_id] =\n # self.E0_minus_vacuum[core_id] =\n\n\n # V_0\n self.V0_plus[radius][core_id] = self.E_sd_plus[radius][core_id] - self.E0_plus[core_id]\n self.V0_0[radius][core_id] = self.E_sd_0[radius][core_id] - self.E0_0[core_id]\n self.V0_minus[radius][core_id] = self.E_sd_minus[radius][core_id] - self.E0_minus[core_id]\n\n # E_sum_env = \\sum_i\\ne 0 E_i \\sum_{j=0}^{N} V_{ij}\n ip_env_sub_dict = ip_dict[int(core_id)]['energies']#new\n del ip_env_sub_dict[int(core_id)]\n # del ip_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n\n ea_env_sub_dict = ea_dict[int(core_id)]['energies'] # new\n del ea_env_sub_dict[int(core_id)]\n # del ea_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n # tmp = ip_env_sub_dict['energies'][]\n\n list_total_e_env_plus = [ip_env_sub_dict[env_id]['total_e_charged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_plus[radius][int(core_id)] = np.sum(list_total_e_env_plus) if not list_total_e_env_plus == [] else 0.0\n list_total_e_env_0 = [ip_env_sub_dict[env_id]['total_e_uncharged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_0[radius][int(core_id)] = np.sum(list_total_e_env_0) if not list_total_e_env_0 == [] else 0.0\n list_total_e_env_minus = [ea_env_sub_dict[env_id]['total_e_charged'] for env_id in ea_env_sub_dict]\n self.E_sum_env_minus[radius][int(core_id)] = np.sum(list_total_e_env_minus) if not list_total_e_env_minus == [] else 0.0\n\n # E_env = \\sum_i \\ne 0 E_i. sum of DFT env energies.\n list_vacuum_env_e_plus = [ip_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_plus[radius][int(core_id)] = np.sum(list_vacuum_env_e_plus) if not list_vacuum_env_e_plus == [] else 0.0\n list_vacuum_env_e_0 = [ip_env_sub_dict[env_id]['total_e_uncharged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_0[radius][int(core_id)] = np.sum(list_vacuum_env_e_0) if not list_vacuum_env_e_0 == [] else 0.0\n list_vacuum_env_e_minus = [ea_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ea_env_sub_dict]\n self.E_env_minus[radius][int(core_id)] = np.sum(list_vacuum_env_e_minus) if not list_vacuum_env_e_minus == [] else 0.0\n\n # V_env = 0.5 (\\sum_{i=1} \\sum_{j=1} V_{ij}). classical interaction of env. 
mols\n self.V_env_plus[radius][core_id] = 0.5 * (self.E_sum_env_plus[radius][core_id]\n - self.E_env_plus[radius][core_id]\n - self.V0_plus[radius][core_id])\n\n self.V_env_0[radius][core_id] = 0.5 * (self.E_sum_env_0[radius][core_id]\n - self.E_env_0[radius][core_id]\n - self.V0_0[radius][core_id])\n\n self.V_env_minus[radius][core_id] = 0.5 * (self.E_sum_env_minus[radius][core_id]\n - self.E_env_minus[radius][core_id]\n - self.V0_minus[radius][core_id])\n\n\n append_dict_with_mean(self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.E0_plus, self.E0_0, self.E0_minus,\n self.n_mols) # compute and add \"mean\" to all mentioned dicts\n\n with open('Analysis/energies.pkl', 'wb') as fid:\n pickle.dump([self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols],\n fid)\n print(\"Energies are extracted and dumped to pkl\")", "def load_all_dicom_images(self, verbose=True):\n if verbose: print(\"Loading dicom files ... This may take a moment.\")\n\n path = self.get_path_to_dicom_files()\n fnames = [fname for fname in os.listdir(path)\n if fname.endswith('.dcm') and not fname.startswith(\".\")]\n images = []\n for fname in fnames:\n image = dicom.dcmread(os.path.join(path,fname))\n\n seid = str(image.SeriesInstanceUID).strip()\n stid = str(image.StudyInstanceUID).strip()\n\n if seid == self.series_instance_uid and\\\n stid == self.study_instance_uid:\n images.append(image)\n\n # ##############################################\n # Clean multiple z scans.\n #\n # Some scans contain multiple slices with the same `z` coordinate \n # from the `ImagePositionPatient` tag.\n # The arbitrary choice to take the slice with lesser \n # `InstanceNumber` tag is made.\n # This takes some work to accomplish...\n zs = [float(img.ImagePositionPatient[-1]) for img in images]\n inums = [float(img.InstanceNumber) for img in images]\n inds = list(range(len(zs)))\n while np.unique(zs).shape[0] != len(inds):\n for i in inds:\n for j in inds:\n if i!=j and zs[i] == zs[j]:\n k = i if inums[i] > inums[j] else j\n inds.pop(inds.index(k))\n\n # Prune the duplicates found in the loops above.\n zs = [zs[i] for i in range(len(zs)) if i in inds]\n images = [images[i] for i in range(len(images)) if i in inds]\n\n # Sort everything by (now unique) ImagePositionPatient z coordinate.\n sort_inds = np.argsort(zs)\n images = [images[s] for s in sort_inds]\n # End multiple z clean.\n # ##############################################\n\n return images", "def _get_all_elems(self, protein_id: int):\n path_protein, _ = self._get_path(protein_id)\n try:\n # mol_pocket = Molecule(path_protein)\n mol_protein = Molecule(path_protein)\n mol_protein.filter('protein')\n if (self.type_feature == \"bio_properties\" or self.type_feature == \"bio_all_properties\"):\n mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)\n mol_pocket_element = mol_protein.element\n except FileNotFoundError:\n print(protein_id, \" exception\")\n path_protein, path_lig = self._get_path(2)\n mol_pocket = Molecule(path_protein)\n mol_pocket_element = mol_pocket.element\n return mol_pocket_element", "def _iep(s):\n d = defaultdict(dict)\n for _ep in iter_entry_points(s):\n try:\n d[_ep.name] = _ep.load()\n except Exception as e:\n d[_ep.name] = functools.partial(_broken_ep, _ep, e)\n return d", "def test_epik_enumeration():\n 
with mmtools.utils.temporary_directory() as tmp_dir:\n yaml_content = get_template_script(tmp_dir, keep_schrodinger=True)\n exp_builder = ExperimentBuilder(yaml_content)\n mol_ids = ['benzene-epik0', 'benzene-epikcustom']\n exp_builder._db._setup_molecules(*mol_ids)\n\n for mol_id in mol_ids:\n output_dir = exp_builder._db.get_molecule_dir(mol_id)\n output_basename = os.path.join(output_dir, mol_id + '-epik.')\n assert os.path.exists(output_basename + 'mol2')\n assert os.path.getsize(output_basename + 'mol2') > 0\n assert os.path.exists(output_basename + 'sdf')\n assert os.path.getsize(output_basename + 'sdf') > 0", "def parse_ic_info(file_path: str) -> pd.DataFrame:\n log = ess_factory(fullpath=file_path, check_for_errors=False)\n ic_dict = {item: []\n for item in ['label', 'type', 'atoms', 'redundant', 'scan']}\n scan_args = parse_scan_args(file_path)\n max_atom_ind = scan_args['n_atom']\n if isinstance(log, GaussianLog):\n ic_info_block = parse_str_blocks(file_path, 'Initial Parameters', '-----------', regex=False,\n tail_count=3)[0][5:-1]\n for line in ic_info_block:\n # Line example with split() indices:\n # 0 1 2 3 4 5 6 7\n # ! R1 R(1, 2) 1.3581 calculate D2E/DX2 analytically !\n terms = line.split()\n ic_dict['label'].append(terms[1])\n ic_dict['type'].append(terms[1][0]) # 'R: bond, A: angle, D: dihedral\n atom_inds = re.split(r'[(),]', terms[2])[1:-1]\n ic_dict['atoms'].append([int(atom_ind) for atom_ind in atom_inds])\n\n # Identify redundant, cases like 5 atom angles or redundant atoms\n if (ic_dict['type'][-1] == 'A' and len(atom_inds) > 3) \\\n or (ic_dict['type'][-1] == 'R' and len(atom_inds) > 2) \\\n or (ic_dict['type'][-1] == 'D' and len(atom_inds) > 4):\n ic_dict['redundant'].append(True)\n else:\n # Sometimes, redundant atoms with weird indices are added.\n # Reason unclear. 
Maybe to better define the molecule, or to\n # solve equations more easily.\n weird_indices = [index for index in ic_dict['atoms'][-1]\n if index <= 0 or index > max_atom_ind]\n if weird_indices:\n ic_dict['redundant'].append(True)\n else:\n ic_dict['redundant'].append(False)\n\n # Identify ics being scanned\n if len(scan_args['scan']) == len(atom_inds) == 4 \\\n and is_same_pivot(scan_args['scan'], ic_dict['atoms'][-1]):\n ic_dict['scan'].append(True)\n elif len(scan_args['scan']) == len(atom_inds) == 2 \\\n and set(scan_args['scan']) == set(ic_dict['atoms'][-1]):\n ic_dict['scan'].append(True)\n else:\n # Currently doesn't support scan of angles.\n ic_dict['scan'].append(False)\n else:\n raise NotImplementedError(f'parse_ic_info() can currently only parse Gaussian output '\n f'files, got {log}')\n ic_info = pd.DataFrame.from_dict(ic_dict)\n ic_info = ic_info.set_index('label')\n return ic_info", "def pdelements_first(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(1), ctypes.c_int32(0))", "def extract_activities(ncfile):\n\n # Get current dimensions.\n (niterations, nstates) = ncfile.variables['energies'].shape\n\n # Extract energies.\n print \"Reading energies...\"\n energies = ncfile.variables['energies']\n u_kln_replica = zeros([nstates, nstates, niterations], float64)\n for n in range(niterations):\n u_kln_replica[:,:,n] = energies[n,:,:]\n print \"Done.\"\n\n # Deconvolute replicas\n print \"Deconvoluting replicas...\"\n u_kln = zeros([nstates, nstates, niterations], float64)\n for iteration in range(niterations):\n state_indices = ncfile.variables['states'][iteration,:]\n u_kln[state_indices,:,iteration] = energies[iteration,:,:]\n print \"Done.\"\n\n # Show all self-energies\n print 'all self-energies for all replicas'\n for iteration in range(niterations):\n for replica in range(nstates):\n state = int(ncfile.variables['states'][iteration,replica])\n print '%12.1f' % energies[iteration, replica, state],\n print ''\n\n # If no energies are 'nan', we're clean.\n if not any(isnan(energies[:,:,:])):\n return\n\n # There are some energies that are 'nan', so check if the first iteration has nans in their *own* energies:\n u_k = diag(energies[0,:,:])\n if any(isnan(u_k)):\n print \"First iteration has exploded replicas. Check to make sure structures are minimized before dynamics\"\n print \"Energies for all replicas after equilibration:\"\n print u_k\n sys.exit(1)\n\n # There are some energies that are 'nan' past the first iteration. 
Find the first instances for each replica and write PDB files.\n first_nan_k = zeros([nstates], int32)\n for iteration in range(niterations):\n for k in range(nstates):\n if isnan(energies[iteration,k,k]) and first_nan_k[k]==0:\n first_nan_k[k] = iteration\n if not all(first_nan_k == 0):\n print \"Some replicas exploded during the simulation.\"\n print \"Iterations where explosions were detected for each replica:\"\n print first_nan_k\n print \"Writing PDB files immediately before explosions were detected...\"\n for replica in range(nstates): \n if (first_nan_k[replica] > 0):\n state = ncfile.variables['states'][iteration,replica]\n iteration = first_nan_k[replica] - 1\n filename = 'replica-%d-before-explosion.pdb' % replica\n title = 'replica %d state %d iteration %d' % (replica, state, iteration)\n write_pdb(atoms, filename, iteration, replica, title, ncfile)\n filename = 'replica-%d-before-explosion.crd' % replica \n write_crd(filename, iteration, replica, title, ncfile)\n sys.exit(1)\n\n # There are some energies that are 'nan', but these are energies at foreign lambdas. We'll just have to be careful with MBAR.\n # Raise a warning.\n print \"WARNING: Some energies at foreign lambdas are 'nan'. This is recoverable.\"\n \n return", "def read_ncbi_disease(flo):\n documents = []\n current_PMID, title, abstract, annotations = None, None, None, []\n for ln, line in enumerate(flo, start=1):\n line = line.rstrip('\\n')\n if not line:\n if current_PMID is not None:\n documents.append(Document(current_PMID, title, abstract,\n annotations))\n current_PMID, title, abstract = None, None, None\n annotations = []\n continue\n m = TEXT_LINE_RE.match(line)\n if m:\n PMID, tiab, text = m.groups()\n current_PMID = check_PMID(current_PMID, PMID)\n if tiab == 't':\n if title is not None:\n raise FormatError('Multiple titles for %s' % PMID)\n title = text\n elif tiab == 'a':\n if abstract is not None:\n raise FormatError('Multiple abstracts for %s' % PMID)\n abstract = text\n else:\n raise FormatError('Failed to parse line %s' % line)\n else:\n # Annotation line\n annotation = parse_annotation_line(line, ln)\n current_PMID = check_PMID(current_PMID, annotation.PMID)\n annotations.append(annotation)\n if current_PMID is not None:\n documents.append(Document(current_PMID, title, abstract, annotations))\n for d in documents:\n d.verify_annotations()\n return documents", "def get_abstract(doi):\n xml = download_article(doi)\n et = ET.fromstring(xml)\n coredata = et.find('article:coredata', elsevier_ns)\n abstract = coredata.find('dc:description', elsevier_ns)\n abs_text = abstract.text\n return abs_text", "def collect(self, vcfname, tag):\n if tag not in [\"TP\", \"FN\"]:\n return extractPiscesIndelFeatures(vcfname, tag, self.chr_depth)\n else:\n features = [\"CHROM\", \"POS\", \"REF\", \"ALT\", \"QUAL\", \"S.1.VT\",\n \"I.T_ALT_RATE\", \"I.DP_normal\", \"I.DP_tumor\", \"I.tag\", \"I.count\"]\n return GenericFeatures.collectFeatures(vcfname, tag, features, processor=StrelkaAdmixIndelFeatures.processValue)", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = 
line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def ProvideEphemerisData(self):\n return _gmat_py.EphemManager_ProvideEphemerisData(self)", "def list_result_ephemerides_files(\n self, page_size: int = 100, page_token: str = None) -> Dict:\n params = {}\n if page_size < 0 or page_size > 100:\n page_size = 100\n params['pageSize'] = page_size\n if page_token:\n params['pageToken'] = page_token\n ephs = self._rp._rest.get(\n f'/projects/{self._rp._project}/jobs/{self._job_uuid}'\n f'/ephemerides?{urllib.parse.urlencode(params)}')\n return ephs", "def _GetEpiOrder(self):\n self.epi_series.sort()\n for series in self.epi_series:\n self.GetEpiAcqTimes(series)\n self.AssignEpiNames()", "def get_endpoints(self, epg_dn):\n result = []\n for item in filter(lambda x: type(x).__name__ == 'CEp', self.query_child_objects(epg_dn)):\n # Creates a dynamic object type.\n endpoint = type('endpoint', (object,), {})\n\n # Filter the endpoint in memory looking for the object that contains the interface where the endpoint is\n # attached\n endpoint_connection_mo = filter(lambda x: type(x).__name__ == 'RsCEpToPathEp',\n self.query_child_objects(item.dn))[0]\n\n # Format the string to be human readable\n endpoint_connection_interface = str(endpoint_connection_mo.tDn).replace('topology/pod-1/paths','node').\\\n replace('pathep-[', '').replace(']','')\n\n # Add attributes to the object\n endpoint.ip = item.ip\n endpoint.mac = item.mac\n endpoint.name = item.name\n endpoint.interface = endpoint_connection_interface\n\n # Append it to the list\n result.append(endpoint)\n return result", "def get_identifiers(\n 
dicom_files,\n force=True,\n config=None,\n strip_sequences=False,\n remove_private=False,\n disable_skip=False,\n expand_sequences=True,\n):\n if not isinstance(dicom_files, list):\n dicom_files = [dicom_files]\n\n bot.debug(\"Extracting identifiers for %s dicom\" % len(dicom_files))\n lookup = dict()\n\n # Parse each dicom file\n for dicom_file in dicom_files:\n parser = DicomParser(dicom_file, force=force, config=config, disable_skip=False)\n lookup[parser.dicom_file] = parser.get_fields(expand_sequences=expand_sequences)\n\n return lookup", "def load_Xmd():\n # More dissimilar inputs\n x1md = [1, 1, -1, 1, -1, -1, -1, 1] # 4 out of 8 dissimilar\n x2md = [1, 1, 1, 1, 1, 1, -1, -1] # 5 out of 8 dissimilar\n x3md = [1, -1, -1, 1, 1, 1, -1, 1] # 5 out of 8 dissimilar\n Xmd = np.vstack((x1md, x2md, x3md))\n\n return Xmd", "def DOIs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('dois', default)\n return [HEP.DOIObject(i) for i in tmp]", "def getDetIDs():\n from ROOT import TFile, GATDataSet, MJTChannelMap\n run = bkg.getRunList(5,1)[0]\n gds = GATDataSet(run)\n chMap = gds.GetChannelMap()\n # chMap.DumpDetectorNames()\n\n dets = det.allDets\n for d in dets:\n detName = chMap.GetDetectorName(int(d[0]), int(d[1]), int(d[2]))\n\n # now match it to the IDs used in DataSetInfo.cc::Load(X)DetectorMap\n detID = '1' if detName[0]==\"P\" else '2'\n detID += detName[1:]\n tmp = list(detID)\n if detID[0] == '1':\n if tmp[-1] == \"A\": tmp[-1] = '0'\n if tmp[-1] == \"B\": tmp[-1] = '1'\n if tmp[-1] == \"C\": tmp[-1] = '2'\n detID = ''.join(tmp)\n\n print(\"'%s':%s,\" % (d, detID))", "def extract_nac_pet(dicom_folder):\n from glob import glob\n import os\n import shutil\n import re\n from nipype.interfaces.dcm2nii import Dcm2nii\n\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n return [atoi(c) for c in re.split('(\\d+)', text)]\n\n files = glob(os.path.join(os.path.abspath(dicom_folder), '*'))\n sorted_files = sorted(files, key=natural_keys)\n nac_pet_files = sorted_files[-127:]\n for f in nac_pet_files:\n shutil.copy(f, os.getcwd())\n dcm2nii = Dcm2nii()\n dcm2nii.inputs.source_dir = os.getcwd()\n nii_outputs = dcm2nii.run().outputs.converted_files\n print (nii_outputs)\n return nii_outputs[0]", "def parse_eeg_file(path):\n if os.path.splitext(path)[-1].lower() != '.edf':\n NotImplementedError(\"Only EDFs are supported currently. 
More files coming.\")\n\n try: #edf\n edf_file = mne.io.read_raw_edf(path, stim_channel=None, verbose=False)\n except RuntimeError: #edf+\n edf_file = mne.io.read_raw_edf(path, preload=True, stim_channel=None, verbose=False)\n\n # TODO edf++\n\n eeg_data = {}\n eeg_data['meas_date'] = datetime.datetime.fromtimestamp(edf_file.info[\"meas_date\"])\n eeg_data['nchan'] = edf_file.info[\"nchan\"]\n eeg_data['sfreq'] = edf_file.info[\"sfreq\"]\n eeg_data['subject_info'] = edf_file.info[\"subject_info\"]\n eeg_data['ch_names'] = edf_file.ch_names\n\n return {\"eeg_\"+key: value for key, value in eeg_data.items()}", "def GEEnasaNEXGDDP(ptsFile,metric,timeStep,startYear,endYear,scenarios,buf,poly,username,folderOut,models = ['ACCESS1-0', 'bcc-csm1-1', 'BNU-ESM',\n 'CanESM2', 'CCSM4', 'CESM1-BGC', 'CNRM-CM5', 'CSIRO-Mk3-6-0',\n 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'inmcm4', 'IPSL-CM5A-LR',\n 'IPSL-CM5A-MR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 'MIROC5', 'MPI-ESM-LR',\n 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M'], scalePix = 25000):\n \n # load required libraries\n import ee\n\n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n\n time_d = {}\n time_d['day'] = 'projd'\n time_d['month'] = 'projm'\n time_d['year'] = 'projy'\n \n for met in metric:\n\n for scenario in scenarios:\n\n for model in models:\n\n NEX = (ee.ImageCollection('NASA/NEX-GDDP')\n .select(met)\n .filterMetadata('model', 'equals', model)\n .filterMetadata('scenario', 'equals', scenario))\n\n metL = [met]\n \n years = list(range(startYear, endYear + 1))\n monthsEE = ee.List(list(range(0,(12*len(years)))))\n yearsEE = ee.List(years)\n\n######Turned off unit conversion, because it fails when there are too many pts\n## if (met == 'pr'):\n##\n## def Scale1(img):\n## return (img.float()\n## .multiply(86400)\n## .copyProperties(img,['system:time_start','system:time_end']))\n##\n## NEX = NEX0.map(Scale1)\n## \n## elif any([(met == 'tasmin'),(met == 'tasmax')]):\n##\n## def KtoC(img):\n## return (img.float()\n## .subtract(273.15)\n## .copyProperties(img,['system:time_start','system:time_end']))\n##\n## NEX = NEX0.map(KtoC)\n \n if all([(timeStep == 'year'),any([(met == 'tasmin'),(met == 'tasmax')])]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'month'),any([(met == 'tasmin'),(met == 'tasmax')])]):\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'year'),(met == 'pr')]):\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .sum()\n 
.copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif all([(timeStep == 'month'),(met == 'pr')]):\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (NEX\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .sum()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif timeStep == 'day':\n\n img_col = NEX.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n #print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for NEX: ' + met + ' ' + scenario + ' ' + model)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for NEX: ' + met + ' ' + scenario + ' ' + model)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = str(time_d[timeStep])+'_NEX_'+str(met)+'_'+scenario+'_'+model+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for NEX: ' + met + ' ' + scenario + ' ' + model)", "def read_uef_details(chunks):\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x0])\n\n\tif pos == None:\n\n\t\toriginator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\toriginator = 'Unknown'\n\telse:\n\t\toriginator = 
chunk[1]\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0x5])\n\n\tif pos == None:\n\n\t\tmachine, keyboard = 'Unknown', 'Unknown'\n\n\telse:\n\n\t\tmachines = ('BBC Model A', 'Electron', 'BBC Model B', 'BBC Master')\n\t\tkeyboards = ('Any layout', 'Physical layout', 'Remapped')\n\n\t\tmachine = ord(chunk[1][0]) & 0x0f\n\t\tkeyboard = (ord(chunk[1][0]) & 0xf0) >> 4\n\n\t\tif machine < len(machines):\n\t\t\tmachine = machines[machine]\n\t\telse:\n\t\t\tmachine = 'Unknown'\n\n\t\tif keyboard < len(keyboards):\n\t\t\tkeyboard = keyboards[keyboard]\n\t\telse:\n\t\t\tkeyboard = 'Unknown'\n\n\tpos, chunk = find_next_chunk(chunks, 0, [0xff00])\n\n\tif pos == None:\n\n\t\temulator = 'Unknown'\n\n\telif chunk[1] == '':\n\n\t\temulator = 'Unknown'\n\telse:\n\t\temulator = chunk[1]\n\n\n\t# Remove trailing null bytes\n\twhile originator[-1] == '\\000':\n\n\t\toriginator = originator[:-1]\n\n\twhile emulator[-1] == '\\000':\n\n\t\temulator = emulator[:-1]\n\n\tfeatures = ''\n\tif find_next_chunk(chunks, 0, [0x1])[0] != None:\n\t\tfeatures = features + '\\n' + 'Instructions'\n\tif find_next_chunk(chunks, 0, [0x2])[0] != None:\n\t\tfeatures = features + '\\n' + 'Credits'\n\tif find_next_chunk(chunks, 0, [0x3])[0] != None:\n\t\tfeatures = features + '\\n' + 'Inlay'\n\n\treturn originator, machine, keyboard, emulator, features", "def get_ead_components(context, delete=True):\n result = []\n for ead_file in context.db.query(EadFile):\n ead_file._context = context\n result += ead_file.extract_components()\n return result", "def get_start_address():\n try:\n return command(\"P\")\n except EppException as e:\n print 'No EPROM type is selected.', e.value", "def getDataInterfaces(context, export_only=False):\n from bika.lims.exportimport import instruments\n exims = []\n for exim_id in instruments.__all__:\n exim = instruments.getExim(exim_id)\n if export_only and not hasattr(exim, 'Export'):\n pass\n else:\n exims.append((exim_id, exim.title))\n exims.sort(lambda x, y: cmp(x[1].lower(), y[1].lower()))\n exims.insert(0, ('', t(_('None'))))\n return DisplayList(exims)", "def query_pubmed(self, query, n_articles=3):\n try:\n URL = f'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term={query}&&retmax={n_articles}'\n resp = requests.get(URL)\n ids = [id.text for id in ElementTree.fromstring(resp.content)[3]]\n URL = f\"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={','.join(ids)}&retmode=xml&rettype=abstract\"\n resp = requests.get(URL)\n articles = [article[0][2][4][0].text for article in ElementTree.fromstring(resp.content)]\n return articles\n except:\n return []", "def xephemFormat(self):\n line = []\n #Field 1: names\n names = [self.getName()]\n identifiers = self.getIdentifiers()\n if identifiers[0] is not None:\n names.append(identifiers[0])\n for i in range(1,4):\n if identifiers[i] is not None:\n names.extend(identifiers[i])\n line.append(\"|\".join(names))\n\n #Field 2: type designation\n objType = self.getType()\n if objType in (\"Galaxy Pair\", \"Galaxy Triplet\", \"Group of galaxies\"):\n line.append(\"f|A\")\n elif objType == \"Globular Cluster\":\n line.append(\"f|C\")\n elif objType == \"Double star\":\n line.append(\"f|D\")\n elif objType in (\"HII Ionized region\", \"Nebula\"):\n line.append(\"f|F\")\n elif objType == \"Galaxy\":\n if self.getHubble().startswith(\"S\"):\n line.append(\"f|G\")\n else:\n line.append(\"f|H\")\n elif objType == \"Dark Nebula\":\n line.append(\"f|K\")\n elif objType in (\"Emission Nebula\", \"Reflection Nebula\"):\n 
line.append(\"f|N\")\n elif objType in (\"Association of stars\", \"Open Cluster\"):\n line.append(\"f|O\")\n elif objType == \"Planetary Nebula\":\n line.append(\"f|P\")\n elif objType == \"Supernova remnant\":\n line.append(\"f|R\")\n elif objType == \"Star\":\n line.append(\"f|S\")\n elif objType == \"Star cluster + Nebula\":\n line.append(\"f|U\")\n else:\n line.append(\"f\")\n\n #Field 3: Right Ascension\n line.append(self.getRA())\n\n #Field 4: Declination\n line.append(self.getDec())\n\n #Field 5: Magnitude\n #We use the first available magnitude in the sequence b,v,j,h,k\n for mag in self.getMagnitudes():\n if mag is not None:\n line.append(str(mag))\n break\n\n #Field 6: optional Epoch, we let it empty\n line.append(\"\")\n\n #Field 7: Dimensions\n dimensions = []\n #Xephem format wants axes espressed in arcsec, we have arcmin\n for value in (self.getDimensions()[0],self.getDimensions()[1]):\n if value is not None:\n dimensions.append(str(value*60))\n else:\n dimensions.append(\"\")\n if self.getDimensions()[2] is not None:\n dimensions.append(str(value))\n else:\n dimensions.append(\"\")\n line.append(\"|\".join(dimensions))\n\n return \",\".join(line)", "def extract_gene_data(info):\n gene_id = None\n gene_type = None\n for i in info:\n if i.startswith('gene_id'):\n gene_id = i.split(\" \", 1)[1].replace('\"', '')\n elif i.startswith('gene_type'):\n gene_type = i.split(\" \", 1)[1].replace('\"', '')\n\n assert gene_id is not None, 'No gene_id found {0}'.format(info)\n assert gene_type is not None, 'No gene_type found {0}'.format(info)\n return gene_id, gene_type", "def start_point_extraction():\n\tquery_palce_id = [\"32682%2C32683%2C32684%2C32685%2C32686%2C32687%2C32688%2C32689%2C32690%2C32691\",\n\t\t\t\t\t\t\"32692%2C32693%2C32694%2C32695%2C32696%2C32697%2C32698%2C32699%2C32700%2C32701\"]\n\tfor i in query_palce_id:\n\t\tsend_loop(q=i)", "def fetch_fermi_extended_sources(catalog):\n BASE_URL = 'http://fermi.gsfc.nasa.gov/ssc/data/access/lat/'\n if catalog == '3FGL':\n url = BASE_URL + '4yr_catalog/LAT_extended_sources_v15.tgz'\n elif catalog == '2FGL':\n url = BASE_URL + '2yr_catalog/gll_psc_v07_templates.tgz'\n elif catalog == '1FHL':\n url = BASE_URL + '1FHL/LAT_extended_sources_v12.tar'\n else:\n ss = 'Invalid catalog: {}\\n'.format(catalog)\n raise ValueError(ss)\n\n filename = download_file(url, cache=True)\n tar = tarfile.open(filename, 'r')\n\n hdu_list = []\n for member in tar.getmembers():\n if member.name.endswith(\".fits\"):\n file = tar.extractfile(member)\n hdu = fits.open(file)[0]\n hdu_list.append(hdu)\n hdu_list = fits.HDUList(hdu_list)\n\n return hdu_list", "def getEpoints(jobInfo):\n for line in jobInfo:\n if line.strip().startswith(\"E=\"):\n print(line)\n P=line\n\n return parseLineDigits(P)", "def CleanEpi(self):\n for entry in self.info.keys():\n info = self.info[entry]\n if info['psdname'] == 'epi':\n for tag in ('imgfile', 'imgfile_m', 'imgfile_mf', 'imgfile_t'):\n if info.has_key(tag) and info[tag] is not None and \\\n os.path.exists(info[tag]):\n print 'Deleting %s*' % (info[tag], info['suffix'])\n cmd = '/bin/rm %s%s*' % (info[tag], info['suffix'])\n self.ExecCmd(cmd)\n if '.BRIK' in info['suffix']:\n cmd = '/bin/rm %s%s*' % (info[tag], \\\n info['suffix'].replace('.BRIK','.HEAD'))\n self.ExecCmd(cmd)", "def read_dip(fname, verbose=None):\n dipole = read_dipole(fname)\n return (dipole.times * 1000., dipole.pos, dipole.amplitude,\n 1e9 * dipole.ori * dipole.amplitude[:, np.newaxis], dipole.gof)", "def extract_information(preprocessed_sentences):\n 
parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "def extract_data(filename, id_ep, conn):\n\n try:\n tree = ET.parse(filename)\n root = tree.getroot()\n\n # creation of a speaker's dict to manage the local ids (specific speakers' id file-dependent)\n speakers = {}\n # creation of a iterable to identify the unknown/unnamed speakers\n uknw_id = 0\n\n names = []\n cur = conn.cursor()\n cur.execute(\"SELECT name FROM speaker\")\n rows = cur.fetchall()\n i = 0\n for row in rows:\n names.append(row[0])\n i += 1\n\n # creation of speakers\n for spkr in root.iter(\"Speaker\"):\n name = spkr.attrib[\"name\"]\n if ((\",\" in name) or (\"sup+\" in name) or (\"Sup+\" in name)):\n name = \"multi_spk\"\n elif ((\"spk\" in name) or (\"speaker\" in name) or (\"Inconnu\" in name) or (\"unknown\" in name)):\n name = \"spk_\"+str(id_ep)+\"_\"+str(uknw_id)\n uknw_id += 1\n else :\n n = name.split(\" \")\n name = n[0]\n if len(n) > 1:\n for i in range(1, len(n)):\n name += \"_\" + n[i].upper()\n\n if \"type\" in spkr.attrib:\n if spkr.attrib[\"type\"] not in (\"male\", \"female\"):\n gender = \"NA\"\n else:\n gender = spkr.attrib[\"type\"]\n else:\n gender = \"NA\"\n\n if \"dialect\" in spkr.attrib:\n native = spkr.attrib[\"dialect\"]\n else:\n native = \"NA\"\n\n # speaker added only if doesn't already exists in the database\n if name not in names:\n new_speaker = (name, gender, native)\n create_speaker(conn, new_speaker)\n\n # update of the local id->name dictionary\n speakers[spkr.attrib['id']] = name\n\n # creation of turns\n for turn in root.iter(\"Turn\"):\n\n if \"speaker\" in turn.attrib:\n if len(turn.attrib[\"speaker\"]) > 5:\n speaker_name = \"multi_spk\"\n else:\n speaker_name = 
speakers[turn.attrib[\"speaker\"]]\n start_time = turn.attrib[\"startTime\"]\n end_time = turn.attrib[\"endTime\"]\n\n cur = conn.cursor()\n cur.execute(\"SELECT id_speaker FROM speaker WHERE name=?\", (speaker_name,))\n id_speaker = cur.fetchone()[0]\n id_episode = id_ep\n trans = ET.tostring(turn, \"ISO-8859-1\", method=\"text\")\n trans = trans.decode(\"ISO-8859-1\")\n trans = re.sub(\"\\n\", \" \", trans)\n trans = re.sub(\" \", \" \", trans)\n\n new_turn = (start_time, end_time, id_speaker, id_episode, trans)\n create_turn(conn, new_turn)\n\n except Exception as e:\n print(e)\n print(filename)\n pass", "def explode(self):\n return self.d_series.map_partitions(\n lambda s: s.struct.explode(),\n meta=self.d_series._meta.struct.explode(),\n )", "def parse_1d_scan_energies(path: str,\n initial_angle: float = 0.0,\n ) -> Tuple[Optional[List[float]], Optional[List[float]]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n energies, angles = None, None\n software = identify_ess(path)\n if software == 'xtb':\n scan_path = os.path.join(os.path.dirname(path), 'xtbscan.log')\n energies = list()\n if os.path.isfile(scan_path):\n lines = _get_lines_from_file(scan_path)\n for line in lines:\n if 'energy:' in line:\n energies.append(hartree_to_si(float(line.split()[1])))\n min_e = min(energies)\n energies = [e - min_e for e in energies]\n resolution = 360.0 / len(energies)\n angles = [initial_angle + i * resolution for i in range(len(energies))]\n return energies, angles\n\n log = ess_factory(fullpath=path, check_for_errors=False)\n try:\n energies, angles = log.load_scan_energies()\n energies *= 0.001 # convert to kJ/mol\n angles *= 180 / np.pi # convert to degrees\n except (LogError, NotImplementedError, ZeroDivisionError):\n logger.warning(f'Could not read energies from {path}')\n return energies, angles", "def get_initial_spectra(self, t, E, flavors=Flavor):\n pass", "def eid2partid(self, eids, etype=...):\n ...", "def eid2partid(self, eids, etype=...):\n ...", "def _extract_pre_basecalled_events(self):\n\t\t# try:\n\t\ttable = self.hdf5file[fastq_paths[self.version]['pre_basecalled']]\n\t\tevents = []\n\t\tfor read in table:\n\t\t\tevents.extend(table[read][\"Events\"][()])\n\t\tself.pre_basecalled_events = [Event(x) for x in events]\n\t\t# except Exception, e:\n\t\t\t# self.pre_basecalled_events = []", "def _compute_cfdi_values(self):\n for rec in self:\n attachment_id = rec.l10n_mx_edi_retrieve_last_attachment()\n if not attachment_id:\n continue\n attachment_id = attachment_id[0]\n # At this moment, the attachment contains the file size in its 'datas' field because\n # to save some memory, the attachment will store its data on the physical disk.\n # To avoid this problem, we read the 'datas' directly on the disk.\n datas = attachment_id._file_read(attachment_id.store_fname)\n if not datas:\n _logger.exception('The CFDI attachment cannot be found')\n continue\n rec.l10n_mx_edi_cfdi = datas\n tree = rec.l10n_mx_edi_get_xml_etree(base64.decodestring(datas))\n tfd_node = rec.l10n_mx_edi_get_tfd_etree(tree)\n if tfd_node is not None:\n rec.l10n_mx_edi_cfdi_uuid = tfd_node.get('UUID')\n rec.l10n_mx_edi_cfdi_supplier_rfc = tree.Emisor.get(\n 'Rfc', tree.Emisor.get('rfc'))\n rec.l10n_mx_edi_cfdi_customer_rfc = tree.Receptor.get(\n 'Rfc', tree.Receptor.get('rfc'))\n certificate = tree.get('noCertificado', tree.get('NoCertificado'))\n rec.l10n_mx_edi_cfdi_certificate_id = self.env['l10n_mx_edi.certificate'].sudo().search(\n [('serial_number', '=', certificate)], 
limit=1)", "def from_xds(xds_inp, xds_other):\n # Get the sequence from the XDS files\n sequence = xds.to_imageset(xds_inp, xds_other)\n\n # Get the crystal from the XDS files\n crystal = xds.to_crystal(xds_other)\n\n # Create the experiment list\n experiments = ExperimentListFactory.from_imageset_and_crystal(sequence, crystal)\n\n # Set the crystal in the experiment list\n assert len(experiments) == 1\n\n # Return the experiment list\n return experiments", "def load_PSF_data(self):\n self.epsf = {}\n for filter in ['F105W', 'F125W', 'F140W', 'F160W']:\n file = os.path.join(os.getenv('GRIZLI'), 'CONF',\n 'PSFSTD_WFC3IR_{0}.fits'.format(filter))\n \n data = pyfits.open(file)[0].data.T\n data[data < 0] = 0 \n \n self.epsf[filter] = data\n \n # Dummy, use F105W ePSF for F098M and F110W\n self.epsf['F098M'] = self.epsf['F105W']\n self.epsf['F110W'] = self.epsf['F105W']\n \n # Extended\n self.extended_epsf = {}\n for filter in ['F105W', 'F125W', 'F140W', 'F160W']:\n file = os.path.join(os.getenv('GRIZLI'), 'CONF',\n 'extended_PSF_{0}.fits'.format(filter))\n \n if not os.path.exists(file):\n msg = 'Extended PSF file \\'{0}\\' not found.'.format(file)\n msg += '\\n Get the archive from http://www.stsci.edu/~brammer/Grizli/Files/WFC3IR_extended_PSF.v1.tar.gz'\n msg += '\\n and unpack in ${GRIZLI}/CONF/' \n raise FileNotFoundError(msg)\n \n data = pyfits.open(file)[0].data#.T\n data[data < 0] = 0 \n \n # Mask center\n NX = data.shape[0]/2-1\n yp, xp = np.indices(data.shape)\n R = np.sqrt((xp-NX)**2+(yp-NX)**2)\n data[R <= 4] = 0.\n \n self.extended_epsf[filter] = data\n self.extended_N = int(NX)\n \n self.extended_epsf['F098M'] = self.extended_epsf['F105W']\n self.extended_epsf['F110W'] = self.extended_epsf['F105W']", "def read_fermi(self):\n E_f=None\n for line in open('OUTCAR', 'r'):\n if line.rfind('E-fermi') > -1:\n E_f=float(line.split()[2])\n return E_f", "def info_for_all_observations(self):\n # Get all combinations of instrument, detector, filter, exp_type,\n all_combinations = []\n for i in range(len(self.info['Instrument'])):\n # Get instrument information for the exposure\n instrument = self.info['Instrument'][i]\n detector = self.info['detector'][i]\n if instrument == 'NIRCAM':\n detector = 'NRC{}'.format(detector)\n if '5' in detector:\n filtername = self.info['LongFilter'][i]\n pupilname = self.info['LongPupil'][i]\n detector = detector.replace('5', 'LONG')\n else:\n filtername = self.info['ShortFilter'][i]\n pupilname = self.info['ShortPupil'][i]\n elif instrument == 'NIRISS':\n filtername = self.info['ShortFilter'][i]\n pupilname = self.info['ShortPupil'][i]\n elif instrument == 'FGS':\n filtername = 'N/A'\n pupilname = 'N/A'\n readpattern = self.info['ReadoutPattern'][i]\n\n if instrument == 'NIRCAM':\n exptype = 'NRC_IMAGE'\n elif instrument == 'NIRISS':\n exptype = 'NIS_IMAGE'\n elif instrument == 'FGS':\n exptype = 'FGS_IMAGE'\n\n entry = (instrument, detector, filtername, pupilname, readpattern, exptype)\n all_combinations.append(entry)\n unique_combinations = list(set(all_combinations))\n return all_combinations, unique_combinations", "def GetIscEventcatalog(start_date_time, days, pos, catalog_type):\n # Read the isc data. Note that we take any data points within 1000 km, which\n # is a huge distance. We let the polygon distance calculation below pull it\n # in closer.\n data = isc.ReadISCData('gs://clouddfe-cfs/isc', catalog_type, start_date_time,\n days, pos, 1000)\n\n # Munge the data. 
Brendan's calculations have different field names from\n # what's returned from the ISC reader. Likely we want to remove this step.\n # TODO(jfaller, meadeb): Make field names consistent.\n ret = {}\n ret['yr'] = [x['date_time'].year for x in data]\n ret['mon'] = [x['date_time'].month for x in data]\n ret['day'] = [x['date_time'].day for x in data]\n ret['hr'] = [x['date_time'].hour for x in data]\n ret['min'] = [x['date_time'].minute for x in data]\n ret['sec'] = [x['date_time'].second for x in data]\n ret['latitude'] = [x['lat'] for x in data]\n ret['longitude'] = [x['lon'] for x in data]\n ret['depth'] = [x['depth'] for x in data]\n ret['magnitude'] = [x['magnitude'] for x in data]\n ret['datetime'] = [x['date_time'] for x in data]\n\n return ret", "def process_exposure_product(self, exp_product, pool_name=\" \", asn_file=\" \"):\n # Find all the member types in the product\n members_by_type = defaultdict(list)\n for member in exp_product[\"members\"]:\n members_by_type[member[\"exptype\"].lower()].append(member[\"expname\"])\n\n # Get the science member. Technically there should only be\n # one. We'll just get the first one found.\n science = members_by_type[\"science\"]\n if len(science) != 1:\n self.log.warning(\n \"Wrong number of science files found in {}\".format(exp_product[\"name\"])\n )\n self.log.warning(\" Using only first one.\")\n science = science[0]\n\n self.log.info(\"Working on input %s ...\", science)\n if isinstance(science, datamodels.DataModel):\n input = science\n else:\n input = datamodels.open(science)\n\n # Record ASN pool and table names in output\n input.meta.asn.pool_name = pool_name\n input.meta.asn.table_name = asn_file\n\n input = self.dark_current(input)\n input = self.normalize(input)\n\n self.log.info(\"Finished processing product {}\".format(exp_product[\"name\"]))\n return input", "def LoadEEGData(filename, EEGdevice):\n if EEGdevice == 7:\n x = 1\n elif EEGdevice == 8:\n # Read in the .easy file\n df = pd.read_csv(filename, delimiter='\\t', header=None)\n\n # Get metadata from the .info file\n fname = filename[:-5] + '.info'\n with open(fname) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n\n # Get the channel names\n channel_info = [x for x in content if 'Channel ' in x]\n channel_names = []\n for ch in range(len(channel_info)):\n channel_names.append(channel_info[ch].split(': ')[1])\n\n channel_names.append('X')\n channel_names.append('Y')\n channel_names.append('Z')\n channel_names.append('STI 014')\n channel_names.append('DateTime')\n\n # Get sampling rates\n sampling_rates = [x for x in content if 'sampling rate: ' in x]\n fs_all = []\n for freq in range(len(sampling_rates)):\n tmp = sampling_rates[freq].split(': ')[1].split(' ')[0]\n if tmp in ['N/A']:\n print('Skipping N/A')\n else:\n fs_all.append(float(sampling_rates[freq].split(': ')[1].split(' ')[0]))\n\n # Store sampling rates\n fs = fs_all[0]\n fs_accel = fs_all[1]\n\n # Assign the column names\n df.columns = channel_names\n \n # Return dataframe and sampling rates\n return df, fs, fs_accel", "def ProvideEphemerisData(self):\n return _gmat_py.Spacecraft_ProvideEphemerisData(self)", "def infotodict(seqinfo):\n \n \"\"\"\n MCF Pilot Protocol acquired on Friday April 13th\n \n >>> hdc_look.py -s mfc001 -ss 1\n series_id sequence_name series_description dim1 dim2 dim3 dim4 TR TE is_derived is_motion_corrected\n 0 1-localizer *fl2d1 localizer 192 192 3 1 0.020 5.00 False False\n 1 2-pre_Neutral1_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral1_A>>P Resting 4X4X4 64 64 
35 148 2.000 25.00 False False\n 2 3-pre_topup_A>>P *epse2d1_64 pre_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 3 4-pre_topup_P>>A *epse2d1_64 pre_topup_P>>A 64 64 140 1 2.400 38.00 False False\n 4 5-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 4.92 False False\n 5 6-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 7.38 False False\n 6 7-pre+heat1_A>>P 4X4X4 *epfid2d1_64 pre+heat1_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 7 8-pre_Neutral2_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral2_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 8 9-pre+heat2_A>>P 4X4X4 *epfid2d1_64 pre+heat2_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 9 10-MPRAGE_GRAPPA2 *tfl3d1_16ns MPRAGE_GRAPPA2 256 240 192 1 2.300 2.98 False False\n 10 11-post_Neutral3_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral3_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 11 12-post+heat3_A>>P 4X4X4 *epfid2d1_64 post+heat3_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 12 13-post_Neutral4_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral4_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 13 14-post+heat4_A>>P 4X4X4 *epfid2d1_64 post+heat4_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 14 15-post_topup_A>>P *epse2d1_64 post_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 15 16-post_topup_P>>A *epse2d1_64 post_topup_P>>A 64 64 140 1 2.400 38.00 False False\n \n \"\"\"\n\n bids_prefix = 'sub-{subject}/{session}/'\n\n pre_neutral1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-fmap_bold.{item:01d}')\n pre_neutral2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-fmap_bold.{item:01d}')\n\n pre_neutral1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-topup_bold.{item:01d}')\n pre_neutral2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-topup_bold.{item:01d}')\n\n pre_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-ap_epi.{item:01d}')\n pre_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-pa_epi.{item:01d}')\n\n # The item was commented out for Phase Difference field maps. Conversion did not work correctly. 
I removed the item number to try to\n # isolate the problem.\n\n pre_fmap_magnitude1 = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_magnitude1.{item:01d}')\n pre_fmap_phasediff = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_phasediff.{item:01d}')\n\n t1w = create_key(bids_prefix + 'anat/sub-{subject}_{session}_T1w')\n\n post_neutral3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-fmap_bold.{item:01d}')\n post_neutral4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-fmap_bold.{item:01d}')\n\n post_neutral3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-topup_bold.{item:01d}')\n post_neutral4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-topup_bold.{item:01d}')\n\n post_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-ap_epi.{item:01d}')\n post_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-pa_epi.{item:01d}')\n\n # Create an empty dictionary called info for each key\n\n info = {pre_neutral1_ap_fmap: [],\n pre_heat1_ap_fmap: [],\n pre_heat2_ap_fmap: [],\n pre_neutral2_ap_fmap: [],\n\n pre_neutral1_ap_topup: [],\n pre_heat1_ap_topup: [],\n pre_heat2_ap_topup: [],\n pre_neutral2_ap_topup: [],\n\n pre_topup_ap: [],\n pre_topup_pa: [],\n\n pre_fmap_magnitude1: [],\n pre_fmap_phasediff: [],\n\n t1w: [],\n\n post_neutral3_ap_fmap: [],\n post_heat3_ap_fmap: [],\n post_heat4_ap_fmap: [],\n post_neutral4_ap_fmap: [],\n\n post_neutral3_ap_topup: [],\n post_heat3_ap_topup: [],\n post_heat4_ap_topup: [],\n post_neutral4_ap_topup: [],\n\n post_topup_ap: [],\n post_topup_pa: [],\n\n }\n\n # Loop over each sequence. 
Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if 'pre_Neutral1' in s.series_id:\n info[pre_neutral1_ap_fmap].append([s.series_id])\n info[pre_neutral1_ap_topup].append([s.series_id])\n\n if 'pre+heat1' in s.series_id:\n info[pre_heat1_ap_fmap].append([s.series_id])\n info[pre_heat1_ap_topup].append([s.series_id])\n\n if 'pre+heat2' in s.series_id:\n info[pre_heat2_ap_fmap].append([s.series_id])\n info[pre_heat2_ap_topup].append([s.series_id])\n\n if 'pre_Neutral2' in s.series_id:\n info[pre_neutral2_ap_fmap].append([s.series_id])\n info[pre_neutral2_ap_topup].append([s.series_id])\n\n if 'pre_topup_A>>P' in s.series_id:\n info[pre_topup_ap].append([s.series_id])\n\n if 'pre_topup_P>>A' in s.series_id:\n info[pre_topup_pa].append([s.series_id])\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 4.92)):\n info[pre_fmap_magnitude1].append([s.series_id])\n \n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 7.38)):\n info[pre_fmap_phasediff].append([s.series_id])\n\n if 'MPRAGE_GRAPPA2' in s.series_id:\n info[t1w].append([s.series_id])\n\n if 'post_Neutral3' in s.series_id:\n info[post_neutral3_ap_fmap].append([s.series_id])\n info[post_neutral3_ap_topup].append([s.series_id])\n\n if 'post+heat3' in s.series_id:\n info[post_heat3_ap_fmap].append([s.series_id])\n info[post_heat3_ap_topup].append([s.series_id])\n\n if 'post+heat4' in s.series_id:\n info[post_heat4_ap_fmap].append([s.series_id])\n info[post_heat4_ap_topup].append([s.series_id])\n\n if 'post_Neutral4' in s.series_id:\n info[post_neutral4_ap_fmap].append([s.series_id])\n info[post_neutral4_ap_topup].append([s.series_id])\n\n if 'post_topup_A>>P' in s.series_id:\n info[post_topup_ap].append([s.series_id])\n\n if 'post_topup_P>>A' in s.series_id:\n info[post_topup_pa].append([s.series_id])\n\n return info", "def propositions(civic_eid2997_proposition):\n return [civic_eid2997_proposition]", "def _get_grism_ext_info(self, grisim='', config='', spec_hdu=None):\n\n if spec_hdu is None:\n conf = configfile.ConfigFile(getCONF(config))\n ext_info = get_ext_info(getDATA(grisim), conf)\n\n else:\n # make by hand the extension information\n ext_info = {'axe_ext': spec_hdu, 'fits_ext': spec_hdu-1}\n\n # return the extension info\n return ext_info", "def load_demos():\n for index in range(len(feconf.DEMO_EXPLORATIONS)):\n load_demo(str(index))", "def test_psi4_efp_5c():\n subject = subject5 + '\\nno_com\\nfix_orientation\\nsymmetry c1'\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"definition\",\n \"doseForm\",\n \"intendedRoute\",\n \"ingredient\",\n \"drugCharacteristic\",\n ]", "def readSeisComPEventXML0_6(filename):\n # realy dirty hack\n spans = [0]\n xml = open(filename, 'rt').read()\n m_event = ev_expr.search(xml)\n ins = '<eventParameters>' + m_event.group()\n for m in mag_expr.finditer(xml):\n ins += m.expand(r'\\1\\2\\3<originID>\\2</originID>\\4mag\\6mag\\8')\n spans.extend(list(m.span()))\n spans.extend(list(m_event.span()) + [-1])\n xml = ''.join([xml[spans[2 * i]:spans[2 * i + 1]] for i in range(len(spans) // 2)])\n xml = ev_para_expr.sub(ins, xml)\n xml = xml.replace('EventParameters', 'eventParameters')\n temp = StringIO.StringIO(xml)\n return readQuakeML(temp)" ]
[ "0.6132044", "0.59762484", "0.5642774", "0.53444785", "0.52427983", "0.52217543", "0.5216209", "0.51387924", "0.5125599", "0.50237185", "0.5018066", "0.5001039", "0.49862388", "0.49604324", "0.4950634", "0.49402615", "0.49259216", "0.49019468", "0.48623866", "0.4825966", "0.4802431", "0.47943074", "0.47941", "0.4782276", "0.47811458", "0.4775542", "0.47599554", "0.47576952", "0.47461975", "0.47358406", "0.47181705", "0.47053275", "0.47048223", "0.47013682", "0.4700134", "0.46840352", "0.46771613", "0.4659731", "0.465713", "0.46560144", "0.46535808", "0.46491078", "0.46396595", "0.46341977", "0.4633495", "0.46297553", "0.46256024", "0.4625393", "0.4625088", "0.460806", "0.46060354", "0.4597609", "0.45939246", "0.45881507", "0.45845878", "0.45818406", "0.45804113", "0.4579906", "0.45734468", "0.45717204", "0.4567382", "0.45665073", "0.45579457", "0.4556674", "0.45522434", "0.45515144", "0.45513707", "0.45506355", "0.45450395", "0.45415005", "0.4536215", "0.45314702", "0.45269966", "0.45165852", "0.4516284", "0.44988555", "0.4498319", "0.44977325", "0.44976643", "0.44974256", "0.4496575", "0.44800326", "0.44800326", "0.4476799", "0.4476648", "0.44758275", "0.4474326", "0.4473491", "0.44726202", "0.44689748", "0.4466644", "0.44634166", "0.44608882", "0.445693", "0.44554496", "0.44550213", "0.445119", "0.44485313", "0.4448181", "0.44461808" ]
0.6886132
0
Reconstruct the EPIs from pfiles.
def ReconEpis(self):
    run = zeros(100)
    if self.verbose:
        print 'Reconstruct EPIs'
    for pfile in self.pfiles_recon:
        if self.info[pfile]['refdat'] is None:
            # Find the ref.dat file later.
            continue
        if self.info[pfile]['compression'] is not None:
            # Data are compressed, copy to tmp.
            compression = self.info[pfile]['compression']
            pfile_decomp = '%s/%s' % (self.tmpdir, \
                os.path.basename(self.info[pfile]['pfile_decomp']))
            if os.path.exists(pfile_decomp):
                errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \
                         ' in ReconEpis'
            cmd = '%s %s > %s' % \
                (decompress_cmds[compression], pfile, pfile_decomp)
            self.ExecCmd(cmd)
        else:
            # Create a link on /tmp to the pfile so the link to ref.dat will also
            # be on /tmp, (which is always writeable.)
            pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))
            if not os.path.exists(pfile_decomp):
                os.symlink(pfile, pfile_decomp)
        refname, refcmpress = self.CheckCompression( \
            self.info[pfile]['refdat'])
        if refcmpress is not None:
            refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))
            cmd = '%s %s > %s' % \
                (decompress_cmds[refcmpress], \
                 self.info[pfile]['refdat'], refdat_decomp)
            self.ExecCmd(cmd)
        else:
            refdat_decomp = self.info[pfile]['refdat']
        if refdat_decomp is not None:
            if refdat_decomp != 'ref.dat':
                # Create link bearing the file name epirecon_ex expects.
                refdat_link = '%s/ref.dat' % self.tmpdir
                if not os.path.exists(refdat_link):
                    if self.verbose:
                        print 'ln -s %s %s' % (refdat_decomp, refdat_link)
                    if os.path.islink(refdat_link):
                        # ref.dat is a broken symbolic link.
                        if self.verbose:
                            print 'rm %s' % ref_file
                        os.remove(refdat_link)
                    try:
                        os.symlink(refdat_decomp, refdat_link)
                    except OSError:
                        self.errors = True
                pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))
                os.symlink(pfile_decomp, pfile_link)
                os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)
            series = int(self.info[pfile]['series'])
            run[series] = run[series] + 1
            epiname = self.info[pfile]['imgfile']
            cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \
                (pfile_decomp, epiname, self.skip)
            fname = '%s+orig.BRIK' % epiname
            self.CheckExec(cmd, [fname])
            # self.epi_prefixes[pfile] = self.info[pfile]['imgfile']
        else:
            errstr = '*******************************************\n' + \
                     'No ref.dat file exists for %s\n' % pfile + \
                     '*******************************************\n'
            self.error_log = self.error_log + errstr
            self.f_crash.write(errstr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def _EpiInfo(self, info, path):\n\n epi_vals = 
{'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def from_file(epub_file):\n self = Epub()\n\n # TODO: zipfile.ZipFile accepts a file or a fileobject.\n # That seems ambiguous. 
We should probably create a\n # separate method to create an EPUB from a file object to be more\n # clear.\n\n if (isinstance(epub_file, file)):\n self.filename = file.name\n\n if (isinstance(epub_file, str)):\n self.filename = epub_file\n\n try:\n archive = zipfile.ZipFile(epub_file)\n except Exception as e:\n print 'Could not open zipfile \"%s\" \\n' % self.filename\n print e\n\n # parse container.xml for full path to content.opf file\n container_xml = archive.read(PATH_TO_CONTAINER_XML)\n container_xml_tree = etree.fromstring(container_xml)\n fullpath = container_xml_tree.xpath('n:rootfiles/n:rootfile/@full-path',\n namespaces=XML_NAMESPACES)[0]\n\n # Each major XML element in the content.opf file is mapped to its own class.\n # This dict maps those classes to the XPaths that point to the corresponding XML\n # element.\n #\n # for example: the XPath \"opf:package\" points to the '<package>' XML element\n # which is mapped to the Package class\n element_map = [{'name': 'package',\n 'class': Package,\n 'element_xpath': '/opf:package'},\n {'name': 'metadata',\n 'class': MetaData,\n 'element_xpath': '/opf:package/opf:metadata',\n 'sub_element_class': Element,\n 'sub_element_xpath': \"./*\"},\n {'name': 'manifest',\n 'class': Manifest,\n 'element_xpath': '/opf:package/opf:manifest',\n 'sub_element_class': ManifestElement,\n 'sub_element_xpath': 'opf:item'},\n {'name': 'spine',\n 'class': Spine,\n 'element_xpath': '/opf:package/opf:spine',\n 'sub_element_class': Element,\n 'sub_element_xpath': 'opf:itemref'},\n {'name': 'guide',\n 'class': Guide,\n 'element_xpath': '/opf:package/opf:guide',\n 'sub_element_class': Element,\n 'sub_element_xpath': 'opf:reference',\n 'optional': True}]\n\n tree = etree.fromstring(archive.read(fullpath))\n\n for element in element_map:\n try:\n element_tree = tree.xpath(element['element_xpath'], namespaces=XML_NAMESPACES)[0]\n except IndexError as e:\n # If the element is marked as optional, just keep going if we don't find it.\n if element['optional']:\n continue\n else:\n print element\n element_class = element['class']()\n element_class.as_xhtml = etree.tostring(element_tree)\n # Step through the attrib dict and replace each key with its localname version\n # i.e. 
if the key is '{namespace}event', replace it with 'event'.\n # There *shouldn't* be any collisions.\n element_class.tag.attributes = {etree.QName(key).localname: value for key, value in\n element_tree.attrib.iteritems()}\n element_class.tag.localname = etree.QName(element_tree).localname\n element_class.tag.namespace = etree.QName(element_tree).namespace\n element_class.text = element_tree.text\n\n if 'sub_element_class' in element:\n sub_element_tree = element_tree.xpath(element['sub_element_xpath'], namespaces=XML_NAMESPACES)\n for k in sub_element_tree:\n sub_element_class = element['sub_element_class']()\n sub_element_class.as_xhtml = etree.tostring(k)\n sub_element_class.tag.attributes = {etree.QName(key).localname: value for key, value in\n k.attrib.iteritems()}\n sub_element_class.tag.localname = etree.QName(k.tag).localname\n sub_element_class.tag.namespace = etree.QName(k.tag).namespace\n sub_element_class.tag.text = k.text\n element_class.append(sub_element_class)\n\n # if we just created a ManifestElement, we need to additionally\n # pass it a reference to the epub archive and the dirname\n # contained in the fullpath in order for it to access the file\n # it points to\n\n if type(sub_element_class) == ManifestElement:\n # fullpath is the path to the content.opf file.\n # This should also be the path to the manifest item files.\n sub_element_class.basedir = os.path.dirname(fullpath)\n sub_element_class.archive = archive\n\n # Assigns the class we just created as an attribute of the Epub object.\n # The attr name is taken from the 'name' value in the element_map above.\n setattr(self, element['name'], element_class)\n\n # If we just created the spine element, we need to pass it\n # a reference to the manifest. This will enable the spine element to access\n # manifeset elements directly\n # note: this assumes the manifest element has alreay been created\n if element['name'] == 'spine':\n self.spine.manifest = self.manifest\n\n # read in the items from the manifest\n for element in self.manifest:\n if element.isDocument():\n pass\n if element.isImage():\n self.images.append(element)\n if element.isCSS():\n self.css.append(element)\n if element.isTOC():\n pass\n\n # create an array called parts that references elements\n # listed in the spine\n\n for itemref in self.spine.list:\n self.parts.append(self.manifest.getElementById(itemref.tag.attributes['idref']))\n\n return self", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def ConvertRtEpis(self):\n if self.verbose:\n print 'Convert EPIs to brik'\n for entry in 
self.entry_map['epi']:\n if ('epirt' in self.info[entry]['psdname'] or \\\n self.info[entry]['psdname'] == 'epi' or \\\n self.info[entry]['psdname'] == '*epfid2d1_64') and \\\n self.info[entry]['data_filetype'] == 'dicom':\n series = self.info[entry]['series']\n if self.info[entry]['skip'] > 0:\n skip = '--skip=%s' % self.info[entry]['skip']\n else:\n skip = ''\n cmd = 'convert_file %s %s %s brik' % \\\n (skip, entry, self.info[entry]['imgfile'])\n checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile'])\n self.CheckExec(cmd, [checkname])", "def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))", "def ppt_files_to_dict(self):\n if len(self.ppt_path_list) == 0:\n return\n\n for file_path in self.ppt_path_list:\n self.ppt_file_to_dict(file_path)", "def __init__(self, config):\n self.config = config\n self.outpath = prepDir(config.outpath)\n self.xslpath = config.xslpath\n self.imagespath = config.imagespath\n self.errors = []\n self.xeps = []\n files = []\n if config.xeps:\n for xep in config.xeps:\n if os.path.isfile(xep):\n files.append(os.path.abspath(xep))\n elif os.path.isdir(xep):\n fltr = os.path.join(os.path.abspath(xep), '*.xml')\n files += glob.glob(fltr)\n else:\n if os.path.isfile(\"xep-{0}.xml\".format(xep)):\n files.append(\n os.path.abspath(os.path.join(os.getcwd(), \"xep-{0}.xml\".format(xep))))\n else:\n # no xeps given, try all xml-files in curdir\n fls = glob.glob(os.path.join(os.getcwd(), '*.xml'))\n for fle in fls:\n files.append(os.path.abspath(fle))\n # try if we can find an existing XEP-table:\n if os.path.isfile(os.path.join(self.outpath, \"xeps.xml\")):\n self.xeptable = os.path.join(self.outpath, \"xeps.xml\")\n else:\n self.xeptable = None\n # read files to xeps\n for fle in sorted(set(files)):\n try:\n self.xeps.append(\n xeputils.xep.XEP(fle,\n outpath=self.outpath,\n xslpath=self.xslpath,\n imagespath=self.imagespath))\n except:\n e = \"Error while parsing {}\\n\".format(fle)\n e += \"FATAL: {} is not included\\n\".format(fle)\n e += traceback.format_exc()\n self.errors.append(e)", "def generate_extracts(self, root=None):\n import collections\n from ambry.util import toposort\n\n \n ext_config = self.extracts\n\n # Order the extracts 
to satisfy dependencies. \n graph = {}\n for key,extract in ext_config.items():\n graph[key] = set(extract.get('depends',[]))\n \n\n if graph:\n exec_list = []\n for group in toposort(graph):\n exec_list.extend(group)\n else:\n exec_list = ext_config.keys()\n \n if root:\n deps = self.dep_tree(root)\n exec_list = [ n for n in exec_list if n in deps]\n \n \n # now can iterate over the list. \n for key in exec_list:\n extract = ext_config[key]\n extract['_name'] = key\n for_ = extract.get('for', \"'True'\")\n function = extract.get('function', False)\n file_ = extract.get('file', False)\n each = extract.get('each', [])\n p_id = extract.get('partition', False)\n eaches = self._expand_each(each)\n \n \n # This part is a awful hack and should be refactored\n if function:\n for data in eaches: \n yield self._sub(dict(extract.items() + data.items()))\n\n elif p_id: \n partitions = self._expand_partitions(p_id, for_)\n \n for partition in partitions:\n p_dict = self._make_partition_dict(partition)\n for data in eaches: \n yield self._sub(dict(p_dict.items()+extract.items() + \n data.items() ))\n elif file_:\n for data in eaches:\n yield self._sub(dict(extract.items() + data.items()))\n else:\n self.bundle.error(\"Extract group {} should have either a function or a partition\".format(key))", "def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()", "def extract_pp(self):\n build([srna.ExtractPPW(fastq_dic=self.fastq_dic, num_cpus=self.num_cpus,\n indexfile=self.hisat_index, workdir=self.workdir,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler)", "def PruneEpiEntries(self):\n pruned = {}\n basefiles = []\n baseentries = {}\n for entry in self.entry_map['epi']:\n if baseentries.has_key(self.info[entry]['basefile']):\n baseentries[self.info[entry]['basefile']].append(entry)\n else:\n baseentries[self.info[entry]['basefile']] = [entry]\n for entry in self.entry_map['epi']:\n targets = []\n if self.no_motcorr:\n target = self.info[entry]['imgfile']\n elif self.info[entry]['fmapname'] is None or self.no_fmapcorr:\n target = self.info[entry]['imgfile_m']\n else:\n target = self.info[entry]['imgfile_mf']\n targets.append(target + self.info[entry]['suffix'])\n targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D'))\n pruned[entry] = [True, baseentries[self.info[entry]['basefile']]]\n for target in targets:\n pruned[entry] = \\\n [False, baseentries[self.info[entry]['basefile']]]\n for key in pruned.keys():\n if not pruned[key][0]:\n for entry in pruned[key][1]:\n pruned[entry][0] = False\n tmp = new_map = []\n for entry in self.entry_map['epi']:\n if pruned[entry][0]:\n if self.verbose:\n print 'Skipping %s: Already reconstructed.' 
% targets[0]\n if entry in self.pfiles_recon:\n self.pfiles_recon.remove(entry)\n else:\n new_map.append(entry)\n self.entry_map['epi'] = new_map", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations", "def load(self):\n if self.__fname == '':\n print('You must pass in a file name to load!')\n return []\n\n ext = os.path.splitext(self.__fname)[1]\n first_pt = None\n if len(self.__fea.points) > 0:\n first_pt = self.__fea.points[0]\n if ext == '.dxf':\n parts = self.__load_dxf()\n elif ext in ['.brep', '.brp', '.iges', '.igs', '.step', '.stp']:\n self.__make_geo()\n parts = self.__load_geo()\n last_pt = None\n if first_pt != None:\n if len(self.__fea.points) > 2:\n last_pt = self.__fea.points[-1]\n if self.__scale != '':\n # call scale\n pass\n return parts", "def compute_products(self):\r\n src_to_classfiles = defaultdict(list)\r\n for pcd_entry in self.pcd_entries:\r\n srcfile = pcd_entry[1]\r\n # In the file classes are represented with slashes, not dots. 
E.g., com/foo/bar/Baz.\r\n src_to_classfiles[srcfile].append(pcd_entry[0] + '.class')\r\n return src_to_classfiles", "def init_pta(params_all):\n\n ptas = dict.fromkeys(params_all.models)\n for ii, params in params_all.models.items():\n\n allpsr_model = params_all.noise_model_obj(psr=params_all.psrs,\n params=params)\n\n models = list()\n from_par_file = list()\n ecorrexists = np.zeros(len(params_all.psrs))\n\n # Including parameters common for all pulsars\n if params.tm=='default':\n tm = gp_signals.TimingModel()\n elif params.tm=='ridge_regression':\n log10_variance = parameter.Uniform(-20, -10)\n basis = scaled_tm_basis()\n prior = ridge_prior(log10_variance=log10_variance)\n tm = gp_signals.BasisGP(prior, basis, name='ridge')\n\n # Adding common noise terms for all pulsars\n # Only those common signals are added that are listed in the noise model\n # file, getting Enterprise models from the noise model object.\n if 'm_all' in locals():\n del m_all\n for psp, option in params.common_signals.items():\n if 'm_all' in locals():\n m_all += getattr(allpsr_model, psp)(option=option)\n else:\n m_all = tm + getattr(allpsr_model, psp)(option=option)\n\n # Including single pulsar noise models\n for pnum, psr in enumerate(params_all.psrs):\n\n singlepsr_model = params_all.noise_model_obj(psr=psr, params=params)\n\n # Determine if ecorr is mentioned in par file\n try:\n for key,val in psr.t2pulsar.noisemodel.items():\n if key.startswith('ecorr') or key.startswith('ECORR'):\n ecorrexists[pnum]=True\n except Exception as pint_problem:\n print(pint_problem)\n ecorrexists[pnum]=False\n\n # Add noise models\n if psr.name in params.noisemodel.keys():\n noise_model_dict_psr = params.noisemodel[psr.name]\n else:\n noise_model_dict_psr = params.universal\n for psp, option in noise_model_dict_psr.items():\n if 'm_sep' in locals():\n m_sep += getattr(singlepsr_model, psp)(option=option)\n elif 'm_all' in locals():\n m_sep = m_all + getattr(singlepsr_model, psp)(option=option)\n else:\n m_sep = tm + getattr(singlepsr_model, psp)(option=option)\n\n models.append(m_sep(psr))\n del m_sep\n\n pta = signal_base.PTA(models)\n\n if 'noisefiles' in params.__dict__.keys():\n noisedict = get_noise_dict(psrlist=[p.name for p in params_all.psrs],\\\n noisefiles=params.noisefiles)\n print('For constant parameters using noise files in PAL2 format')\n pta.set_default_params(noisedict)\n\n print('Model',ii,'params (',len(pta.param_names),') in order: ', \\\n pta.param_names)\n\n if params.opts is not None:\n if params.opts.mpi_regime != 2:\n np.savetxt(params.output_dir + '/pars.txt', pta.param_names, fmt='%s')\n \n ptas[ii]=pta\n\n return ptas", "def __load(self, pkgrels):\n # keep track of which parts are already loaded\n part_dict = {}\n\n # discard any previously loaded relationships\n self.__relationships = _RelationshipCollection()\n\n # add model-side rel for each pkg-side one, and load target parts\n for pkgrel in pkgrels:\n # unpack working values for part to be loaded\n reltype = pkgrel.reltype\n pkgpart = pkgrel.target\n partname = pkgpart.partname\n content_type = pkgpart.content_type\n # log.debug(\"%s -- %s\", reltype, partname)\n\n # create target part\n part = Part(reltype, content_type)\n part_dict[partname] = part\n part._load(pkgpart, part_dict)\n\n # create model-side package relationship\n model_rel = _Relationship(pkgrel.rId, reltype, part)\n self.__relationships._additem(model_rel)\n\n # gather references to image parts into __images\n self.__images = ImageCollection()\n image_parts = [part for part in 
self._parts\n if part.__class__.__name__ == 'Image']\n for image in image_parts:\n self.__images._loadpart(image)", "def _extract(self):\r\n self._data = []\r\n for fname in self.files:\r\n meta = dict(filename=fname)\r\n\r\n # Perform the actual metadata extraction\r\n fname = os.path.splitext(self.filter_filename(fname))[0]\r\n values = fname.split(self.sep)\r\n\r\n # Handle the case where number of fields is less than the length\r\n # of the extracted values, ie cases where we only want to extract\r\n # a subset of available fields.\r\n if self.index:\r\n values = [val for i, val in enumerate(values) if i in self.index]\r\n\r\n meta.update(dict(zip(self.fields, values)))\r\n if self.split_by in self.fields:\r\n meta[self.split_by] = self._get_split_field_values(meta['filename'])\r\n self._data.append(meta)", "def CleanEpi(self):\n for entry in self.info.keys():\n info = self.info[entry]\n if info['psdname'] == 'epi':\n for tag in ('imgfile', 'imgfile_m', 'imgfile_mf', 'imgfile_t'):\n if info.has_key(tag) and info[tag] is not None and \\\n os.path.exists(info[tag]):\n print 'Deleting %s*' % (info[tag], info['suffix'])\n cmd = '/bin/rm %s%s*' % (info[tag], info['suffix'])\n self.ExecCmd(cmd)\n if '.BRIK' in info['suffix']:\n cmd = '/bin/rm %s%s*' % (info[tag], \\\n info['suffix'].replace('.BRIK','.HEAD'))\n self.ExecCmd(cmd)", "def make_pmodel_energies():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n data = pmodel.load_data(\"test_data/protein_load/traj/traj_test.xtc\")\n heps, dheps = pmodel.get_potentials_epsilon(data)\n\n true_energies = np.loadtxt(\"test_data/protein_load/traj/energy_gaussian_test.dat\")\n\n return pmodel, data, heps, dheps, true_energies", "def setESFiles(self, eSourceDir = None, verbose = False):\n\n print('\\n***Setting electronic structure files')\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc':\n # Check and set electronic structure file for packaging.\n if '***Missing' in self.nbDetails[key]['jobInfo'][2]:\n self.nbDetails[key]['elecStructure'] = None\n else:\n if eSourceDir is not None:\n # Copy electronic structure files to package using supplied path\n fileName = Path(self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\"))\n self.nbDetails[key]['elecStructure'] = Path(eSourceDir, fileName.name).as_posix()\n\n else:\n # Copy electronic structure files to package, based on full path from original job\n self.nbDetails[key]['elecStructure'] = self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\")\n\n checkList = self.checkFiles(self.nbDetails[key]['elecStructure'])\n\n # If file is missing, set to \"missing\"\n if not checkList[0]:\n self.nbDetails[key]['elecStructure'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n\n # If file is present, check also for corresponding files\n else:\n # Assuming above is molden file, check also for corresponding Gamess file\n gFile = Path(self.nbDetails[key]['elecStructure']).with_suffix('.log')\n checkList = self.checkFiles(gFile)\n if checkList[0]:\n # self.nbDetails[key]['elecStructure'].append(gFile.as_posix()) # Set here to append... 
hopefully works OK with arch update code...\n self.nbDetails[key]['elecStructureGamess'] = gFile.as_posix() # Set here as separate item\n else:\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {gFile.as_posix()}\"\n #\n\n if verbose:\n print(f\"Job {key}: {self.nbDetails[key]['title']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructure']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructureGamess']}\")", "def load_PSF_data(self):\n self.epsf = {}\n for filter in ['F105W', 'F125W', 'F140W', 'F160W']:\n file = os.path.join(os.getenv('GRIZLI'), 'CONF',\n 'PSFSTD_WFC3IR_{0}.fits'.format(filter))\n \n data = pyfits.open(file)[0].data.T\n data[data < 0] = 0 \n \n self.epsf[filter] = data\n \n # Dummy, use F105W ePSF for F098M and F110W\n self.epsf['F098M'] = self.epsf['F105W']\n self.epsf['F110W'] = self.epsf['F105W']\n \n # Extended\n self.extended_epsf = {}\n for filter in ['F105W', 'F125W', 'F140W', 'F160W']:\n file = os.path.join(os.getenv('GRIZLI'), 'CONF',\n 'extended_PSF_{0}.fits'.format(filter))\n \n if not os.path.exists(file):\n msg = 'Extended PSF file \\'{0}\\' not found.'.format(file)\n msg += '\\n Get the archive from http://www.stsci.edu/~brammer/Grizli/Files/WFC3IR_extended_PSF.v1.tar.gz'\n msg += '\\n and unpack in ${GRIZLI}/CONF/' \n raise FileNotFoundError(msg)\n \n data = pyfits.open(file)[0].data#.T\n data[data < 0] = 0 \n \n # Mask center\n NX = data.shape[0]/2-1\n yp, xp = np.indices(data.shape)\n R = np.sqrt((xp-NX)**2+(yp-NX)**2)\n data[R <= 4] = 0.\n \n self.extended_epsf[filter] = data\n self.extended_N = int(NX)\n \n self.extended_epsf['F098M'] = self.extended_epsf['F105W']\n self.extended_epsf['F110W'] = self.extended_epsf['F105W']", "def extract_data(filename, id_ep, conn):\n\n try:\n tree = ET.parse(filename)\n root = tree.getroot()\n\n # creation of a speaker's dict to manage the local ids (specific speakers' id file-dependent)\n speakers = {}\n # creation of a iterable to identify the unknown/unnamed speakers\n uknw_id = 0\n\n names = []\n cur = conn.cursor()\n cur.execute(\"SELECT name FROM speaker\")\n rows = cur.fetchall()\n i = 0\n for row in rows:\n names.append(row[0])\n i += 1\n\n # creation of speakers\n for spkr in root.iter(\"Speaker\"):\n name = spkr.attrib[\"name\"]\n if ((\",\" in name) or (\"sup+\" in name) or (\"Sup+\" in name)):\n name = \"multi_spk\"\n elif ((\"spk\" in name) or (\"speaker\" in name) or (\"Inconnu\" in name) or (\"unknown\" in name)):\n name = \"spk_\"+str(id_ep)+\"_\"+str(uknw_id)\n uknw_id += 1\n else :\n n = name.split(\" \")\n name = n[0]\n if len(n) > 1:\n for i in range(1, len(n)):\n name += \"_\" + n[i].upper()\n\n if \"type\" in spkr.attrib:\n if spkr.attrib[\"type\"] not in (\"male\", \"female\"):\n gender = \"NA\"\n else:\n gender = spkr.attrib[\"type\"]\n else:\n gender = \"NA\"\n\n if \"dialect\" in spkr.attrib:\n native = spkr.attrib[\"dialect\"]\n else:\n native = \"NA\"\n\n # speaker added only if doesn't already exists in the database\n if name not in names:\n new_speaker = (name, gender, native)\n create_speaker(conn, new_speaker)\n\n # update of the local id->name dictionary\n speakers[spkr.attrib['id']] = name\n\n # creation of turns\n for turn in root.iter(\"Turn\"):\n\n if \"speaker\" in turn.attrib:\n if len(turn.attrib[\"speaker\"]) > 5:\n speaker_name = \"multi_spk\"\n else:\n speaker_name = speakers[turn.attrib[\"speaker\"]]\n start_time = turn.attrib[\"startTime\"]\n end_time = turn.attrib[\"endTime\"]\n\n cur = conn.cursor()\n cur.execute(\"SELECT id_speaker FROM 
speaker WHERE name=?\", (speaker_name,))\n id_speaker = cur.fetchone()[0]\n id_episode = id_ep\n trans = ET.tostring(turn, \"ISO-8859-1\", method=\"text\")\n trans = trans.decode(\"ISO-8859-1\")\n trans = re.sub(\"\\n\", \" \", trans)\n trans = re.sub(\" \", \" \", trans)\n\n new_turn = (start_time, end_time, id_speaker, id_episode, trans)\n create_turn(conn, new_turn)\n\n except Exception as e:\n print(e)\n print(filename)\n pass", "def elastixTemplates():\n\t\ttransformations = []\n\t\tfileNames = os.listdir(AppVars.transformationsPath())\n\t\tfor fileName in fileNames:\n\t\t\tfullFileName = os.path.join(AppVars.transformationsPath(), fileName)\n\t\t\ttransformation = ParameterList()\n\t\t\tif transformation.loadFromFile(fullFileName):\n\t\t\t\ttransformations.append(transformation)\n\t\treturn transformations", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def _extract_models(self, name, from_dict):\n \"\"\"if name == imageset: Extract imageset objects from the source.\n\n This function does resolving of an (old) method of imageset lookup\n e.g. it was valid to have a string as the imageset value in an\n experiment instead of an int - in which case the imageset was\n loaded from the named file in the target directory.\n\n If any experiments point to a file in this way, the imageset is\n loaded and the experiment is rewritted with an integer pointing\n to the new ImageSet in the returned list.\n\n Returns:\n The ordered list of serialized-ImageSet dictionaries\n that the Experiment list points to.\n \"\"\"\n\n # Extract all the model list\n mlist = self._obj.get(name, [])\n\n # Convert the model from dictionary to concreate\n # python class for the model.\n mlist = [from_dict(d) for d in mlist]\n\n # Dictionaries for file mappings\n mmap = {}\n\n # For each experiment, check the model is not specified by\n # a path, if it is then get the dictionary of the model\n # and insert it into the list. 
Replace the path reference\n # with an index\n for eobj in self._obj[\"experiment\"]:\n value = eobj.get(name)\n if value is None:\n continue\n elif isinstance(value, str):\n if value not in mmap:\n mmap[value] = len(mlist)\n mlist.append(\n from_dict(_experimentlist_from_file(value, self._directory))\n )\n eobj[name] = mmap[value]\n elif not isinstance(value, int):\n raise TypeError(\"expected int or str, got %s\" % type(value))\n\n # Return the model list\n return mlist", "def extract_all_lazy():\n\n\t#Construct filepaths: Data COMP_INFO_1\n\tdata_ci1_name = \"DATA_2016_COMP_INFO_1.csv\"\n\tdata_ci1_fullname = os.path.join(files_location, data_ci1_name)\n\t#Data COMP_INFO_2\n\tdata_ci2_name = \"DATA_2016_COMP_INFO_2.csv\"\n\tdata_ci2_fullname = os.path.join(files_location, data_ci2_name)\n\t#Data PROPERTY INFO\n\tdata_pi_name = \"DATA_2016_PROPERTY_INFO_ST.csv\"\n\tdata_pi_fullname = os.path.join(files_location, data_pi_name)\n\t#Data General Info\n\tdata_gi_name = \"DATA_2016_GENERAL_INFO.csv\"\n\tdata_gi_fullname = os.path.join(files_location, data_gi_name)\n\n\t#Read & Process COMP_INFO\n\tdata_ci1 = pd.read_csv(data_ci1_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\tdata_ci2 = pd.read_csv(data_ci2_fullname, skiprows=2, usecols = constants.keep_columns_CI, encoding='ISO-8859-1')\n\n\tdata_ci = data_ci1.append(data_ci2)\n\tdata_ci['QUESTION'] = data_ci['QUESTION'].replace(constants.ci_mapping)\n\t# Take only the survey questions mapped\n\tdata_ci = data_ci[data_ci['QUESTION'].isin(constants.ci_mapping.values())]\n\tdata_ci = data_ci.set_index(['PROPERTY_CODE','PROPERTY_NAME','JOB_CODE','POSITION'])\n\tdata_ci = data_ci.pivot(columns=\"QUESTION\")\n\tdata_ci.columns = [\"_\".join(pair) for pair in data_ci.columns]\n\tdata_ci = data_ci.reset_index()\n\n\t#Read & Process Property Info data\n\tdata_pi = pd.read_csv(data_pi_fullname, usecols = constants.keep_columns_PI, encoding='ISO-8859-1')\n\t#survey_type_transformed = transform.surveytype_categorical(data_pi)\n\t#data_pi = pd.merge(data_pi, survey_type_transformed, on=['PROPERTY_CODE'])\n\n\t#Read & Process General Info\n\tdata_gi = pd.read_csv(data_gi_fullname, skiprows = 2, usecols = constants.keep_columns_GI, encoding='ISO-8859-1')\n\tdata_gi['QUESTION'] = data_gi['QUESTION'].replace(constants.gi_mapping)\n\t# Take onl the survey questions mapped\n\tdata_gi = data_gi[data_gi['QUESTION'].isin(constants.gi_mapping.values())]\n\tdata_gi = data_gi.set_index(['PROPERTY_CODE','PROPERTY_NAME'])\n\tdata_gi = data_gi.pivot(columns=\"QUESTION\")\n\tdata_gi.columns = [\"_\".join(pair) for pair in data_gi.columns]\n\tdata_gi = data_gi.reset_index()\n\n\t#This frame needs to be reworked\n\td_ci = pd.merge(data_gi, data_pi, on = ['PROPERTY_CODE','PROPERTY_NAME'])\n\td_ci = pd.merge(d_ci, data_ci, on = ['PROPERTY_CODE','PROPERTY_NAME'],suffixes= ['_ci','_gi'])\n\n\t#Observations by Dimensions to determine top X markets\n\t#Can this be in a better position?\n\td_ci = d_ci[~(d_ci['PROPERTY_NAME'].isin(constants.del_rows_property_name))]\n\td_ci['POSITION'] = d_ci['POSITION'].astype(str)\n\n\tpayload = {}\n\tpayload['gi'] = data_gi\n\tpayload['pi'] = data_pi\n\tpayload['ci'] = data_ci\n\tpayload['d_ci'] = d_ci\n\n\treturn payload", "def parse( cls, filename, verbose = False ) :\n if verbose : sys.stdout.write( \"%s.parse(%s)\\n\" % (cls.__name__, filename,) )\n\n infile = os.path.realpath( filename )\n dat = cls( verbose )\n\n with open( infile, \"rU\" ) as inf :\n expt_num = None\n for line in inf :\n if verbose 
:\n sys.stdout.write( line )\n\n m = dat.version_pat.search( line )\n if m :\n dat.version = m.group( 1 )\n continue\n\n m = dat.expt_pat.search( line )\n if m :\n expt_num = int( m.group( 1 ) )\n par_set = m.group( 2 ).upper()\n\n if not par_set in bmrbmb.topspin.EXPERIMENTS.keys() :\n raise Exception( \"Unknown experiment parameter set: %s\" % (m.group( 2 ),) )\n\n# adapted sweep width HSQC\n#\n if (par_set == \"HSQCETGP\") and (m.group( 3 ) is not None) :\n expt_name = \"2D 1H-13C HSQC SW small\"\n else :\n expt_name = bmrbmb.topspin.EXPERIMENTS[par_set]\n\n dat.data[expt_num] = { \"name\" : expt_name }\n\n# next line should have experiment details\n# 1 or 2D only\n#\n\n m = dat.dim_pat.search( line )\n if m :\n if expt_num is None :\n raise Exception( \"Experiment detail without parameter set\" )\n\n dims = { m.group( 1 ) : { \"nuc\" : m.group( 2 ), \"sw\" : m.group( 3 ) } }\n if m.group( 4 ) is not None :\n dims[m.group( 4 )] = { \"nuc\" : m.group( 5 ), \"sw\" : m.group( 6 ) }\n\n dat.data[expt_num][\"dims\"] = dims\n\n expt_num = None\n\n return dat", "def load_host_seq_prep(prep_data):\n module_logger.info(\"Creating a template %s.\", __name__)\n prep = HostSeqPrep()\n\n module_logger.debug(\"Filling in %s details.\", __name__)\n\n # The attributes commmon to all iHMP nodes\n prep._set_id(prep_data['id'])\n prep.version = prep_data['ver']\n prep.links = prep_data['linkage']\n prep.tags = prep_data['meta']['tags']\n\n # The attributes that are particular to HostSeqPrep documents\n prep.comment = prep_data['meta']['comment']\n prep.lib_layout = prep_data['meta']['lib_layout']\n prep.lib_selection = prep_data['meta']['lib_selection']\n prep.ncbi_taxon_id = prep_data['meta']['ncbi_taxon_id']\n prep.prep_id = prep_data['meta']['prep_id']\n prep.sequencing_center = prep_data['meta']['sequencing_center']\n prep.sequencing_contact = prep_data['meta']['sequencing_contact']\n prep.storage_duration = prep_data['meta']['storage_duration']\n\n if 'adapters' in prep_data['meta']:\n module_logger.info(\"%s data has 'adapters' present.\", __name__)\n prep.adapters = prep_data['meta']['adapters']\n\n if 'experimental_factor' in prep_data['meta']:\n module_logger.info(\"%s data has 'experimental_factor' present.\", __name__)\n prep.experimental_factor = prep_data['meta']['experimental_factor']\n\n if 'findex' in prep_data['meta']:\n module_logger.info(\"%s data has 'findex' present.\", __name__)\n prep.findex = prep_data['meta']['findex']\n\n if 'frag_size' in prep_data['meta']:\n module_logger.info(\"%s data has 'frag_size' present.\", __name__)\n prep.frag_size = prep_data['meta']['frag_size']\n\n if 'lib_const_meth' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_const_meth' present.\", __name__)\n prep.lib_const_meth = prep_data['meta']['lib_const_meth']\n\n if 'lib_screen' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_screen' present.\", __name__)\n prep.lib_screen = prep_data['meta']['lib_screen']\n\n if 'lib_size' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_size' present.\", __name__)\n prep.lib_size = prep_data['meta']['lib_size']\n\n if 'lib_vector' in prep_data['meta']:\n module_logger.info(\"%s data has 'lib_vector' present.\", __name__)\n prep.lib_vector = prep_data['meta']['lib_vector']\n\n if 'mims' in prep_data['meta']:\n module_logger.info(\"%s data has 'mims' present.\", __name__)\n prep.mims = prep_data['meta']['mims']\n\n if 'nucl_acid_amp' in prep_data['meta']:\n module_logger.info(\"%s data has 'nucl_acid_amp' present.\", 
__name__)\n prep.nucl_acid_amp = prep_data['meta']['nucl_acid_amp']\n\n if 'nucl_acid_ext' in prep_data['meta']:\n module_logger.info(\"%s data has 'nucl_acid_amp' present.\", __name__)\n prep.nucl_acid_ext = prep_data['meta']['nucl_acid_ext']\n\n if 'rindex' in prep_data['meta']:\n module_logger.info(\"%s data has 'rindex' present.\", __name__)\n prep.rindex = prep_data['meta']['rindex']\n\n if 'samp_mat_process' in prep_data['meta']:\n module_logger.info(\"%s data has 'samp_mat_process' present.\", __name__)\n prep.samp_mat_process = prep_data['meta']['samp_mat_process']\n\n if 'srs_id' in prep_data['meta']:\n module_logger.info(\"%s data has 'srs_id' present.\", __name__)\n prep.srs_id = prep_data['meta']['srs_id']\n\n module_logger.debug(\"Returning loaded %s\", __name__)\n\n return prep", "def _SetFmapInfo(self):\n for epi in self.pfiles + self.epirt_paths:\n self.info[epi]['fmapname'] = None\n self.info[epi]['fmap_entry'] = None\n for entry in self.entry_map['fmap']:\n fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']\n if self.info[entry]['plane'] == self.info[epi]['plane']:\n# Use the fieldmap acquired at the same plane.\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n else:\n# for fmap in self.fmaps.keys():\n for entry in self.entry_map['fmap']:\n# No fmap at same orientation, look for fmaps in other planes.\n# There won't be more than one, so it isn't much of a choice.\n fmap_name = self.info[entry]['imgfile'] + \\\n self.info[entry]['suffix']\n if self.info[entry]['plane'] == 'sagittal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'axial':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'coronal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'oblique':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n self.info[epi]['plane'] = 'oblique'\n break", "def _make_image_info_des(self, flistname):\n\n flist=[]\n psfex_flist=[]\n magzp_list=[]\n with open(flistname) as fobj:\n for line in fobj:\n ls = line.split()\n fname = ls[0]\n magzp = float(ls[1])\n magzp_list.append(magzp)\n\n flist.append(fname)\n\n psfex_fname = fname.replace('.fits.fz','_psfcat.psf')\n psfex_flist.append(psfex_fname)\n\n nimage = len(flist)\n magzp = np.array(magzp_list)\n\n path_len = max([len(f) for f in flist])\n psfex_path_len = max([len(f) for f in psfex_flist])\n\n try:\n ext_len = len(self['image_ext'])\n except:\n ext_len=None\n\n extra_dtype = [\n ('psfex_path','U%d' % psfex_path_len),\n ]\n\n #image_info = meds.util.get_image_info_struct(\n image_info = get_image_info_struct(\n nimage,\n path_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n image_info['position_offset'] = 1\n image_info['image_ext'] = self['image_ext']\n image_info['weight_ext'] = self['weight_ext']\n\n for i,f in enumerate(flist):\n image_info['image_id'][i] = i\n image_info['image_path'][i] = f\n image_info['weight_path'][i] = f\n image_info['psfex_path'][i] = psfex_flist[i]\n\n image_info['magzp'] = magzp\n image_info['scale'] = self._get_scale_from_magzp(magzp)\n return image_info", "def collect(fdr):\n eot = []\n hof = []\n pcs = []\n # __n = 0\n for fname in sorted(ls(fdr)):\n if not fname.endswith('pgz'):\n continue\n # if not __n < 10:\n # break\n # __n = __n + 1\n fname = pt.join(fdr, fname)\n 
print(fname)\n output = lpz(fname)\n eot.append(output['eot'])\n hof.append(output['hof'])\n if not isinstance(output['pcs'], Exception):\n pcs.append(output['pcs'][:, 0:16])\n\n import numpy as np\n eot = np.array(eot, 'f')\n hof = np.array(hof, 'f')\n pcs = np.array(pcs, 'f')\n hof = np.transpose(hof, [1, 2, 0])\n pcs = np.transpose(pcs, [1, 2, 0])\n ret = {'eot': eot, 'hof': hof, 'pcs': pcs}\n return ret", "def __init__(self, epics_only=False, *args, **kwargs):\n self._kwargs = {}\n self._detectors = {}\n self._det_list = [] \n self._det_aliases = {}\n self._psplots = {}\n self._event_functions = {}\n self._source_attrs = []\n self._evt_time_last = (0,0)\n self.ievent = 0\n self._reloadOnLoadRun = False\n self._reloadOnNextEvent = False\n self.psana_cfg_dict = {}\n self._default_module_path = ''\n\n# self._user_attrs = {}\n# self._histograms = {}\n \n for key in kwargs:\n self._kwargs[key] = kwargs[key] \n if key in self._exp_defaults:\n setattr(self,key,kwargs[key])\n print 'setting ',key, kwargs[key]\n\n self._device_config = read_device_config(**kwargs)\n self._device_sets = self._device_config['device_sets'] \n self._device_types = self._device_config['device_types'] \n\n for det in self._device_sets:\n if 'det' in self._device_sets[det]:\n if ('detName' in self._device_sets[det]['det'] or\n 'typeName' in self._device_sets[det]['det']):\n self._det_list.append(det)\n if 'det_key' in self._device_sets[det]['det']:\n det_key = self._device_sets[det]['det']['det_key']\n self._det_aliases[det_key] = det \n else:\n pass\n \n# if 'pvs' in self._device_sets[det]:\n# for attr in self._device_sets[det]['pvs']:\n# pvbase = self._device_sets[det]['pvs'][attr]['base']\n# alias = '_'.join([det,attr])\n# self.add_pv(pvbase, alias)\n\n self.set_exp_defaults(**kwargs)\n if not self._kwargs.get('noload'):\n self.data_source = self.get_data_source(**kwargs)\n print 'Data Source = ', self.data_source\n else:\n self.data_source = None\n\n if not self.data_source:\n self._kwargs['noload'] = True\n else:\n kwargs['run'] = self.run\n\n# if self._kwargs.get('noload') or self.live:\n# if self._kwargs.get('epics_live'):\n# self.set_kwargs(ami=True)\n \n if self._kwargs.get('ami'):\n print 'loading ami'\n self.load_ami(**kwargs)\n\n if not self._kwargs.get('noload'):\n print 'loading run'\n self.load_run(*args, **kwargs)\n self._no_epicsStore = False\n \n print 'Instrument = ', self.instrument\n\n if self._kwargs.get('epics_live'): # and self._kwargs.get('epics_file'):\n print 'loading epics'\n self.load_epicsLive(**kwargs)\n\n if self.ds and self.live:\n self.next_event()\n \n if self.ds and self._reloadOnNextEvent:\n self.next_event()\n \n if not self.ds:\n self._no_epicsStore = True\n self._no_evtData = True\n for det in self._device_sets:\n if 'pvs' in self._device_sets[det]:\n print 'Adding epics ',det\n self.add_detector(det)", "def pssplit2eps(filename, npages):\n\tfor page in range(npages):\n\t\tos.system(\"psselect -p\"+str(page)+\" \"+filename+\" \"+filename[:-3]+\"_\"+str(page).zfill(3)+\".ps\") \n\t\tos.system(\"ps2eps -f \"+filename[:-3]+\"_\"+str(page).zfill(3)+\".ps\")\n\t\tos.system(\"rm \"+filename[:-3]+\"_\"+str(page).zfill(3)+\".ps\")", "def _parse(self, data):\n pe = construct.PEFILE_HEADER.parse(data)\n\n # Convert Container classes to use ours, so we can deepcopy.\n pe = Container.from_container(pe)\n\n # Convert the FlagEnums into list of constants.\n for section in pe.SectionTable:\n if isinstance(section.Characteristics, dict):\n section.Characteristics = [flag for flag, value in 
section.Characteristics.items() if value]\n section.data = data[section.PointerToRawData:section.PointerToRawData + section.SizeOfRawData]\n file_header = pe.NTHeaders.FileHeader\n if isinstance(file_header.Characteristics, dict):\n file_header.Characteristics = [\n flag for flag, value in file_header.Characteristics.items() if value]\n optional_header = pe.NTHeaders.OptionalHeader\n if isinstance(optional_header.DllCharacteristics, dict):\n optional_header.DllCharacteristics = [\n flag for flag, value in optional_header.DllCharacteristics.items() if value]\n\n self.update(pe)", "def from_args(args, verbose=False, unhandled=None):\n\n # Create a list for unhandled arguments\n if unhandled is None:\n unhandled = []\n\n experiments = ExperimentList()\n ## First try as image files\n # experiments = ExperimentListFactory.from_datablock(\n # DataBlockFactory.from_args(args, verbose, unhandled1))\n\n # Try to load from serialized formats\n for filename in args:\n try:\n experiments.extend(\n ExperimentListFactory.from_serialized_format(filename)\n )\n if verbose:\n print(\"Loaded experiments from %s\" % filename)\n except Exception as e:\n if verbose:\n print(\"Could not load experiments from %s: %s\" % (filename, str(e)))\n unhandled.append(filename)\n\n # Return the experiments\n return experiments", "def decode_pes(self, pes: bytes)-> PES.PES:\n pesdk = PES.PES()\n try:\n pesdk.stream_id, PES_packet_length = struct.unpack('>BH', pes[0:3])\n if pesdk.stream_id not in [33, 188, 190, 191, 240, 241, 242, 248, 255]:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 190 (0xBE) - padding_stream\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n if pesdk.stream_id >> 4 == 14:\n pesdk.stream_type = 'video-stream'\n pesdk.stream_number = (pesdk.stream_id & 15)\n elif pesdk.stream_id >> 5 == 6:\n pesdk.stream_type = 'audio-stream'\n pesdk.stream_number = (pesdk.stream_id & 31)\n b1, b2, PES_header_data_length = struct.unpack('>BBB', pes[3:6])\n pesdk.PES_scrambling_control = (b1 & 16) >> 4\n # PES_priority = bool((b1 & 8) >> 3)\n # data_alignment_indicator = bool((b1 & 4) >> 2)\n pesdk.copyright = bool((b1 & 2) >> 1)\n pesdk.original_or_copy = bool(b1 & 1)\n pesdk.PTS_DTS_flags = (b2 & 192) >> 6\n pesdk.ESCR_flag = bool((b2 & 32) >> 5)\n pesdk.ES_rate_flag = bool((b2 & 16) >> 4)\n pesdk.DSM_trick_mode_flag = bool((b2 & 8) >> 3)\n pesdk.additional_copy_info_flag = bool((b2 & 4) >> 2)\n pesdk.PES_CRC_flag = bool((b2 & 2) >> 1)\n pesdk.PES_extension_flag = bool(b2 & 1)\n pos = 6\n if pesdk.PTS_DTS_flags in [2, 3]:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos+5])\n pesdk.PTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n if pesdk.PTS_DTS_flags == 3:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos + 5])\n pesdk.DTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n elif pesdk.stream_id == 190:\n # 190 (0xBE) - padding_stream\n pass\n else:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. 
H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n pass\n return pesdk\n except Exception as err:\n logging.warning('PES parsing error:' + str(err))\n return None", "def file_setup(self):\n #output a .edf file if the input is txt\n if self.args.input_format == 'txt':\n signals = []\n headers = []\n \n #read sample frequency from a .xml file\n if self.args.is_test:\n self.sample_rate = 1024\n else:\n xml_file = open(self.args.input_path + self.args.input_name + '.xml', \"r\")\n xml_content = xml_file.read()\n my_ordered_dict = xmltodict.parse(xml_content)\n dict = json.loads(json.dumps(my_ordered_dict))\n self.sample_rate = eval(dict['RECORD_INFO']['Record']['SamplesFreq'])\n \n #define header, needed for .edf file\n# header = {'label':'ch_name', \n# 'dimension': 'uV',\n# 'sample_rate': self.sample_rate,\n# 'physical_max': 5000,\n# \"physical_min\": -5000,\n# 'digital_max': 5000,\n# 'digital_min': -5000,\n# 'transducer': 'None',\n# 'prefilter': 'None'}\n\n# j = 0\n for i in self.files:\n if i[-3:] != 'xml' and i[-4:] != 'xysw':\n raw = np.loadtxt(self.args.input_path + i)\n self.physical_max.append(np.max(raw))\n self.physical_min.append(np.min(raw))\n \n \n signals.append(raw)\n# new_header = header.copy()\n# new_header['label'] = 'ch' + str(j)\n# new_header['physical_max'] = np.max(raw)\n# new_header['physical_min'] = np.min(raw)\n\n# j = j+1\n# headers.append(new_header)\n self.ch_num = self.ch_num+1\n \n #write edf\n with open(self.output_edf_original, 'w') as output:\n flag = pyedflib.highlevel.write_edf_quick(output.name, signals, self.sample_rate, digital=False)\n if flag == False:\n print('unable to save file into .edf')\n exit()\n else:\n print('txt data loaded into edf, edf saved at ./output_edf as: ' + self.output_edf_original)\n self.raw=mne.io.read_raw_edf(self.output_edf_original,preload=True)\n self.ch_names = self.raw.ch_names\n \n #if already a .edf\n elif self.args.input_format == 'bdf':\n self.raw = mne.io.read_raw_bdf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format == 'edf':\n self.raw = mne.io.read_raw_edf(self.args.input_path + self.files[0], preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n elif self.args.input_format =='mne':\n mne_exp = mne.datasets.eegbci.load_data(1, 2, path=None, force_update=False, update_path=None, base_url='https://physionet.org/files/eegmmidb/1.0.0/', verbose=None)[0]\n self.raw = mne.io.read_raw_edf(mne_exp, preload = True)\n self.ch_num = len(self.raw.ch_names)\n self.ch_names = self.raw.ch_names\n self.sample_rate = self.raw.info['sfreq']\n \n \n return self.raw", "def input_models():\n return [\n PDBFile(\n Path(golden_data, \"protdna_complex_1.pdb\"),\n path=golden_data,\n score=42.0,\n restr_fname=Path(golden_data, \"example_ambig_1.tbl\")\n ),\n PDBFile(\n Path(golden_data, \"protdna_complex_2.pdb\"),\n path=golden_data,\n score=28.0,\n restr_fname=Path(golden_data, \"example_ambig_2.tbl\")\n )]", "def _prep_items_from_base(base, in_files, metadata, separators, force_single=False):\n details = []\n # in_files = _expand_dirs(in_files, KNOWN_EXTS) ##将in_files 里边的目录转换成KNOWN_EXTS后缀的文件\n # in_files = _expand_wildcards(in_files)\n\n ext_groups = collections.defaultdict(list)\n for ext, files in itertools.groupby(\n in_files, lambda x: KNOWN_EXTS.get(utils.splitext_plus(x)[-1].lower())):\n 
ext_groups[ext].extend(list(files))\n for ext, files in ext_groups.items():\n if ext == \"bam\":\n for f in files:\n details.append(_prep_bam_input(f, base))\n elif ext in [\"fastq\", \"fq\", \"fasta\"]:\n files, glob_files = _find_glob_matches(files, metadata)\n for fs in glob_files:\n details.append(_prep_fastq_input(fs, base))\n for fs in fastq.combine_pairs(files, force_single, separators=separators):\n details.append(_prep_fastq_input(fs, base))\n elif ext in [\"vcf\"]:\n for f in files:\n details.append(_prep_vcf_input(f, base))\n else:\n print(\"Ignoring unexpected input file types %s: %s\" % (ext, list(files)))\n return details", "def _extract_from_egg(toc):\n new_toc = []\n for item in toc:\n # Item is a tupple\n # (mod_name, path, type)\n modname, pth, typ = item\n if not os.path.isfile(pth):\n pth = check_extract_from_egg(pth)[0][0]\n\n # Add value to new data structure.\n new_toc.append((modname, pth, typ))\n return new_toc", "def _read_expression_direct(cls):\n\n expression_data = {}\n expression_columns = cls._get_columns(EXPRESSION_MANIFEST)\n expression_psvs = cls._get_component_psvs(EXPRESSION_MANIFEST)\n\n for expression_psv in expression_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(expression_psv))):\n row_dict = dict(zip(expression_columns, row.strip().split(b'|')))\n expression_data.setdefault(\n row_dict[\"cellkey\"].decode(), {})[row_dict[\"featurekey\"].decode()] = \\\n float(row_dict[\"exrpvalue\"])\n\n return expression_data", "def transform(self):\n count=1\n assert len(self.list_folder)>=1 ,\"FILES NOT FOUND\"\n for i,folder in enumerate(self.list_folder):\n path=folder\n for j,pdf in enumerate(os.listdir(path)):\n if pdf!= '.DS_Store':\n self.df.loc[count] = [pdf,folder.split('/')[-2], i+1,None,None]\n \n \"\"\" 0- Read Pdf file \"\"\"\n raw = parser.from_file(os.path.join(path,pdf))\n s = raw['content']\n \n \"\"\" 1- Handle linebreaks to optimize TextBlob.sentences results\"\"\"\n s=self.treat_new_line(s)\n \n \"\"\" 2- Divide text by sentences using TextBlob\"\"\"\n blob=TextBlob(s)\n paragraphs = np.array([str(s) for s in blob.sentences],dtype=str)\n self.parser = []\n self.parser_raw=[]\n p=self.text_processor_pdf(paragraphs)\n \n \"\"\"\n 3- Get rid of bad text data:\n Discard sentences with too long word (16 is the 99% quantile in english)\n Discard sentences with too much upper words (CREDENTIALS, Link, TITLE ..)\n \"\"\"\n index_=[i for i,c in enumerate(self.parser) if (True in [len(w)>=16 for w in c.split()] )]\n index_raw=[i for i,c in enumerate(self.parser_raw) if np.sum([w==w.upper() for w in c.split()])>=4]\n index=list(set(index_ + index_raw))\n self.df.loc[count,'paragraphs']=np.delete(np.array(self.parser),index)\n self.df.loc[count,'raw paragraphs']=np.delete(np.array(self.parser_raw),index)\n count+=1\n \n print(\"files from {} succesfully converted \".format(folder))\n \n return self.df", "def setup(self):\n if not isinstance(self.files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n return self.files", "def run(input_f, output_f):\n with open(input_f) as f:\n lines = f.readlines()\n\n # Collect the array dims\n PE_defs = []\n module_start = False\n is_PE = False \n PE_indices = []\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('Module Definition') != -1:\n module_start = not module_start\n if module_start:\n module_start_pos = line_id\n is_PE = False\n else:\n module_end_pos = line_id\n if is_PE:\n PE_defs.append({'def': lines[module_start_pos : 
module_end_pos + 1], \\\n 'pos': [module_start_pos, module_end_pos]})\n if module_start:\n #print(line_id)\n nxt_line_id = line_id + 1\n while nxt_line_id < len(lines): \n nxt_line = lines[nxt_line_id]\n if nxt_line.find('kernel void PE') != -1:\n is_PE = True\n m = re.search(r'void PE(.+?)\\(', nxt_line)\n #print(nxt_line)\n if m:\n PE_index = m.group(1).split('_')[1:]\n PE_indices.append(PE_index)\n if is_PE:\n break\n if nxt_line.find('Module Definition') != -1:\n break\n nxt_line_id += 1\n\n #print(PE_indices)\n PE_dims = [int(d) for d in PE_indices[0]]\n for ind in PE_indices:\n for dim in range(len(PE_dims)):\n PE_dims[dim] = max(PE_dims[dim], int(ind[dim]) + 1)\n #print(PE_dims)\n \n PE_lines = PE_defs[0]['def']\n # Parse the data transfer information\n data_trans_info = extract_data_trans_info(PE_lines, PE_dims) \n\n # Compose the new PE function\n PE_lines = compose_PE(data_trans_info, PE_dims, PE_defs)\n\n line_offset = 0\n for PE_def in PE_defs:\n lines = lines[:PE_def['pos'][0] - line_offset] + lines[PE_def['pos'][1] + 1 - line_offset:]\n line_offset += (PE_def['pos'][1] - PE_def['pos'][0] + 1)\n\n lines = lines + PE_lines\n\n # Modify the channels\n lines = modify_channels(lines, data_trans_info, PE_dims)\n\n with open(output_f, 'w') as f:\n for line in lines:\n f.write(line)\n # f.writelines(PE_lines)", "def load_prepared(subject):\n raw_cleaned = mne.io.read_raw_fif('ICA_cleaned/' + subject +'_ica_cleaned.fif', preload=True)\n raw_cleaned.set_eeg_reference(ref_channels=None)\n raw_cleaned = raw_cleaned.apply_proj()\n \n events_cleaned = np.load('mne_events/mne_'+ subject +'.npy')\n events_dict = np.load('mne_events/code_dict_' + subject +'.npy').item()\n return raw_cleaned, events_cleaned,events_dict", "def add_catalogs(self):\n n_exposures = len(self.info['Module'])\n self.info['point_source'] = [None] * n_exposures\n self.info['galaxyListFile'] = [None] * n_exposures\n self.info['extended'] = [None] * n_exposures\n self.info['convolveExtended'] = [False] * n_exposures\n self.info['movingTarg'] = [None] * n_exposures\n self.info['movingTargSersic'] = [None] * n_exposures\n self.info['movingTargExtended'] = [None] * n_exposures\n self.info['movingTargToTrack'] = [None] * n_exposures\n\n for i in range(n_exposures):\n if int(self.info['detector'][i][-1]) < 5:\n filtkey = 'ShortFilter'\n pupilkey = 'ShortPupil'\n else:\n filtkey = 'LongFilter'\n pupilkey = 'LongPupil'\n filt = self.info[filtkey][i]\n pup = self.info[pupilkey][i]\n\n if self.point_source[i] is not None:\n # In here, we assume the user provided a catalog to go with each filter\n # so now we need to find the filter for each entry and generate a list that makes sense\n self.info['point_source'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.point_source, 'point source')))\n else:\n self.info['point_source'][i] = None\n if self.galaxyListFile[i] is not None:\n self.info['galaxyListFile'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.galaxyListFile, 'galaxy')))\n else:\n self.info['galaxyListFile'][i] = None\n if self.extended[i] is not None:\n self.info['extended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.extended, 'extended')))\n else:\n self.info['extended'][i] = None\n if self.movingTarg[i] is not None:\n self.info['movingTarg'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTarg, 'moving point source target')))\n else:\n self.info['movingTarg'][i] = None\n if 
self.movingTargSersic[i] is not None:\n self.info['movingTargSersic'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargSersic, 'moving sersic target')))\n else:\n self.info['movingTargSersic'][i] = None\n if self.movingTargExtended[i] is not None:\n self.info['movingTargExtended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargExtended, 'moving extended target')))\n else:\n self.info['movingTargExtended'][i] = None\n if self.movingTargToTrack[i] is not None:\n self.info['movingTargToTrack'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargToTrack, 'non-sidereal moving target')))\n else:\n self.info['movingTargToTrack'][i] = None\n if self.convolveExtended is True:\n self.info['convolveExtended'] = [True] * n_exposures", "def __parsePackages__(self, f):\n\n\t\tp = apt_pkg.ParseTagFile(f)\n\n\t\t# Just load into memory the fields that are going to be useful\n\t\twhile p.Step() == 1:\n\t\t\tpkg = p.Section['Package']\n\n\t\t\tself.packages[pkg] = {}\n\n\t\t\tfor field in p.Section.keys():\n\t\t\t\tif field == 'Package':\n\t\t\t\t\tpass \n\t\t\t\telif ['Depends', 'Recommends', 'Suggests', 'Enhances', 'Pre-Depends', 'Conflicts', 'Provides'].count(field):\n\t\t\t\t\tvalue = p.Section.get(field, \"\")\n\t\t\t\t\tself.packages[pkg][field] = apt_pkg.ParseDepends(value)\n\t\t\t\telif ['Size', 'Installed-Size'].count(field):\n\t\t\t \t\tvalue = p.Section.get(field, \"0\")\n\t\t\t \t\tself.packages[pkg][field] = int(value)\n\t\t\t\telif field == 'Source':\n\t\t\t\t\tsrc = p.Section.get(field, pkg)\n\t\t\t\t\tidx = src.find('(')\n\t\t\t\t\tif idx != -1:\n\t\t\t\t\t\tsrc = src[:idx].strip()\n\t\t\t\t\tself.packages[pkg][field] = src\n\t\t\t\telif field == 'Provides':\n\t\t\t\t\tself.packages[pkg][\"Provides\"] = apt_pkg.ParseDepends(p.Section.get(\"Provides\", \"\"))\n\t\t\t\telse:\n\t\t\t\t\tself.packages[pkg][field] = p.Section.get(field, '')\n\n\t f.close()", "def buildFromRecords(self, records):\n probes = {}\n for record in records:\n fields = {}\n for field in record.split(self.FIELD_DELIMITER):\n index = field.find(self.KEY_VALUE_DELIMITER)\n if index == -1 or len(field) < (index+1):\n raise InvariantViloation('detected invalid probe record in app info file - {}'.format(record))\n fields.update({field[:index]:field[index+1:]})\n if fields:\n try:\n fields[self.FIELD_FILE] = self.trimWorkspace(fields[self.FIELD_FILE], self.workspace)\n probes.update({\n fields[self.FIELD_RECORDER_RETURN_SITE] : AnchoredProbe(\n fields[self.FIELD_NAME], fields[self.FIELD_FILE], fields[self.FIELD_LINE],\n fields[self.FIELD_ATTRIBUTES], fields[self.FIELD_STATUS] == self.PROBE_STATUS_ENABLED,\n fields[self.FIELD_NAME]\n )\n })\n except KeyError as error:\n raise InvariantViloation('detected record missing field {} - \\n{}\\n{}'.format(error, record, fields))\n return probes", "def _get_eps_xml(self):\n format_path = os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and 
\\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def task_process_department_files():\n for dept in Department.list():\n for file_name, file in dept.files.items():\n yield {\n 'name': f'{dept}:{file_name}',\n 'file_dep': file.dependencies +\n [file.raw_path, util.path.CONFIG_PATH],\n 'targets': [file.processed_path],\n 'actions': [file.process],\n 'clean': True,\n }", "def get_filepaths_and_exts(self):\n filepaths = [prod.filepath for prod in self.products]\n exts = [prod.ext for prod in self.products]\n\n return filepaths, exts", "def partid2eids(self, partid, etype=...):\n ...", "def partid2eids(self, partid, etype=...):\n ...", "def parse_design(self, detailed_design_file):", "def parse_form(self, response):\n path = os.path.join(\n self.output_dir, \"epaipm-v%d-rev_%s.xlsx\" %\n (response.meta[\"version\"], response.meta[\"revision\"].isoformat()))\n\n yield items.EpaIpm(\n data=response.body, version=response.meta[\"version\"],\n revision=response.meta[\"revision\"], save_path=path)", "def retrieve_ext_list(self, puid_list):\n xml_iter = self._parse_xml()\n puiddict = {}\n for topelements in xml_iter:\n if (\n topelements.tag\n == \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}FileFormatCollection\"\n ):\n for fileformats in topelements:\n puid = fileformats.get(\"PUID\")\n for puids in puid_list:\n if puids != puid:\n continue\n ext = fileformats.find(\n \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}Extension\"\n )\n if ext is not None:\n # Return the first file format extension.\n puiddict[puids] = ext.text\n break\n puiddict[puids] = None\n break\n notfound = []\n for puid in puid_list:\n if puid not in puiddict:\n if puid not in notfound:\n notfound.append(puid)\n if len(notfound) > 0:\n for puid in notfound:\n puiddict[puid] = \"notfound\"\n return puiddict", "def generate(self):\n if len(self.files) == 0:\n raise Exception('no files to process')\n music = []\n for filename in self.files:\n music.extend(self._process_file(filename))\n return self._extract_raw(sorted(music, key=lambda tup: (tup[0], tup[1])))", "def process_input_files(list_input_files):\n global dict_models_results\n global list_spacy_docs\n \n for input_file in list_input_files:\n prefix = prefix_from_filename(input_file)\n \n with open(input_file) as f:\n list_cases = json.load(f)\n dict_models_results[prefix] = list_cases\n \n \n #extract list of questions from all vignettes and create a mapping page -> vignette question\n dict_questions = {}\n for prefix, list_cases in dict_models_results.items():\n for vignette in list_cases:\n dict_questions[vignette[\"book_page\"]] = vignette[\"question\"]\n \n \n for book_page,question in dict_questions.items():\n doc_q = load_bner_onto_tokens_extension(question, book_page)\n list_spacy_docs.append(doc_q)\n \n return", "def _get_emulators ( self, emulator_home, model=\"prosail\" ):\n files = glob.glob(\"%s*.npz\" % emulator_home)\n emulator_search_dict = {}\n for f in files:\n emulator_search_dict[ float(f.split(\"/\")[-1].split(\"_\")[0]),\n float(f.split(\"/\")[-1].split(\"_\")[1]),\n float(f.split(\"/\")[-1].split(\"_\")[2]),\n float(f.split(\"/\")[-1].split(\"_\")[3])] = f\n # So we have a dictionary inddexed by SZA, VZA and RAA and mapping to a filename\n # Remove some weirdos...\n\n emu_keys = np.array( emulator_search_dict.keys() )\n self.emulator = []\n for i in 
xrange (len ( self.metadata ) ):\n e_sza = emu_keys[\n np.argmin (np.abs( emu_keys[:, 0] - self.sza[i] )), 0]\n e_vza = emu_keys[\n np.argmin (np.abs( emu_keys[:, 2] - self.vza[i] )), 2]\n e_saa = emu_keys[\n np.argmin (np.abs( emu_keys[:, 2] - self.saa[i] )), 1]\n e_vaa = emu_keys[\n np.argmin (np.abs( emu_keys[:, 3] - self.vaa[i] )), 3]\n print self.sza[i], e_sza, self.vza[i], e_vza, self.vaa[i], e_vaa, self.saa[i], e_saa\n the_emulator = \"%.1f_%.1f_%.1f_%.1f_%s.npz\" % (\n e_sza, e_saa, e_vza, e_vaa, model )\n print \"Using emulator %s\" % os.path.join ( emulator_home, the_emulator )\n self.emulator.append ( gp_emulator.MultivariateEmulator\n ( dump=os.path.join ( emulator_home, the_emulator ) ) )", "def generate_processes(emg, source, processes, conf, specifications):\n # Import Specifications\n or_models = list(processes.models.values())\n or_processes = list(processes.environment.values())\n or_entry = processes.entry\n\n all_instance_maps = specifications[\"manual event models\"].get(\"specification\")\n fragment_name = emg.abstract_task_desc['fragment']\n descriptions = None\n for imap in all_instance_maps.get(\"manual event models\", []):\n if fragment_name in imap.get('fragments', []):\n descriptions = imap.get(\"model\", None)\n\n # Import manual process\n if descriptions and (\"functions models\" in descriptions or \"environment processes\" in descriptions):\n manual_processes = ProcessCollection(emg.logger, emg.conf)\n manual_processes.parse_event_specification(descriptions)\n\n # Decide on process replacements\n if manual_processes.entry:\n if (get_conf_property(conf, \"enforce replacement\") and or_entry) or not or_entry:\n or_entry = manual_processes.entry\n\n # Replace rest processes\n for collection, manual in ((or_models, manual_processes.models.values()),\n (or_processes, manual_processes.environment.values())):\n for process in manual:\n if process.pretty_id in {p.pretty_id for p in collection} and \\\n get_conf_property(conf, \"enforce replacement\"):\n collection[[p.pretty_id for p in collection].index(process.pretty_id)] = process\n elif process.pretty_id not in {p.pretty_id for p in collection}:\n collection.insert(0, process)\n else:\n emg.logger.info(\"There is no specification for {!r} or it has invalid format\".format(fragment_name))\n\n processes.entry = or_entry\n processes.models = {p.pretty_id: p for p in or_models}\n processes.environment = {p.pretty_id: p for p in or_processes}\n processes.establish_peers(strict=True)", "def main(desc_key, fxyz, peratom, scale, pca_d, keep_raw=False, output=None, prefix='ASAP'):\n\n if output is None:\n output = prefix + \"-pca-d\" + str(pca_d) + '.xyz'\n peratom = bool(peratom)\n\n # read the xyz file\n frames = ase.io.read(fxyz, ':')\n n_frames = len(frames)\n print('load xyz file: ', fxyz, ', a total of ', str(n_frames), 'frames')\n\n # extract the descriptors from the file\n desc = []\n if n_frames == 1 and not peratom:\n raise RuntimeError('Per-config PCA not possible on a single frame')\n\n # retrieve the SOAP vectors --- both of these throw a ValueError if any are missing or are of wrong shape\n if peratom:\n desc = np.concatenate([a.get_array(desc_key) for a in frames])\n else:\n desc = np.row_stack([a.info[desc_key] for a in frames])\n\n # scale & center\n if scale:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n print('DEBUG: {}'.format(desc.shape))\n print(scaler.fit(desc))\n desc = scaler.transform(desc) # normalizing the features\n\n # fit PCA\n proj, pvec = pca(desc, 
pca_d)\n # could do with sklearn as well\n # from sklearn.decomposition import PCA\n # pca_sklearn = PCA(n_components=4) # can set svd_solver\n # proj = pca_sklearn.fit_transform(desc)\n # pvec = pca_sklearn.components_\n\n # add coords to info/arrays\n if peratom:\n running_index = 0\n for at in frames:\n n_atoms = len(at)\n at.arrays['pca_coord'] = proj[running_index:running_index + n_atoms, :].copy()\n running_index += n_atoms\n\n if not keep_raw:\n for at in frames:\n del at.arrays[desc_key]\n else:\n for i, at in enumerate(frames):\n at.info['pca_coord'] = proj[i]\n\n if not keep_raw:\n for at in frames:\n del at.info[desc_key]\n\n # save\n ase.io.write(output, frames, write_results=False)", "def build_epub(self) -> None:\n outname = self.config.epub_basename + '.epub'\n logger.info(__('writing %s file...'), outname)\n epub_filename = path.join(self.outdir, outname)\n with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:\n epub.write(path.join(self.outdir, 'mimetype'), 'mimetype', ZIP_STORED)\n for filename in ('META-INF/container.xml', 'content.opf', 'toc.ncx'):\n epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)\n for filename in self.files:\n epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)", "def _resolve_xml_and_data_paths(self):\n\n supported_extensions = ['.dat', '.lfp', '.eeg']\n self.filename = Path(self.filename)\n self.binary_file = Path(self.binary_file) if self.binary_file is not None else None\n \n if self.filename.suffix == '.xml':\n xml_file_path = self.filename\n data_file_path = self.binary_file \n elif self.filename.suffix == '':\n xml_file_path = self.filename.with_suffix(\".xml\")\n data_file_path = self.binary_file\n elif self.filename.suffix in supported_extensions:\n xml_file_path = self.filename.with_suffix(\".xml\")\n data_file_path = self.filename\n else:\n raise KeyError(f\"Format {self.filename.suffix} not supported, filename format should be {supported_extensions} or .xml\")\n \n if data_file_path is None:\n possible_file_paths = (xml_file_path.with_suffix(extension) for extension in supported_extensions)\n data_file_path = next((path for path in possible_file_paths if path.is_file()), None)\n if data_file_path is None:\n raise FileNotFoundError(f\"data binary not found for file {xml_file_path.stem} with supported extensions: {supported_extensions}\")\n\n \n assert xml_file_path.is_file(), f\"xml file not found at the expected location {xml_file_path}\"\n assert data_file_path.is_file(), f\"binary file not found at the expected location {data_file_path}\"\n\n self.xml_file_path = xml_file_path\n self.data_file_path = data_file_path", "def _iep(s):\n d = defaultdict(dict)\n for _ep in iter_entry_points(s):\n try:\n d[_ep.name] = _ep.load()\n except Exception as e:\n d[_ep.name] = functools.partial(_broken_ep, _ep, e)\n return d", "def decode(self):\n # Extract all the experiments\n\n # Map of imageset/scan pairs\n imagesets = {}\n\n # For every experiment, use the given input to create\n # a sensible experiment.\n el = ExperimentList()\n for eobj in self._obj[\"experiment\"]:\n\n # Get the models\n identifier = eobj.get(\"identifier\", \"\")\n beam = self._lookup_model(\"beam\", eobj)\n detector = self._lookup_model(\"detector\", eobj)\n goniometer = self._lookup_model(\"goniometer\", eobj)\n scan = self._lookup_model(\"scan\", eobj)\n crystal = self._lookup_model(\"crystal\", eobj)\n profile = self._lookup_model(\"profile\", eobj)\n scaling_model = self._lookup_model(\"scaling_model\", eobj)\n\n key = 
(eobj.get(\"imageset\"), eobj.get(\"scan\"))\n\n imageset = None\n try:\n imageset = imagesets[key] # type: ImageSet\n except KeyError:\n # This imageset hasn't been loaded yet - create it\n imageset_data = self._lookup_model(\"imageset\", eobj)\n\n # Create the imageset from the input data\n if imageset_data is not None:\n if \"params\" in imageset_data:\n format_kwargs = imageset_data[\"params\"]\n else:\n format_kwargs = {}\n\n # Load the external lookup data\n mask_filename, mask = self._load_pickle_path(imageset_data, \"mask\")\n gain_filename, gain = self._load_pickle_path(imageset_data, \"gain\")\n pedestal_filename, pedestal = self._load_pickle_path(\n imageset_data, \"pedestal\"\n )\n dx_filename, dx = self._load_pickle_path(imageset_data, \"dx\")\n dy_filename, dy = self._load_pickle_path(imageset_data, \"dy\")\n\n if imageset_data[\"__id__\"] == \"ImageSet\":\n imageset = self._make_stills(\n imageset_data, format_kwargs=format_kwargs\n )\n elif imageset_data[\"__id__\"] == \"ImageGrid\":\n imageset = self._make_grid(\n imageset_data, format_kwargs=format_kwargs\n )\n elif (\n imageset_data[\"__id__\"] == \"ImageSequence\"\n or imageset_data[\"__id__\"] == \"ImageSweep\"\n ):\n imageset = self._make_sequence(\n imageset_data,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n format_kwargs=format_kwargs,\n )\n elif imageset_data[\"__id__\"] == \"MemImageSet\":\n imageset = self._make_mem_imageset(imageset_data)\n else:\n raise RuntimeError(\"Unknown imageset type\")\n\n if imageset is not None:\n # Set the external lookup\n if mask is None:\n mask = ImageBool()\n else:\n mask = ImageBool(mask)\n if gain is None:\n gain = ImageDouble()\n else:\n gain = ImageDouble(gain)\n if pedestal is None:\n pedestal = ImageDouble()\n else:\n pedestal = ImageDouble(pedestal)\n if dx is None:\n dx = ImageDouble()\n else:\n dx = ImageDouble(dx)\n if dy is None:\n dy = ImageDouble()\n else:\n dy = ImageDouble(dy)\n\n if not imageset.external_lookup.mask.data.empty():\n if not mask.empty():\n mask = tuple(m.data() for m in mask)\n for m1, m2 in zip(\n mask, imageset.external_lookup.mask.data\n ):\n m1 &= m2.data()\n imageset.external_lookup.mask.data = ImageBool(mask)\n else:\n imageset.external_lookup.mask.data = mask\n imageset.external_lookup.mask.filename = mask_filename\n imageset.external_lookup.gain.data = gain\n imageset.external_lookup.gain.filename = gain_filename\n imageset.external_lookup.pedestal.data = pedestal\n imageset.external_lookup.pedestal.filename = pedestal_filename\n imageset.external_lookup.dx.data = dx\n imageset.external_lookup.dx.filename = dx_filename\n imageset.external_lookup.dy.data = dy\n imageset.external_lookup.dy.filename = dy_filename\n\n # Update the imageset models\n if isinstance(imageset, ImageSequence):\n imageset.set_beam(beam)\n imageset.set_detector(detector)\n imageset.set_goniometer(goniometer)\n imageset.set_scan(scan)\n elif isinstance(imageset, (ImageSet, ImageGrid)):\n for i in range(len(imageset)):\n imageset.set_beam(beam, i)\n imageset.set_detector(detector, i)\n imageset.set_goniometer(goniometer, i)\n imageset.set_scan(scan, i)\n\n imageset.update_detector_px_mm_data()\n\n # Add the imageset to the dict - even if empty - as this will\n # prevent a duplicated attempt at reconstruction\n imagesets[key] = imageset\n\n # Append the experiment\n el.append(\n Experiment(\n imageset=imageset,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n crystal=crystal,\n profile=profile,\n 
scaling_model=scaling_model,\n identifier=identifier,\n )\n )\n\n # Return the experiment list\n return el", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def _get_emulators ( self, emulator_home, model=\"prosail\"):\n files = glob.glob(\"%s*.npz\" % emulator_home)\n emulator_search_dict = {}\n for f in files:\n emulator_search_dict[ float(f.split(\"/\")[-1].split(\"_\")[0]),\n float(f.split(\"/\")[-1].split(\"_\")[1]),\n float(f.split(\"/\")[-1].split(\"_\")[2]),\n float(f.split(\"/\")[-1].split(\"_\")[3])] = f\n # So we have a dictionary inddexed by SZA, VZA and RAA and mapping to a filename\n # Remove some weirdos...\n\n emu_keys = np.array( emulator_search_dict.keys() )\n self.emulator = []\n for i in xrange (len ( self.metadata ) ):\n e_sza = emu_keys[\n np.argmin (np.abs( emu_keys[:, 0] - self.sza[i] )), 0]\n e_vza = emu_keys[\n np.argmin (np.abs( emu_keys[:, 2] - self.vza[i] )), 2]\n e_saa = emu_keys[\n np.argmin (np.abs( emu_keys[:, 2] - self.saa[i] )), 1]\n e_vaa = emu_keys[\n np.argmin (np.abs( emu_keys[:, 3] - self.vaa[i] )), 3]\n print self.sza[i], e_sza, self.vza[i], e_vza, self.vaa[i], e_vaa, self.saa[i], e_saa\n the_emulator = \"%.1f_%.1f_%.1f_%.1f_%s.npz\" % (\n e_sza, e_saa, e_vza, e_vaa, model )\n print \"Using emulator %s\" % os.path.join ( emulator_home, the_emulator )\n self.emulator.append ( gp_emulator.MultivariateEmulator\n ( dump=os.path.join ( emulator_home, the_emulator ) ) )", "def partid2eids(self, partid, etype): # -> None:\n ...", "def parse_eeg_file(path):\n if os.path.splitext(path)[-1].lower() != '.edf':\n NotImplementedError(\"Only EDFs are supported currently. 
More files coming.\")\n\n try: #edf\n edf_file = mne.io.read_raw_edf(path, stim_channel=None, verbose=False)\n except RuntimeError: #edf+\n edf_file = mne.io.read_raw_edf(path, preload=True, stim_channel=None, verbose=False)\n\n # TODO edf++\n\n eeg_data = {}\n eeg_data['meas_date'] = datetime.datetime.fromtimestamp(edf_file.info[\"meas_date\"])\n eeg_data['nchan'] = edf_file.info[\"nchan\"]\n eeg_data['sfreq'] = edf_file.info[\"sfreq\"]\n eeg_data['subject_info'] = edf_file.info[\"subject_info\"]\n eeg_data['ch_names'] = edf_file.ch_names\n\n return {\"eeg_\"+key: value for key, value in eeg_data.items()}", "def read(self):\r\n tree = ElementTree(file=self._fn)\r\n self._root = tree.getroot()\r\n for prop in self.prpnames:\r\n self.__setattr__(prop, DocItem.get_attr(self, prop))", "def parse(cls, filepath, spectrum_type, labels, plformat):\n with open(filepath, 'r') as autoassign_file:\n peaklist = pe.PeakList(filepath, spectrum_type, labels, plformat)\n\n for line in autoassign_file:\n if line.startswith('#'):\n continue\n if line.startswith('*'):\n break\n\n peak = line.split()\n index = peak.pop(0)\n workbook = peak.pop()\n intensity = peak.pop()\n num_dims = len(peak)\n assignment = ['?' for i in range(num_dims)]\n peak = [float(dim) for dim in peak]\n peaklist.append(pe.Peak(labels, assignment, peak, peaklist))\n return peaklist", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def extract_energies(self):\n path2save = 'Analysis/energies.pkl'\n #check, if I have to extract them, or they are already extracted. This the latter case, load them.\n if os.path.exists(path2save):\n print(\"extraction of the polarizaion has already been done. Loading polarizations from from pkl\")\n # TODO delete to check if exists above and do load without doing\n with open('Analysis/energies.pkl', 'rb') as fid:\n [self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols] \\\n = pickle.load(fid)\n else:\n print('Energies are being extracting and will be saved to pkl')\n for i, radius in enumerate(self.radii):\n self.E_sd_plus[radius] = {}\n self.E_sd_0[radius] = {}\n self.E_sd_minus[radius] = {}\n\n self.E_sum_env_plus[radius] = {}\n self.E_sum_env_0[radius] = {}\n self.E_sum_env_minus[radius] = {}\n\n self.V0_plus[radius] = {}\n self.V0_0[radius] = {}\n self.V0_minus[radius] = {}\n\n self.E_env_plus[radius] = {}\n self.E_env_0[radius] = {}\n self.E_env_minus[radius] = {}\n\n self.V_env_plus[radius] = {}\n self.V_env_0[radius] = {}\n self.V_env_minus[radius] = {}\n\n self.n_mols[radius] = {}\n\n for j, core_id in enumerate(self.core_ids):\n #path2file_ip = \\\n # 'Analysis/' + self.dict_radii_folder_IP[radius] + '/Matrix-analysis-IP_' \\\n # + self.mol_name + '-Mol_' + str(core_id) + '_C_1.yml'\n\n path2file_ip = \\\n 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[radius]\\\n + '/Matrix-analysis-IP_' + self.mol_name + '.yml' # new\n path2file_ea = \\\n 'Analysis/EA_by_radius/' + self.dict_radii_folder_EA[radius]\\\n + '/Matrix-analysis-EA_' + self.mol_name + '.yml'\n\n # IP. 
Charged states: \"+\" and \"0\"\n with open(path2file_ip) as fid:\n ip_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n with open(path2file_ea) as fid:\n ea_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n\n\n # number of mols extraction\n self.n_mols[radius][core_id] = len(ip_dict[int(core_id)]['energies'])\n\n # sd extraction. E_sd = E_0 + V_0\n self.E_sd_plus[radius][core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged'] #new\n self.E_sd_0[radius][core_id] = ip_dict[core_id]['energies'][int(core_id)]['total_e_uncharged']\n self.E_sd_minus[radius][core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged']\n # E_0\n self.E0_plus[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n self.E0_0[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_uncharged_vacuum']\n self.E0_minus[core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n # # E_0_vacuum\n # self.E0_plus_vacuum[core_id] =\n # self.E0_0_vacuum[core_id] =\n # self.E0_minus_vacuum[core_id] =\n\n\n # V_0\n self.V0_plus[radius][core_id] = self.E_sd_plus[radius][core_id] - self.E0_plus[core_id]\n self.V0_0[radius][core_id] = self.E_sd_0[radius][core_id] - self.E0_0[core_id]\n self.V0_minus[radius][core_id] = self.E_sd_minus[radius][core_id] - self.E0_minus[core_id]\n\n # E_sum_env = \\sum_i\\ne 0 E_i \\sum_{j=0}^{N} V_{ij}\n ip_env_sub_dict = ip_dict[int(core_id)]['energies']#new\n del ip_env_sub_dict[int(core_id)]\n # del ip_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n\n ea_env_sub_dict = ea_dict[int(core_id)]['energies'] # new\n del ea_env_sub_dict[int(core_id)]\n # del ea_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n # tmp = ip_env_sub_dict['energies'][]\n\n list_total_e_env_plus = [ip_env_sub_dict[env_id]['total_e_charged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_plus[radius][int(core_id)] = np.sum(list_total_e_env_plus) if not list_total_e_env_plus == [] else 0.0\n list_total_e_env_0 = [ip_env_sub_dict[env_id]['total_e_uncharged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_0[radius][int(core_id)] = np.sum(list_total_e_env_0) if not list_total_e_env_0 == [] else 0.0\n list_total_e_env_minus = [ea_env_sub_dict[env_id]['total_e_charged'] for env_id in ea_env_sub_dict]\n self.E_sum_env_minus[radius][int(core_id)] = np.sum(list_total_e_env_minus) if not list_total_e_env_minus == [] else 0.0\n\n # E_env = \\sum_i \\ne 0 E_i. sum of DFT env energies.\n list_vacuum_env_e_plus = [ip_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_plus[radius][int(core_id)] = np.sum(list_vacuum_env_e_plus) if not list_vacuum_env_e_plus == [] else 0.0\n list_vacuum_env_e_0 = [ip_env_sub_dict[env_id]['total_e_uncharged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_0[radius][int(core_id)] = np.sum(list_vacuum_env_e_0) if not list_vacuum_env_e_0 == [] else 0.0\n list_vacuum_env_e_minus = [ea_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ea_env_sub_dict]\n self.E_env_minus[radius][int(core_id)] = np.sum(list_vacuum_env_e_minus) if not list_vacuum_env_e_minus == [] else 0.0\n\n # V_env = 0.5 (\\sum_{i=1} \\sum_{j=1} V_{ij}). classical interaction of env. 
mols\n self.V_env_plus[radius][core_id] = 0.5 * (self.E_sum_env_plus[radius][core_id]\n - self.E_env_plus[radius][core_id]\n - self.V0_plus[radius][core_id])\n\n self.V_env_0[radius][core_id] = 0.5 * (self.E_sum_env_0[radius][core_id]\n - self.E_env_0[radius][core_id]\n - self.V0_0[radius][core_id])\n\n self.V_env_minus[radius][core_id] = 0.5 * (self.E_sum_env_minus[radius][core_id]\n - self.E_env_minus[radius][core_id]\n - self.V0_minus[radius][core_id])\n\n\n append_dict_with_mean(self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.E0_plus, self.E0_0, self.E0_minus,\n self.n_mols) # compute and add \"mean\" to all mentioned dicts\n\n with open('Analysis/energies.pkl', 'wb') as fid:\n pickle.dump([self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols],\n fid)\n print(\"Energies are extracted and dumped to pkl\")", "def eid2partid(self, eids, etype=...):\n ...", "def eid2partid(self, eids, etype=...):\n ...", "def products(self):\n _products = []\n for ext in self.exts:\n prod = Product(ext, self.node.opath_from_ext(ext))\n _products.append(prod)\n\n return _products", "def list_all_ephemerides_files(self) -> Dict:\n ephs = self.list_result_ephemerides_files()\n while 'nextPageToken' in ephs:\n next_page_token = ephs['nextPageToken']\n _, e = self.list_result_ephemerides_files(page_token=next_page_token)\n ephs['ephemerisResourcePath'].extend(e['ephemerisResourcePath'])\n return ephs", "def read_qe(qefile, task):\n fileobj = open(qefile)\n lines = fileobj.readlines()\n fileobj.close()\n if task == \"PW_INP\": # Reading a pw.x input file\n for i, line in enumerate(lines):\n if \"nat\" in line:\n # Reading the number of atoms in the cell\n if \",\" in line.split()[2]:\n nat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n nat = int(line.split()[2])\n elif \"ntyp\" in line:\n if \",\" in line.split()[2]:\n ntypat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n ntypat = int(line.split()[2])\n elif \"CELL_PARAMETERS\" in line:\n # Reading the cell vectors\n cell = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"ATOMIC_POSITIONS\" in line:\n if \"crystal\" in line:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]]), cell)\n else:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n # Returning the input structure\n rstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n return rstrc\n elif task == \"PW_OUT_RELAX\": # Reading a pw.x output file for a calculation = \"relax\"\n status = \"NONE\"\n rstrcs = []\n rtotEs = []\n rtotFs = []\n rforces = []\n rstress = []\n for i, line in enumerate(lines):\n # Initial information related to the input cell\n if \"number of atoms/cell\" in line:\n # Reading the number of atoms in the cell\n nat = int(line.split()[4])\n 
elif \"number of atomic types\" in line:\n ntypat = int(line.split()[5])\n elif \"crystal axes: (cart. coord. in units of alat)\" in line:\n # Reading the cell vectors\n cell = [x.split()[3:6] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"Crystallographic axes\" in line:\n # Reading the input coordinates and creating a collection of ase.Atoms objects\n geom_start = i + 3\n geom_stop = geom_start + nat\n species = [line.split()[1] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[6:9]]\n for line in lines[geom_start:geom_stop]]), cell)\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates (first)\")\n # Now, just after each SCF cycle\n # Reading total energy\n elif \"Forces acting on atoms\" in line:\n forces_start = i + 2\n forces_stop = forces_start + nat\n try:\n forces = array([[float(col) for col in line.split()[6:9]]\n for line in lines[forces_start:forces_stop]])\n #print (\"Appending forces\")\n rforces.append(forces)\n except ValueError:\n # expected to occur when forces are too big\n # and so incompatible with the format used in QE\n # for instance:\n # atom 3 type 2 force = 674.57999165 312.30521069-1079.69944125\n print (\"Rerror reading forces in file:\")\n print (qefile)\n #print (\"Appending forces (empty)\")\n rforces.append([])\n elif \"! total energy\" in line:\n rtotEs.append(float(line.split()[4]))\n #print (\"Appending energy\")\n elif \"total stress (Ry/bohr**3)\" in line:\n # Reading the stress tensor\n stress = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n stress = array([[float(col) for col in row] for row in stress])\n rstress.append(stress)\n #print (\"Appending stress\")\n elif \"Total force\" in line:\n rtotFs.append(float(line.split()[3]))\n #print (\"Appending total forces\")\n elif \"ATOMIC_POSITIONS (alat)\" in line:\n # Reading the relaxed and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates\")\n elif \"convergence NOT achieved after 100 iterations: stopping\" in line:\n # Removing the last item the vector with structures\n status = \"SCF_NOT_CONVERGED\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if no even the first SCF started\n if len(rtotEs) == 0 and status == \"NONE\":\n status = \"CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the SCF has not been finished because of timeout\n if len(rstrcs) > len(rtotEs) and status == \"NONE\":\n status = \"TIMEOUT_OR_CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the BFGS has been finished\n if status == \"TIMEOUT_OR_CRASH\" and \"JOB DONE\" in lines[len(lines)-2]:\n status = \"FINISHED\"\n # Returning a collection of cells and properties\n return status, rstrcs, rtotEs, rtotFs, rforces, rstress", "def _load(self):\n with qisys.sh.TempDir() as work_dir:\n pkg = portage.xpak.tbz2(self.path)\n pkg.decompose(work_dir, cleanup=0)\n arch, arch_variant = _get_pkg_arch(work_dir)\n with open(os.path.join(work_dir, 'PF'), 'r') as fpf:\n pf = fpf.readline().strip()\n name, version, revision = 
portage.versions.pkgsplit(pf)\n dependency = dict()\n for dep, dep_filename in _DEPENDENCY.items():\n dep_path = os.path.join(work_dir, dep_filename)\n if not os.path.exists(dep_path):\n dependency[dep] = list()\n continue\n with open(dep_path, 'r') as fdep:\n dependency[dep] = fdep.read().strip().split()\n dependency['all'] = list()\n for dep_list in _DEPENDENCY:\n dependency['all'].extend(dependency[dep_list])\n for dep, dep_list in dependency.items():\n dependency[dep] = list(set(dep_list))\n metadata = {\n 'name': name,\n 'version': version,\n 'revision': revision,\n 'arch': arch,\n 'arch_variant': arch_variant,\n 'dependencies': dependency,\n }\n self.metadata = metadata", "def extract_extended_pdfs(pdfs: Union[Iterable[ZfitPDF], ZfitPDF]) -> List[ZfitPDF]:\n from ..models.functor import BaseFunctor\n\n pdfs = convert_to_container(pdfs)\n indep_pdfs = []\n\n for pdf in pdfs:\n if not pdf.is_extended:\n continue\n elif isinstance(pdf, BaseFunctor):\n if all(pdf.pdfs_extended):\n indep_pdfs.extend(extract_extended_pdfs(pdfs=pdf.pdfs))\n elif not any(pdf.pdfs_extended):\n indep_pdfs.append(pdf)\n else:\n assert False, \"Should not reach this point, wrong assumptions. Please report bug.\"\n else: # extended, but not a functor\n indep_pdfs.append(pdf)\n\n return indep_pdfs", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def paargs(self):\n paopt_find = {'Night':self.night, 'Telescope':self.telescope, 'Field':self.field, 'RA':self.ra,\n 'DEC':self.dec, 'TimeBeforeDiscovery': self.t_before, 'TimeAfterDiscovery': self.t_after,\n 'Program':self.program, 'datadir':self.datadir, 'outdir':self.outdir}\n paopt_coadd = {'outdir':self.outdir}\n paopt_extract = {'outdir':self.outdir}\n paopt_subimage = {'Program':self.program, 'Telescope':self.telescope, 'RA':self.ra, 'DEC':self.dec,\n 'PixelRadius':self.pixrad, 'tempdir':self.tempdir, 'outdir':self.outdir}\n paopt_imdiff = {'outdir':self.outdir}\n paopt_refstars = {'RA':self.ra, 'DEC':self.dec, 'outdir':self.outdir}\n paopt_phot = {'outdir':self.outdir, 'dumpfile':self.dump_pa('Photometry')}\n\n paopts={}\n defList={'Find_Data' : paopt_find,\n 'Coaddition' : paopt_coadd,\n 'Source_Extraction' : paopt_extract,\n 'Make_Subimages' : paopt_subimage,\n 'Image_Differencing' : paopt_imdiff,\n 'Choose_Refstars' : paopt_refstars,\n 'Photometry' : paopt_phot}\n\n def getPAConfigFromFile(PA,algs):\n def mergeDicts(source,dest):\n for k in source:\n if k not in dest:\n dest[k]=source[k]\n userconfig={}\n if PA in algs:\n fc=algs[PA]\n for k in fc: #do a deep copy leave QA config out\n if k != \"QA\":\n userconfig[k]=fc[k]\n defconfig={}\n if PA in defList:\n defconfig=defList[PA]\n mergeDicts(defconfig,userconfig)\n return userconfig\n\n for PA in self.palist:\n paopts[PA]=getPAConfigFromFile(PA,self.algorithms)\n\n\n return paopts", "def readEpi_fromSequence(fpath, position=0, direction='h'):\n assert isinstance(fpath, str)\n\n fnames = []\n for f in glob(fpath + \"*.png\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.jpg\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.bmp\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.ppm\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.tif\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(fpath + \"*.bmp\"):\n fnames.append(f)\n fnames.sort()\n\n im = misc.imread(fnames[0])\n channels = 1\n if len(im.shape) == 3:\n 
channels = 3\n\n if direction == 'h':\n epi = np.zeros((len(fnames), im.shape[1], channels))\n if direction == 'v':\n epi = np.zeros((len(fnames), im.shape[0], channels))\n\n for n,f in enumerate(fnames):\n im = misc.imread(fnames[n])\n if direction == 'h':\n if len(im.shape) == 3:\n epi[n, :, 0:3] = im[position, :, 0:3]\n else:\n epi[n, :, 0] = im[position, :]\n if direction == 'v':\n if len(im.shape) == 3:\n epi[n, :, 0:3] = im[ :, position, 0:3]\n else:\n epi[n, :, 0] = im[:, position]\n\n return epi[:, :, 0:channels]", "def loadPSet(self):\n self.logger.info(\"Working dir: %s\", os.getcwd())\n # Pickle original pset configuration\n procScript = \"edm_pset_pickler.py\"\n cmd = \"%s --input %s --output_pkl %s\" % (\n procScript,\n os.path.join(self.stepSpace.location, self.psetFile),\n os.path.join(self.stepSpace.location, self.configPickle))\n self.scramRun(cmd)\n\n try:\n with open(os.path.join(self.stepSpace.location, self.configPickle), 'rb') as f:\n self.process = Unpickler(f).load()\n except ImportError as ex:\n msg = \"Unable to import pset from %s:\\n\" % self.psetFile\n msg += str(ex)\n self.logger.error(msg)\n raise ex\n\n return", "def transform_programs(programs):\n # normalize the xPro data into the course_catalog/models.py data structures\n return [\n {\n \"readable_id\": program[\"readable_id\"],\n \"title\": program[\"title\"],\n \"image\": {\"url\": program[\"thumbnail_url\"]},\n \"description\": program[\"description\"],\n \"offered_by\": copy.deepcopy(OFFERED_BY),\n \"published\": bool(\n program[\"current_price\"]\n ), # a program is only considered published if it has a product/price\n \"url\": program[\"url\"],\n \"topics\": transform_topics(program.get(\"topics\", [])),\n \"platform\": PlatformType.xpro.value,\n \"resource_type\": LearningResourceType.program.value,\n \"runs\": [\n {\n \"prices\": [program[\"current_price\"]]\n if program.get(\"current_price\", None)\n else [],\n \"title\": program[\"title\"],\n \"run_id\": program[\"readable_id\"],\n \"enrollment_start\": _parse_datetime(program[\"enrollment_start\"]),\n \"start_date\": _parse_datetime(program[\"start_date\"]),\n \"end_date\": _parse_datetime(program[\"end_date\"]),\n \"description\": program[\"description\"],\n \"instructors\": [\n {\"full_name\": instructor[\"name\"]}\n for instructor in program.get(\"instructors\", [])\n ],\n }\n ],\n \"courses\": transform_courses(program[\"courses\"]),\n }\n for program in programs\n ]", "def do_preprocess(pdf_files):\n\n for pdf_file in pdf_files:\n\n base, ext = os.path.splitext(pdf_file)\n \n create_intermediate_files()\n \n # 1) split a pdf file, a page a pdf\n num_pages = pdfutil.split(os.path.join(cwd, pdf_file), DIR_PAGE)\n\n for i in xrange(1, num_pages + 1):\n\n file = '%04d.pdf' % i\n page_pdf = os.path.join(DIR_PAGE, file)\n \n pdfutil.convert_srgb(page_pdf, DIR_SRGB)\n srgb_pdf = os.path.join(DIR_SRGB, file)\n \n pdfutil.convert_vti(srgb_pdf, DIR_VTI)\n vti_pdf = os.path.join(DIR_VTI, file)\n\n pdfutil.convert_tiff(vti_pdf, DIR_TIFF)\n pdfutil.convert_text(vti_pdf, DIR_TEXT)\n\n # merge background pdf files\n pdfutil.merge_to_single_pdf(DIR_TIFF, DIR_BACK, 'back')\n background_pdf = os.path.join(DIR_BACK, 'back.pdf')\n\n # merge foreground pdf files\n output_text_pdf = '%s_text' % base\n pdfutil.merge_to_single_pdf(DIR_TEXT, DIR_TEXT, output_text_pdf)\n foreground_pdf = os.path.join(DIR_TEXT, output_text_pdf + '.pdf')\n pdfutil.export_by_preview(foreground_pdf)\n\n # merge background and foreground\n merged_pdf = os.path.join(cwd, '%s_merge.pdf' % 
base)\n pdfutil.merge_text_and_back(foreground_pdf, background_pdf, merged_pdf)\n\n final_pdf = '%s_final' % base\n pdfutil.optimize(merged_pdf, final_pdf)\n final_pdf = os.path.join(cwd, final_pdf + '.pdf')\n\n # aggregate what we want\n for f in (foreground_pdf, final_pdf):\n shutil.move(f, DIR_FINAL)\n \n # clean up unused\n os.unlink(merged_pdf) \n cleanup_intermediate_files()", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def parseDemultiplexConfig(self, fn=\"DemultiplexConfig.xml\", **kw):\n pattern = os.path.join(os.path.abspath(self.path), \"Unaligned*\", fn)\n cfg = {}\n for cfgfile in glob.glob(pattern):\n parser = DemultiplexConfigParser(cfgfile)\n data = parser.parse()\n if len(data) > 0:\n cfg[os.path.basename(os.path.dirname(cfgfile))] = data\n return cfg", "def mo_parse_p(self, filepath):\n\n # Now, can reprocess using tesseract-ocr rather than pdftotext\n ptext = textract.process(filepath, method='tesseract', encoding='utf-8')\n ptext = ptext.replace(b'\\xe2\\x80\\x94', b'-')\n ptext = ptext.decode('utf-8')\n keys = list(self.mo_coefficient_name_map.keys())\n\n # Get the calibration date:\n for line in ptext.splitlines():\n if 'CALIBRATION DATE' in line:\n items = line.split()\n ind = items.index('DATE:')\n cal_date = items[ind+1]\n cal_date = pd.to_datetime(cal_date).strftime('%Y%m%d')\n self.date.update({len(self.date): cal_date})\n\n if 'psia S/N' in line:\n items = line.split()\n ind = items.index('psia')\n prange = items[ind-1]\n name = self.mo_coefficient_name_map.get('prange')\n self.coefficients.update({name: prange})\n\n # Loop through each line looking for the lines which contain\n # calibration coefficients\n if '=' in line:\n # Tesseract-ocr misreads '0' as O, and 1 as IL\n line = line.replace('O', '0').replace('IL', '1').replace(\n '=', '').replace(',.', '.').replace(',', '.')\n line = line.replace('L', '1').replace('@', '0').replace('l', '1').replace('--', '-')\n if '11' in line and 'PA2' not in line:\n line = line.replace('11', '1')\n items = line.split()\n for n, k in enumerate(items):\n if k.lower() in keys:\n try:\n float(items[n+1])\n name = self.mo_coefficient_name_map.get(k.lower())\n self.coefficients.update({name: items[n+1]})\n except:\n pass\n if 'CC_ptcb2' not in list(self.mo_coefficient_name_map.keys()):\n self.coefficients.update({'CC_ptcb2': '0.000000e+000'})", "def _load (cls, *files):\n config = ConfigParser.ConfigParser()\n config.read(files)\n \n metadata = {}\n if config.has_section(\"metadata\"):\n for key in config.options(\"metadata\"):\n metadata[key] = config.get(\"metadata\", key)\n\n processes = {}\n datasources = {}\n for section in config.sections():\n if section == 
\"metadata\": continue\n if section.startswith(\"process_\"):\n try:\n processes[section[8:]] = FeatureServer.Processing.loadFromSection(config, section)\n except Exception, E:\n pass \n else: \n datasources[section] = cls.loadFromSection(\n config, section, 'DataSource')\n\n return cls(datasources, metadata, processes)", "def extract_template(temp_dir, fea_type):\n kps = []\n descriptors = np.array([])\n in_path = temp_dir + 'imgs/' # images\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n print(img.shape)\n kp, des = get_des(fea_type, img)\n if descriptors.size == 0:\n kps = kp\n descriptors = des\n else:\n kps.extend(kp)\n descriptors = np.vstack((descriptors, des))\n\n print(\"template descriptors shape: \" + str(descriptors.shape))\n with open(temp_dir + fea_type + '_template_0.pickle', 'wb') as ff:\n pickle.dump(descriptors, ff)\n\n # with open(temp_dir + fea_type + '_template_0.pickle', 'rb') as f:\n # template = pickle.load(f)\n\n return", "def get_ps_uni_extensions(fits_file_name, is_point_source):\n hdulist = fits.open(fits_file_name)\n ps_dict = {}\n uni_dict = {}\n s = 0\n for ext, hdu in enumerate(hdulist):\n if hdu.name == \"PS\":\n try:\n sltname = hdu.header[\"APERTURE\"]\n ps_dict[sltname] = ext\n except KeyError:\n sltname = \"Slit_\"+repr(s+1)\n ps_dict[sltname] = ext\n if hdu.name == \"UNI\":\n try:\n sltname = hdu.header[\"APERTURE\"]\n uni_dict[sltname] = ext\n except KeyError:\n sltname = \"Slit_\"+repr(s+1)\n uni_dict[sltname] = ext\n return ps_dict, uni_dict", "def load_pfile(self, **kwargs):\r\n pfile = kwargs['pfile']\r\n filetype = kwargs['filetype']\r\n\r\n # Loads the pfile and finds the indices, still need to sync and parse.\r\n self.pfile = PFILE(pfile, filetype=filetype)\r\n # self.pfile.sync(tstep='auto')\r", "def _read_eeg(eeg_file):\r\n pass", "def process_all():\n\tfiles = os.listdir('records')\n\tfiles = [file for file in files if file not in ('.DS_Store','old')]\n\tattr_list = []\n\tcorpus = []\n\tsentences = []\n\tcorp_set = set()\n\tfor file in files:\n\t\twith open('records/'+file) as f:\n\t\t\tattr_list, corpus, sentences = proc_file(f,file,corpus,attr_list,corp_set,sentences)\n\treturn attr_list,corpus,sentences", "def setup(self):\n EventGenerator.setup(self)\n\n if self.egs5_dir is None:\n self.egs5_dir = self.get_install_dir()\n logger.debug(\"Using EGS5 from install dir: \" + self.egs5_dir)\n\n ## data directory\n self.egs5_data_dir = os.path.join(self.egs5_dir, \"data\")\n ## config directory\n self.egs5_config_dir = os.path.join(self.egs5_dir, \"config\")\n\n logger.debug(\"egs5_data_dir=%s\" % self.egs5_data_dir)\n logger.debug(\"egs5_config_dir=%s\" % self.egs5_config_dir)\n\n if os.path.exists(\"data\"):\n os.unlink(\"data\")\n os.symlink(self.egs5_data_dir, \"data\")\n\n if os.path.exists(\"pgs5job.pegs5inp\"):\n os.unlink(\"pgs5job.pegs5inp\")\n os.symlink(self.egs5_config_dir + \"/src/esa.inp\", \"pgs5job.pegs5inp\")\n\n logger.debug(\"Reading run parameters: {}\".format(self.run_params))\n ## run parameters\n self.run_param_data = RunParameters(self.run_params)\n\n # Set target thickness from job parameter or use the default from run parameters\n if self.target_thickness is not None:\n self.target_z = self.target_thickness\n logger.debug(\"Target thickness set from job param: {}\".format(self.target_z))\n else:\n self.target_z = 
self.run_param_data.get(\"target_z\")\n logger.debug(\"Target thickness set from run_params: {}\".format(self.target_z))\n\n ebeam = self.run_param_data.get(\"beam_energy\")\n electrons = self.run_param_data.get(\"num_electrons\") * self.bunches\n\n seed_data = \"%d %f %f %d\" % (self.seed, self.target_z, ebeam, electrons)\n logger.debug(\"Seed data (seed, target_z, ebeam, electrons): {}\".format(seed_data))\n seed_file = open(\"seed.dat\", 'w')\n seed_file.write(seed_data)\n seed_file.close()", "def __init__(self, data_dir, html_dir, paver_html_dir, resolution_1, resolution_2, picture_format, hierarchy_version, original_mapping_dir):\n\n pn = proteomaps_PATHNAMES(hierarchy_version)\n PROTEIN_HIERARCHY_DIR = pn.PROTEIN_HIERARCHY_DIR\n po = proteomaps_organisms(pn)\n\n filenames_file = data_dir + \"/filenames.csv\"\n print \"Data set directory: \" + data_dir\n \n for row in csv.reader(open(filenames_file, 'r'), delimiter='\\t'):\n organism, data_file, data_set_name, data_set_name_matlab, article_name = row\n print \"\\nData set \" + data_set_name + \":\\nWriting html files to directory \" + html_dir\n\n data_type = \"cost\"\n proteomap_process_html(paver_html_dir, html_dir, data_dir, data_set_name, organism, resolution_1, resolution_2, article_name, data_type, picture_format,po,PROTEIN_HIERARCHY_DIR, original_mapping_dir)\n\n data_type = \"abundance\"\n proteomap_process_html(paver_html_dir, html_dir, data_dir, data_set_name, organism, resolution_1, resolution_2, article_name, data_type, picture_format,po,PROTEIN_HIERARCHY_DIR, original_mapping_dir)", "def setup(self, files):\n if not isinstance(files, (list, tuple)):\n raise RuntimeError(\"Argument must be list of files.\")\n\n self.files = files" ]
[ "0.64974564", "0.61578393", "0.57928175", "0.57109356", "0.5541757", "0.5491878", "0.5438996", "0.54297066", "0.5325947", "0.5219759", "0.52026135", "0.5192286", "0.5152777", "0.51420754", "0.5114544", "0.5093096", "0.50334144", "0.5020539", "0.5005299", "0.49603912", "0.4959661", "0.49570575", "0.4956181", "0.49561778", "0.49294677", "0.49101338", "0.4872313", "0.48491374", "0.48084882", "0.48028648", "0.48012263", "0.47868457", "0.47772345", "0.4776739", "0.4771809", "0.47716284", "0.47306472", "0.4726854", "0.4726158", "0.47260383", "0.47208107", "0.47123975", "0.47048616", "0.4701291", "0.4684332", "0.46819356", "0.4677526", "0.46762034", "0.46582517", "0.46487656", "0.46447363", "0.4643828", "0.46432105", "0.46407503", "0.46407503", "0.46375266", "0.4632671", "0.4631915", "0.46313772", "0.462955", "0.46271265", "0.4616699", "0.46152452", "0.4607997", "0.4602589", "0.46015367", "0.4599631", "0.4595909", "0.45948735", "0.45899794", "0.45794326", "0.45788684", "0.45772544", "0.45726702", "0.45710346", "0.45704272", "0.45704272", "0.45652235", "0.45636", "0.45600942", "0.45577124", "0.45546767", "0.45546013", "0.45525444", "0.454939", "0.4548315", "0.45471045", "0.4545621", "0.4545204", "0.45404527", "0.45380422", "0.453653", "0.4533239", "0.45305988", "0.45304912", "0.45272896", "0.4524214", "0.45241186", "0.45237604", "0.45228922" ]
0.64618903
1
Eliminate entries in the epi recon table that have already been reconstructed. I don't remember why this is here, but I know that at one time it was important.
def PruneEpiEntries(self):
    pruned = {}
    basefiles = []
    baseentries = {}
    for entry in self.entry_map['epi']:
        if baseentries.has_key(self.info[entry]['basefile']):
            baseentries[self.info[entry]['basefile']].append(entry)
        else:
            baseentries[self.info[entry]['basefile']] = [entry]
    for entry in self.entry_map['epi']:
        targets = []
        if self.no_motcorr:
            target = self.info[entry]['imgfile']
        elif self.info[entry]['fmapname'] is None or self.no_fmapcorr:
            target = self.info[entry]['imgfile_m']
        else:
            target = self.info[entry]['imgfile_mf']
        targets.append(target + self.info[entry]['suffix'])
        targets.append('%s%s' % (self.info[entry]['censor_prefix'], '_censor.1D'))
        pruned[entry] = [True, baseentries[self.info[entry]['basefile']]]
        for target in targets:
            pruned[entry] = \
                [False, baseentries[self.info[entry]['basefile']]]
    for key in pruned.keys():
        if not pruned[key][0]:
            for entry in pruned[key][1]:
                pruned[entry][0] = False
    tmp = new_map = []
    for entry in self.entry_map['epi']:
        if pruned[entry][0]:
            if self.verbose:
                print 'Skipping %s: Already reconstructed.' % targets[0]
            if entry in self.pfiles_recon:
                self.pfiles_recon.remove(entry)
        else:
            new_map.append(entry)
    self.entry_map['epi'] = new_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def elimination_ofconc(a2_data):\n for data in a2_data.values():\n data.pop('conc')\n return a2_data", "def nonphysicalxs_remotion(a2_data,res_nufi_removal):\n for i in a2_data['I'].keys():\n if i=='MACR' and res_nufi_removal==True:\n if 'nufi' in a2_data['I'][i]['R'].keys():\n a2_data['I'][i]['R'].pop('nufi')\n for r in a2_data['I'][i]['R'].keys():\n if any(x in r for x in ['111', '112', '122', '212', '222', '211', '322',\n '321', '312', '311', '221', '121']):\n a2_data['I'][i]['R'].pop(r)\n return a2_data", "def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated", "def revise():", "def clean_edges(self):", "def clean(df):", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)", "def cleaning (data):", "def remove_hydrogens(self) -> None:\n for cid, c in self:\n for rid, r in c:\n for aid, a in r:\n if a.element == 'H':\n print('removing H at %s' % aid)\n r.remove_atom(a)", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values", "def _remove_dangling_bonds(self) -> None:\n for residue in self.residues:\n bonds, impropers, cross_maps, ics = [], [], [], []\n for bond in residue.bonds:\n for atom_id in bond:\n if atom_id not in self._id_to_index:\n break\n else:\n bonds.append(bond)\n for improper in residue.impropers:\n for atom_id in improper:\n if atom_id not in self._id_to_index:\n break\n else:\n impropers.append(improper)\n for cross_map in residue.cross_maps:\n for atom_id in cross_map:\n if atom_id not in self._id_to_index:\n break\n else:\n cross_maps.append(cross_map)\n for ic in residue.ics:\n for res_index, atom_name in ic[:4]:\n if atom_name.replace(\"*\", \"\") not in self._id_to_index:\n 
break\n else:\n ics.append(ic)\n residue.bonds = bonds\n residue.impropers = impropers\n residue.cross_maps = cross_maps\n residue.ics = ics", "def vacuum(self):\n\t\t\n\t\tstart_resnr = 1\n\t\t#---check for remark 999 to see if we have a starting residue not 1\n\t\twith open(self.rootdir+'system-input.pdb','r') as fp: lines = fp.readlines()\n\t\tregex = 'REMARK 999 starting residue = ([0-9]+)'\n\t\ttrawl = [re.findall(regex,line) for line in lines if re.match(regex,line)]\n\t\tif trawl != []: start_resnr = int(trawl[0][0])\n\n\t\t#---fix histidine naming according to the convention set by the force field\n\t\tif self.settings['force_field'] == 'charmm27':\n\t\t\tif self.settings['histype'] == 'd':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISD\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tif self.settings['histype'] == 'e':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISE\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tif self.settings['histype'] == 'p':\n\t\t\t\thisfix = \"awk '{gsub(/HIS /,\\\"HISP\\\");print}' < system-input.pdb > prep-protein-start.pdb\"\n\t\t\tcall(hisfix,cwd=self.rootdir)\n\t\telse: copy(self.rootdir+'system-input.pdb',self.rootdir+'prep-protein-start.pdb')\n\t\t\t\n\t\tprint \"stripping non-protein molecules\"\n\t\tcmd = [gmxpaths['make_ndx'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-index-protein.ndx']\n\t\tcall(cmd,logfile='log-make-ndx-prep-protein',cwd=self.rootdir,inpipe=\"q\\n\")\n\t\tprotgrp = int(checkout([\"awk\",\"'/[ ,\\t]+Protein[ ,\\t]+:/ {print $1}'\",\n\t\t\t\"log-make-ndx-prep-protein\"],cwd=self.rootdir).strip())\n\t\tcmd = [gmxpaths['make_ndx'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-index-protein-only.ndx']\n\t\tcall(cmd,logfile='log-make-ndx-prep-protein-only',cwd=self.rootdir,\n\t\t\tinpipe=\"keep \"+str(protgrp)+\"\\nq\\n\")\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f prep-protein-start.pdb',\n\t\t\t'-o prep-protein-start-stripped.pdb',\n\t\t\t'-n prep-index-protein-only.ndx',\n\t\t\t'-resnr '+str(start_resnr)]\n\t\tcall(cmd,logfile='log-editconf-prep-protein-strip',cwd=self.rootdir)\n\n\t\tprint \"running pdb2gmx\"\n\t\tcmd = [gmxpaths['pdb2gmx'],\n\t\t\t'-f prep-protein-start-stripped.pdb',\n\t\t\t'-o vacuum-alone-number1.gro',\n\t\t\t'-p vacuum-standard.top',\n\t\t\t'-ignh',\n\t\t\t'-i system-posre.itp',\n\t\t\t'-ff '+self.settings['force_field'],\n\t\t\t'-water '+self.settings['water_model']]\n\t\tcall(cmd,logfile='log-pdb2gmx',cwd=self.rootdir)\n\t\t\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone-number1.gro',\n\t\t\t'-o vacuum-alone.gro']\n\t\tcall(cmd,logfile='log-editconf-renumber',cwd=self.rootdir)\n\t\t\n\t\t#---intervening step will isolate the ITP data from the TOP file to use standardized TOP\n\t\twith open(self.rootdir+'vacuum-standard.top','r') as f: topfile = f.read()\n\t\t#---extract protein chain names here if necessary\n\t\tchains = []\n\t\tstartline = [ii for ii,i in enumerate(topfile.split('\\n')) \n\t\t\tif re.match('^(\\s+)?\\[(\\s+)?system(\\s+)?\\]',i)][0]\n\t\tfor line in topfile.split('\\n')[startline:]:\n\t\t\tif re.match('^Protein',line):\n\t\t\t\tchains.append(line.split(' ')[0])\n\t\tif len(chains) > 1:\n\t\t\t#---assume one domain per chain\n\t\t\tself.nprots = [1 for i in chains]\n\t\t\tself.protname = chains\n\t\telse:\t\n\t\t\tself.protname = chains[0]\n\t\t\tself.nprots = 1\n\t\tfp = open(self.rootdir+'protein.itp','w') \n\t\tfor line in topfile.split('\\n'):\n\t\t\t#---skip any part of the top that follows the water topology and/or 
system composition\n\t\t\tif re.match('; Include water topology',line): break\n\t\t\tif re.match('; Include topology for ions',line): break\n\t\t\tif re.match('\\[ system \\]',line): break\n\t\t\t#---you must extract forcefield.itp from the file to prevent redundant includes\n\t\t\tif not re.match(\".+forcefield\\.itp\",line) and not \\\n\t\t\t\tre.match(\"; Include forcefield parameters\",line): \n\t\t\t\tfp.write(line+'\\n')\n\t\tfp.close()\n\t\t\n\t\t#---note that this method is currently set to only simulate one protein\n\t\tself.write_topology_protein('vacuum.top')\n\t\t\n\t\tprint \"building box with \"+str(self.settings['wbuffer'])+'nm of water'\n\t\tcmd = [gmxpaths['editconf'],\n\t\t\t'-f vacuum-alone.gro',\n\t\t\t'-d '+str(self.settings['wbuffer']),\n\t\t\t('-princ' if 'align_x' in self.settings.keys() \n\t\t\tand self.settings['align_x'] == True else ''),\n\t\t\t'-o vacuum.gro']\n\t\tcall(cmd,logfile='log-editconf-vacuum',cwd=self.rootdir,\n\t\t\tinpipe=('0\\n' if 'align_x' in self.settings.keys() \n\t\t\tand self.settings['align_x'] == True else None))\t\t\n\t\tself.minimization_method('vacuum')", "def main():\n # get commmand line args\n args = parse_arguments()\n \n adj_file = args.adj # open(\"UCSC_VIPER/pathways/extended_pathways_transcriptional.adj\", \"r\")\n \n # this set isn't actually used in the script, but I was curious...\n adj_gene_set = set() \n \n cutoff_number = args.cutoff_number\n #cutoff_percent = args.cutoff_percent\n \n expr_gene_file = args.expr_genes #open(\"stanford_batchK1-12.HUGO_only_genes.lst\", 'r')\n expr_genes = [line.strip() for line in expr_gene_file] \n \n # for each line, check that the regulator and other genes are in the\n # expression matrix gene set. if not, remove them, or remove the whole\n # line if the regulator isn't in the set or if too few genes remain\n for line in adj_file:\n \n line_list = line.strip().split()\n regulator_plus_gene_list = [x for x in line_list if x !=\"1.0\"]\n regulator = regulator_plus_gene_list[0]\n \n if regulator not in expr_genes:\n # remove the whole regulator + regulon\n print(\"Skipped a line (regulator not in expr genes): \", \n line_list[0], file=sys.stderr) \n continue\n \n gene_list = regulator_plus_gene_list[1:]\n list_size = len(gene_list)\n adj_gene_set.update(gene_list)\n \n how_many_to_remove= 0\n good_genes = []\n \n for gene in gene_list:\n if gene not in expr_genes:\n how_many_to_remove += 1\n else:\n good_genes.append(gene)\n \n #print(\"\\n\")\n #pdb.set_trace()\n #if (100-how_many_to_remove/list_size*100 < cutoff_percent) and (list_size-how_many_to_remove < cutoff_number):\n if (list_size-how_many_to_remove < cutoff_number):\n print(\"Skipped a line (too many removed): \", line_list[0], file=sys.stderr)\n \n else:\n # re-generate the new line of the .adj file with kept genes\n #genes_to_print = good_genes.insert(0, regulator)\n regulated_genes = \"\\t1.0\\t\".join(good_genes)\n print(regulator+\"\\t\"+regulated_genes+\"\\t1.0\")", "def strip_ds(ds):\n if 'brain' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'brain'), :]\n print('excluded the rest of the brain from the dataset')\n if 'overlap' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'overlap'), :]\n print('excluded overlap from the dataset')\n return ds", "def get_degenerate_statements(self):\n logger.info(\"Checking for 'degenerate' statements...\\n\")\n # Get rules of type protein X -> activity Y\n q_stmts = prefixes + \"\"\"\n SELECT ?stmt\n WHERE {\n ?stmt a belvoc:Statement .\n ?stmt belvoc:hasSubject 
?subj .\n ?stmt belvoc:hasObject ?obj .\n {\n { ?stmt belvoc:hasRelationship belvoc:DirectlyIncreases . }\n UNION\n { ?stmt belvoc:hasRelationship belvoc:DirectlyDecreases . }\n }\n {\n { ?subj a belvoc:ProteinAbundance . }\n UNION\n { ?subj a belvoc:ModifiedProteinAbundance . }\n }\n ?subj belvoc:hasConcept ?xName .\n {\n {\n ?obj a belvoc:ProteinAbundance .\n ?obj belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:ModifiedProteinAbundance .\n ?obj belvoc:hasChild ?proteinY .\n ?proteinY belvoc:hasConcept ?yName .\n }\n UNION\n {\n ?obj a belvoc:AbundanceActivity .\n ?obj belvoc:hasChild ?objChild .\n ?objChild a belvoc:ProteinAbundance .\n ?objChild belvoc:hasConcept ?yName .\n }\n }\n FILTER (?xName != ?yName)\n }\n \"\"\"\n res_stmts = self.g.query(q_stmts)\n\n logger.info(\"Protein -> Protein/Activity statements:\")\n logger.info(\"---------------------------------------\")\n for stmt in res_stmts:\n stmt_str = strip_statement(stmt[0])\n logger.info(stmt_str)\n self.degenerate_stmts.append(stmt_str)", "def _reducedProtToPeps(protToPeps, proteins):\n return {k: v for k, v in viewitems(protToPeps) if k not in proteins}", "def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True", "def clean(self):\n return _coconut_tail_call((self.__class__), *filter(_coconut.functools.partial(_coconut.operator.ne, self.identity), self.elems))", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.nan\n\n # Evaluate the proton flux data\n pcols = ['pflux_47-68', 'pflux_115-195', 'pflux_310-580',\n 'pflux_795-1193', 'pflux_1060-1900']\n self[self.data['status_p'] > max_status, pcols] = np.nan\n\n # Include both fluxes and the anisotropy index in the removal eval\n eval_cols = ecols + pcols\n eval_cols.append('anis_ind')\n\n # Remove lines without any good data\n good_cols = (np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)\n bad_index = good_cols[good_cols == 0].index\n self.data = self.data.drop(index=bad_index)\n\n return", "def _remove_tech_rep_negatives(self):\n\n # For each row in the post_med_df, find the mapping key that is a substring\n # Should be only one, check this.\n # Then once you have found the one, check all samples in the post_med df to see if it matches any other\n # if you return multiple matches, then keep only the one with the biggest number of contigs,\n # and all others to a drop list. Keep a checked list so that we don't have to check readsets twice.\n # Also up date a dictionary as you go that is the full readset to the sample-id that it needs to become.\n # Once this has been done for the post-med do it for the pre-med.\n # For the pre-med, use the dictionary we created while doing the post-med\n\n # Get the post med df. 
Read it in with index as false and set index manually without dropping\n # this way we can work with the index, but then we can not write it out later so as not\n # to disturb the column orders.\n post_med_count_path = os.path.join(self.negative_output_dir_path, 'post_med_seqs', [_ for _ in os.listdir(\n os.path.join(self.negative_output_dir_path, 'post_med_seqs')) if 'abund' in _][0])\n post_med_df = pd.read_csv(post_med_count_path, index_col=False)\n post_med_df = post_med_df.set_index('sample-id', drop=False)\n\n # Same for the pre_med\n pre_med_count_path = os.path.join(self.negative_output_dir_path, 'pre_med_seqs', [_ for _ in os.listdir(\n os.path.join(self.negative_output_dir_path, 'pre_med_seqs')) if 'abund' in _][0])\n pre_med_df = pd.read_csv(pre_med_count_path, index_col=False)\n pre_med_df = pre_med_df.set_index('sample-id', drop=False)\n\n # First check to see if the sample-ids have already been fixed\n if 'TARA' in pre_med_df.index[0] and 'TARA' in post_med_df.index[0]:\n return\n if 'TARA' in pre_med_df.index[0] and 'TARA' not in post_med_df.index[0]:\n raise RuntimeError\n if 'TARA' not in pre_med_df.index[0] and 'TARA' in post_med_df.index[0]:\n raise RuntimeError\n\n # The dictionary df that Setphane produced\n mapping_df = pd.read_csv(self.negative_mapping_file_path, index_col=0)\n # Make the mapping dictionary from the Stephane df\n raw_mapping_dict = {}\n for df_ind in mapping_df.index:\n raw_mapping_dict[df_ind] = mapping_df.at[df_ind, 'sample-id_source']\n\n # This is the dictionary we are going to populate that had the full genoscope readset\n # as the key and the equivalent TARA sample-id as the value\n curated_mapping_dict = {}\n\n # Check that the assumption holds that both of the indeces are identifcal except for order.\n # NB the post med df has an annoying row at the end.\n assert(set(post_med_df.index[:-1]) == set(pre_med_df.index))\n contig_dict = {readset: contig for readset, contig in zip(post_med_df['sample-id'][:-1], post_med_df['raw_contigs'][:-1])}\n\n to_drop_list = []\n checked_list = []\n for pm_ind in post_med_df.index[:-1]:\n if pm_ind in checked_list:\n continue\n match = []\n for map_ind in mapping_df.index:\n if map_ind in pm_ind:\n match.append(map_ind)\n if len(match) == 0:\n print(f'pm_ind: {pm_ind} found 0 matches. 
This sample will be dropped.')\n to_drop_list.append(pm_ind)\n continue\n elif len(match) > 1:\n raise RuntimeError\n\n # Now we have the mapping indice that matches\n match = match[0]\n pm_matches = []\n for pm_ind_again in post_med_df.index[:-1]:\n if match in pm_ind_again:\n pm_matches.append(pm_ind_again)\n assert(len(pm_matches) > 0)\n if len(pm_matches) > 1:\n # Then we have technical replicates and we only want to keep the largest\n contig_match_dict = {pm_match: contig_dict[pm_match] for pm_match in pm_matches}\n sorted_keys = sorted(contig_match_dict, key=contig_match_dict.get, reverse=True)\n # Add all of the matches to the check_list\n checked_list.extend(sorted_keys)\n curated_mapping_dict[sorted_keys[0]] = raw_mapping_dict[match]\n to_drop_list.extend(sorted_keys[1:])\n else:\n checked_list.append(pm_matches[0])\n curated_mapping_dict[pm_matches[0]] = raw_mapping_dict[match]\n\n # drop the rows\n post_med_df.drop(index=to_drop_list, inplace=True)\n # We now need to get rid of any sequence count columns that only have 0s after dropping the samples\n # The last meta column is post_med_unique\n cols = list(post_med_df)\n c_ind = cols.index('post_med_unique') + 1\n cols_to_check = cols[c_ind:]\n cols_to_drop = []\n for col in cols_to_check:\n if (post_med_df[col][:-1] == 0).all():\n cols_to_drop.append(col)\n\n # drop the cols\n post_med_df.drop(columns=cols_to_drop, inplace=True)\n\n # rename\n for ind in post_med_df.index[:-1]:\n current = post_med_df.at[ind, 'sample-id']\n post_med_df.at[ind, 'sample-id'] = curated_mapping_dict[current]\n\n # Here we have the curated mapping dict popualted and we can now use this to\n # process the pre_med df\n pre_med_df.drop(index=to_drop_list, inplace=True)\n # We now need to get rid of any sequence count columns that only have 0s after dropping the samples\n # The last meta column is post_med_unique\n cols = list(pre_med_df)\n c_ind = cols.index('sample-id') + 1\n cols_to_check = cols[c_ind:]\n cols_to_drop = []\n for col in cols_to_check:\n if (pre_med_df[col][:-1] == 0).all():\n cols_to_drop.append(col)\n\n # drop the cols\n pre_med_df.drop(columns=cols_to_drop, inplace=True)\n\n # rename\n for ind in pre_med_df.index:\n current = pre_med_df.at[ind, 'sample-id']\n pre_med_df.at[ind, 'sample-id'] = curated_mapping_dict[current]\n\n # Now convert the columns to int32\n d_type_dict = {col_name : pd.Int32Dtype() for col_name in list(post_med_df)[2:]}\n post_med_df = post_med_df.astype(d_type_dict)\n d_type_dict = {col_name : pd.Int32Dtype() for col_name in list(pre_med_df)[2:]}\n pre_med_df = pre_med_df.astype(d_type_dict)\n\n # Important to write out with index as false\n post_med_df.to_csv(post_med_count_path, index=False, header=True)\n pre_med_df.to_csv(pre_med_count_path, index=False, header=True)", "def cleanup_transition_matrix(matrix,polarization):\n\n index = []\n for i in range(len(matrix['label'])):\n if (polarization[0] == 0) & ('right' in matrix['label'][i]):\n index.append(i)\n elif (polarization[1] == 0) & ('parallel' in matrix['label'][i]):\n index.append(i)\n elif (polarization[2] == 0) & ('left' in matrix['label'][i]):\n index.append(i)\n\n for i in reversed(index):\n del matrix['label'][i]\n del matrix['bra_energy'][i]\n del matrix['ket_energy'][i]\n del matrix['matrix'][i]\n\n return matrix", "def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and 
residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")", "def CleanUp(self):\n for Ind in self.IndList():\n if amax(abs(self[Ind]))<1e-10:\n del self[Ind]", "def clean_up(self, prune=True, b_factor=None, filename=\"clean.pdb\"):\n skipped = [\"HOH\", \"WAT\"]\n for chain in self.hier.chains():\n for residue in chain.residue_groups():\n if b_factor is not None:\n atoms = residue.atom_groups()[0].atoms()\n atoms.set_b(flex.double(len(atoms), b_factor))\n resname = residue.unique_resnames()[0].strip()\n if prune:\n if resname in unmodified_residues:\n continue\n elif resname in PTM_reverse_lookup.keys():\n pruned_resname = PTM_reverse_lookup[resname]\n PTM_lookup[pruned_resname][resname][\"prune_lambda\"](residue)\n for ag in residue.atom_groups():\n ag.resname = pruned_resname\n else:\n if resname not in skipped:\n print \"Warning: skipping unrecognized residue, ligand or ion %s\" % resname\n skipped.append(resname)\n self.hier.write_pdb_file(filename)", "def dropRedundantEcotypes(self, input_fname, ecotypeid2tg_ecotypeid):\n\t\tsys.stderr.write(\"Dropping redundant ecotypes ...\\n\")\n\t\treader = csv.reader(open(input_fname), delimiter=figureOutDelimiter(input_fname))\n\t\tcol_name2col_index = getColName2IndexFromHeader(reader.next())\n\t\tecotypeid_idx = col_name2col_index['ecotypeid']\n\t\thaplo_name_idx = col_name2col_index['haplogroup']\n\t\tnativename_idx = col_name2col_index['nativename']\n\t\ttg_ecotypeid2row = {}\n\t\tno_of_duplicates = 0\n\t\tno_of_duplicates_with_different_haplogroups = 0\n\t\tcounter = 0\n\t\tfor row in reader:\n\t\t\tecotypeid = int(row[ecotypeid_idx])\n\t\t\thaplo_name = row[haplo_name_idx]\n\t\t\tnativename = row[nativename_idx]\n\t\t\tif ecotypeid in ecotypeid2tg_ecotypeid:\n\t\t\t\ttg_ecotypeid = ecotypeid2tg_ecotypeid[ecotypeid]\n\t\t\t\tif tg_ecotypeid not in tg_ecotypeid2row:\n\t\t\t\t\ttg_ecotypeid2row[tg_ecotypeid] = row\n\t\t\t\telse:\n\t\t\t\t\tno_of_duplicates += 1\n\t\t\t\t\told_row = tg_ecotypeid2row[tg_ecotypeid]\n\t\t\t\t\told_ecotypeid = int(old_row[ecotypeid_idx])\n\t\t\t\t\told_haplo_name = old_row[haplo_name_idx]\n\t\t\t\t\told_nativename = row[nativename_idx]\n\t\t\t\t\tif old_haplo_name!=haplo_name:\n\t\t\t\t\t\tsys.stderr.write(\"ecotype %s(%s) in haplotype group %s, while duplicate %s(%s) in haplotype group %s.\\n\"%\\\n\t\t\t\t\t\t\t\t\t\t (ecotypeid, nativename, haplo_name, old_ecotypeid, old_nativename, old_haplo_name))\n\t\t\t\t\t\tno_of_duplicates_with_different_haplogroups += 1\n\t\t\t\t\tif ecotypeid==tg_ecotypeid:\t#replace if the new ecotypeid matching the tg_ecotypeid whether the haplotype group is same or not.\n\t\t\t\t\t\ttg_ecotypeid2row[tg_ecotypeid] = row\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Warning: ecotype %s not in ecotypeid2tg_ecotypeid.\\n\"%(ecotypeid))\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"no_of_duplicates: %s, out of which %s encompass different haplotype groups. %s accessions in total. 
Done.\\n\"%\\\n\t\t\t\t\t\t (no_of_duplicates, no_of_duplicates_with_different_haplogroups, counter))\n\t\treturn tg_ecotypeid2row", "def _Dedup(self):\n kegg_id_to_index = {}\n for i, c in enumerate(self.reactants):\n first_i = kegg_id_to_index.setdefault(c.compound.kegg_id, i)\n if i != first_i:\n self.reactants[first_i].coeff += c.coeff\n c.coeff = 0\n \n self.reactants = filter(lambda x: x.coeff != 0, self.reactants)\n \n # always make sure that H2O is the last reactant (so that it will\n # appear last in the chemical formula)\n i_h2o = self._FindCompoundIndex('C00001')\n if i_h2o is not None:\n self.reactants = self.reactants[:i_h2o] + \\\n self.reactants[(i_h2o + 1):] + \\\n [self.reactants[i_h2o]]", "def purgeTrp(atoms):\n for a in atoms:\n found = False\n if getAtype(a) == \"N\":\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = True\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def group_remotion(a2_data, retained):\n for i in a2_data['I'].keys():\n for r in a2_data['I'][i]['R'].keys():\n for g in a2_data['I'][i]['R'][r].keys():\n if g not in retained:\n a2_data['I'][i]['R'][r].pop(g)\n return a2_data", "def remove_nans(ifile):\n\n usecols = [1, 94, 90, 102, 103, 104, 105, 127, 128, 114]\n df = pd.read_csv(ifile, sep=\",\",low_memory=False)\n\n for c in df.columns:\n df[c] = pd.to_numeric(df[c],errors='coerce')\n\n \n # filter the flag calib_psfCandidate==False\n # not a star candidate\n df = df.query('calib_psfCandidate == 0.0')\n\n # filter the column deblend_nChild==0\n # no child source after deblending\n df = df.query('deblend_nChild == 0.0')\n df = df.copy()\n\n # clean out unphysical results\n # e1^2 + e2^2 < 1.5^2\n df['e'] = (df['ext_shapeHSM_HsmShapeRegauss_e1'] ** 2 + df['ext_shapeHSM_HsmShapeRegauss_e2'] ** 2)**0.5\n\n df = df.query('e < 1.5')\n\n # take only required columns\n cols_select = ['id',\n 'base_SdssCentroid_x', 'base_SdssCentroid_y',\n 'base_SdssCentroid_xSigma','base_SdssCentroid_ySigma',\n 'ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2',\n 'base_SdssShape_flux']\n\n df = df[cols_select]\n\n # drop all nans\n df = df.dropna()\n\n # write txt file with commented header\n prefix = ' '*11\n header_line = prefix.join(cols_select)\n np.savetxt(ifile[0:-4]+'.txt',df.values,header=header_line,delimiter='\\t')", "def table_reduction(self):\n answer = Surjection_element(torsion=self.torsion,\n convention='Berger-Fresse')\n for k1, v in self.items():\n d, a = len(k1) - 1, max(k1[0])\n for pi in partitions(d + a, d + 1, ordered=True):\n k2, removed = [], []\n degenerate = False\n for idx, i in enumerate(pi):\n filtered = [i for i in k1[idx]\n if i not in removed]\n if idx > 0 and k2[-1] == filtered[0]:\n degenerate = True\n break\n if i > 1:\n removed += filtered[: i - 1]\n k2 += filtered[: i]\n\n if not degenerate:\n answer += answer.create({tuple(k2): v})\n\n answer._reduce_rep()\n\n return answer", "def _common_preprocess(self, data):\n\n data = data.drop('id', axis=1) \n data = data.drop(['17', '488', 'B01AF', 'H01AB'], axis=1, errors='ignore')\n\n # drop age outliers\n idx = data[(data['age'] > 99)].index\n data = data.drop(idx)\n\n # drop rows with CKD\n idx = data[((data['585'] != 0) | (data['586'] != 0)) &\n (data['ckd'] == 0)].index\n data = data.drop(idx)\n data = data.drop(['585', '586'], axis=1)\n\n return data", "def elimination_technique_2(C):\n rels = 
C._reidemeister_relators\n rels.sort(reverse=True)\n gens = C._schreier_generators\n for i in range(len(gens) - 1, -1, -1):\n rel = rels[i]\n for j in range(len(gens) - 1, -1, -1):\n gen = gens[j]\n if rel.generator_count(gen) == 1:\n k = rel.exponent_sum(gen)\n gen_index = rel.index(gen**k)\n bk = rel.subword(gen_index + 1, len(rel))\n fw = rel.subword(0, gen_index)\n rep_by = (bk*fw)**(-1*k)\n del rels[i]; del gens[j]\n for l in range(len(rels)):\n rels[l] = rels[l].eliminate_word(gen, rep_by)\n break\n C._reidemeister_relators = rels\n C._schreier_generators = gens\n return C._schreier_generators, C._reidemeister_relators", "def old_strip_sql(data, sap_stat=True):\r\n tab_fixs = [[\"PCOGIS.SDE.\", ''],\r\n [\"Auxiliary Equipment\", \"AUXILLARYEQUIPMENT\"]]\r\n for old_str, new_str in tab_fixs:\r\n data['TABLE'] = data['TABLE'].str.replace(old_str, new_str)\r\n data = data.dropna(subset=['COLUMN'])\r\n bad_atts = [\" \", \"SHAPE_Length\", \"HVFUSES\", \"LVFUSES\", \"SHAPE_Area\",\r\n \"ACTUALLENGTH\", \"DECOMMISSIONINGDATE\", \"DECOMMISSIONINGREASON\"]\r\n data = data[~data['COLUMN'].isin(bad_atts)]\r\n bad_tab_atts = [['SWITCHUNIT$', 'INTERRUPTINGMEDIUM$'],\r\n ['DistributionMain$', '^CROSSINGID$'],\r\n ['DistributionMain$', '^MOUNTINGTYPE$'],\r\n ['DistributionMain$', '^MOUNTINGPOSITION$']]\r\n for tab_str, att_str in bad_tab_atts:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str))]\r\n bad_doubles = [['Regulator$', '^SUBTYPECD$', 'y'],\r\n ['RegulatorStation$', '^EQUIPMENTID$', 'N'],\r\n ['SurfaceStructure$', '^APPLICATION$', 'N'],\r\n ['SurfaceStructure$', '^ENTRY$', 'N'],\r\n ['SurfaceStructure$', '^FACILITYID$', 'N'],\r\n ['SurfaceStructure$', '^MANUFACTURER$', 'N'],\r\n ['SurfaceStructure$', '^MATERIAL$', 'N'],\r\n ['SurfaceStructure$', '^MODEL$', 'N'],\r\n ['SurfaceStructure$', '^STRUCTURESIZE$', 'N'],\r\n ['COMMSPOWERSUPPLY$', '^BATTERYAMPERAGEHOURS$', 'N'],\r\n ['COMMSPOWERSUPPLY$', '^BATTERYCOUNT$', 'N']]\r\n if sap_stat is True:\r\n for tab_str, att_str, sap_str in bad_doubles:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].str.match(sap_str))]\r\n bad_null = [['SurfaceStructure$', '^ENCLOSURE$'],\r\n ['SurfaceStructure$', '^ENCLOSURETYPE$'],\r\n ['SurfaceStructure$', '^ENCLOSUREMANUFACTURER$']]\r\n for tab_str, att_str in bad_null:\r\n data = data[~(data['TABLE'].str.match(tab_str) &\r\n data['COLUMN'].str.match(att_str) &\r\n data['SAP'].isnull())]\r\n return data", "def removeOrphanContents(self):\n reObjNum = re.compile('[0-9A-Z]{8}$')\n #--Determine which contIds are matched to a reference.\n contIds = set(self.conts_id.keys())\n matched = dict([(id,False) for id in contIds])\n for cell in self.cells:\n objects = cell.getObjects()\n for object in objects.list():\n objId= object[2]\n #--LVCR? 
Get id of spawned creature instead.\n for objRecord in object[3]:\n if objRecord.name == 'NAME':\n objId = cstrip(objRecord.data)\n break\n if reObjNum.search(objId):\n if objId in contIds:\n matched[objId] = True\n #--Special case: PlayerSaveGame\n matched['PlayerSaveGame00000000'] = True\n #--unmatched = container records that have not been matched.\n orphans = set([self.conts_id[id] for id in contIds if not matched[id]])\n for orphan in sorted(orphans, key=lambda a: a.getId().lower()):\n self.log(' '+orphan.getId())\n #--Delete Records\n self.records = [record for record in self.records if record not in orphans]\n self.conts = [record for record in self.conts if record not in orphans]\n self.conts_id = dict([(id,record) for id,record in self.conts_id.iteritems() if matched[id] > 0])\n return len(orphans)", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def removeDegenerate(self):\n return self[~self.testDegenerate()]", "def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]", "def filter_cobra_model_genes():\n model_genes = [str(x.id) for x in self.base_cobra_model.genes]\n keep_alleles = []\n for col in strain_allele_df.columns:\n if col.split(allele_gene_sep)[0] in 
model_genes:\n keep_alleles.append(col)\n # X_model = strain_allele_df.drop(drop_alleles, axis=1)\n print(\"# alleles:\", len(strain_allele_df.columns), \"-> removing alleles not in GEM -> # alleles after:\", len(keep_alleles))\n return keep_alleles", "def reaction_remotion(a2_data, retained_reactions):\n for i in a2_data['I'].keys():\n for r in a2_data['I'][i]['R'].keys():\n if r not in retained_reactions:\n a2_data['I'][i]['R'].pop(r)\n return a2_data", "def _reparse_database_emails(self):\r\n emails = self.db.read_sql(\"SELECT * FROM emails\")\r\n emails.loc[:, email2] = emails.loc[:, email].apply(self.parse_email)\r\n diff_emails = emails.loc[emails[email2] != emails[email], [email, email2, 'dealno']]\r\n \r\n if not diff_emails.empty:\r\n diff_emails.dropna(how='any', axis=0, inplace=True)\r\n diff_emails.reset_index(drop=True, inplace=True)\r\n diff_emails = diff_emails.loc[diff_emails[email2] != '', :]\r\n print(\"Reprocessing {} records that changed when reparsed.\".format(diff_emails.index.size))\r\n try:\r\n for i in range(diff_emails.index.size):\r\n #Reprocess the re-parsed email with the original dealno.\r\n r = diff_emails.iloc[i]\r\n self.deep_clean_one(r[email2], r['dealno'])\r\n self.delete_email(r[email])\r\n self.db.con.commit()\r\n self.deep_processing_rerun_all()\r\n print(\"Re-parse processing complete.\")\r\n except:\r\n self.db.con.rollback()\r\n raise\r\n else:\r\n print(\"No new database email records found to re-process.\")", "def remove_redundant(hocfile):\n tempout = 'temp_outfile.txt'\n # Run first pass\n tb = call_demoRead(hocfile)\n if tb is True or tb.split(None)[0] == 'None':\n return # file is already ready\n else:\n go = True\n \n # Loop until the file is ready \n while go:\n print('Got new traceback')\n seglist = parse_traceback(tb)\n if len(seglist) == 1:\n return # file is ready\n elif len(seglist) == 2:\n # two segments returned -- remove them\n xmlwhich_control(hocfile, seglist[0], seglist[1])\n # remove carriage returns\n remove_carriage_returns('temphoc.hoc', hocfile)\n # check new hocfile\n tb = call_demoRead(hocfile)\n else:\n print('bad seglist:')\n print(seglist)\n return \n if tb is True or tb.split(None)[0] == 'None':\n return # file is ready\n \n # end of while loop and function", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def _remove_duplicates_(self):\n t = self.table_orig\n mask = []\n t_obs = np.unique(t['jdobs'])\n for t_ in t_obs:\n if np.sum(t['jdobs'] == t_) == 1:\n mask.append(True)\n else:\n mags = t['magpsf'][t['jdobs'] == t_]\n if len(np.unique(mags)) == 1:\n mask.append(True)\n for k in range(len(mags) - 1):\n mask.append(False)\n elif np.sum(np.unique(mags) < 90) == 1:\n done = False\n for m_ in mags:\n if m_ < 90. 
and not done:\n mask.append(True)\n done = True\n else:\n mask.append(False)\n else:\n mags_ = np.unique(mags)\n mags_ = np.array(mags_[mags_ < 90])\n\n done = [False for k in range(len(mags_))]\n for m_ in mags:\n if m_ < 90.:\n k = np.where(mags_ == m_)[0][0]\n if not done[k]:\n mask.append(True)\n done[k] = True\n else:\n mask.append(False)\n\n self.table = t[np.array(mask)]", "def removeRounabouts(segmentsMeta):\n roundabouts = segmentsMeta[segmentsMeta.nodes.apply(lambda x : x[0]==x[-1] )]\n roundabouts.apply(lambda x : linkInsOuts(x['ins'],x['outs'],segmentsMeta,x.name) ,axis=1)\n segmentsMeta.drop(roundabouts.index,inplace=True)", "def prepare(self):\n # Remove rows where the temporal scope could not be parsed correctly.\n self.triples = self.triples[(self.triples.time_begin != -99999) & (self.triples.time_end != -99999)]\n\n # Remove rows with start time after the end time.\n time_diffs = self.triples[\"time_end\"] - self.triples[\"time_begin\"]\n invalid_times = time_diffs[time_diffs < 0].index\n self.triples.drop([x for x in invalid_times], inplace=True, axis=\"index\")\n\n # Remove all entities that no longer have any facts associated with them from the dictionary.\n entity_counts = Utils.get_entity_counts(self.triples, self.entities)\n for idx, count in entity_counts.items():\n if count == 0:\n del self.entities[idx]\n\n # Remove all relations that no longer have any facts associated with them from the dictionary.\n relation_counts = Utils.get_group_counts(self.triples.groupby(\"relation_id\"), self.relations)\n for idx, count in relation_counts.items():\n if count == 0:\n del self.relations[idx]", "def _cleanProcessDf(self):\n # Se applica la funcion a todo el DataFrame exepto la columna \"State\"\n self._df.iloc[:, 1:] = self._df.iloc[:, 1:].applymap(\n lambda x: self._extractTuples(x))", "def purgeHis(atoms):\n for a in atoms:\n if getAtype(a) == \"N\" or getAtype(a) == \"NA\":\n found = 0\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = 1\n break\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! 
Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def clean(self):\n if self.reloading:\n self.cleaned = pd.concat(\n [self.raw[0: self.brkIdx1+1],\n self.raw[self.brkIdx3+1: self.brkIdx4+1]])\n else:\n self.cleaned = self.raw[0: self.brkIdx1+1]\n self.cleaned.reset_index(drop=True, inplace=True) # update idx\n # -- Cubic spline that passes through the data\n sigmaLog = np.log10(self.cleaned['stress'][1:])\n cs = CubicSpline(x=sigmaLog, y=self.cleaned['e'][1:])\n self.eSigmaV = float(cs(np.log10(self.sigmaV))) # void ratio at sigmaV\n return", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency <= 0:\n return True\n else:\n if line.amount_residual <= 0:\n return True\n return False", "def clean(c):", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def preprocess(table):\n # drop the column Zeitraum\n table = table.drop('Zeitraum', axis=1)\n # drop the rows containing the true results of the elections\n Idx = np.where(table.Befragte=='Bundestagswahl')[0]\n Idx = np.append(Idx, np.where(table['CDU/CSU'].str.contains('Umfrage'))[0])\n table = table.drop(Idx)\n table.index = np.arange(table.shape[0])\n # replace the strings %,-\n table = table.replace('%', '', regex=True)\n table = table.replace(',', '.', regex=True)\n table = table.replace('[–?]', '', regex=True)\n # fix the column Befragte !!!!!!!!!!!!!!\n table.Befragte = table.Befragte.replace('[T • ?≈O • .]', '', regex=True)\n # replace all empty entries with NaN\n table = table.replace('', 'NaN', regex=True)\n\n # if the colomn Sonstige contains entries with more than one number\n try: \n table.Sonstige = table.Sonstige.astype(float)\n except ValueError:\n for i, n in enumerate(table.Sonstige):\n if len(n) > 2:\n digits = np.array([digit for digit in np.arange(10).astype(str) if digit in n])\n table.Sonstige[i] = digits.astype(int).sum()\n table.Sonstige = table.Sonstige.astype(float)\n\n # convert all numbers to float\n table[table.keys()[1:]] = table[table.keys()[1:]].astype(float)\n # convert the date to type date\n table.Datum = pd.to_datetime(table.Datum, format='%d.%m.%Y').dt.date\n return table", "def applyMorphologicalCleaning(self, image):", "def _remove_pre_post_1q(self, circ):\n dag = circuit_to_dag(circ)\n del_list = []\n for node in dag.topological_op_nodes():\n if len(node.qargs) > 1:\n break\n del_list.append(node)\n for node in reversed(list(dag.topological_op_nodes())):\n if len(node.qargs) > 1:\n break\n del_list.append(node)\n for node in del_list:\n dag.remove_op_node(node)\n return dag_to_circuit(dag)", "def clean(self):\n self.unique_combinations = {}\n self.reverse_combinations = []\n self.label_count = None", "def remove_data():\n # Removing the existing data\n col_answer_given.remove()\n col_answer_not_given.remove()\n col_q_not_given.remove()\n col_to_summarize.remove()", "def restart():\n for pig in pigs.copy():\n space.remove(pig.shape, pig.shape.body)\n pigs.remove(pig)\n for bird in birds.copy():\n space.remove(bird.shape, bird.shape.body)\n birds.remove(bird)\n for column in columns.copy():\n space.remove(column.shape, column.shape.body)\n 
columns.remove(column)\n for beam in beams.copy():\n space.remove(beam.shape, beam.shape.body)\n beams.remove(beam)", "def clean_edges(self):\n for from_node in self.all_nodes():\n for to_node in self.all_nodes():\n if from_node == to_node:\n continue\n dup = list(filter(lambda x: x.from_node == from_node and x.to_node == to_node, self.edges))\n if len(dup) > 1:\n for d in dup[1:]:\n self.edges.remove(d)", "def removeBiotype(df):\n\tdf = df[ df.Biotype != 'IG_C_gene']\n\tdf = df[ df.Biotype != 'IG_D_gene']\n\tdf = df[ df.Biotype != 'IG_J_gene']\n\tdf = df[ df.Biotype != 'IG_V_gene']\n\tdf = df[ df.Biotype != 'pseudogene']\n\tdf = df[ df.Biotype != 'rRNA']\n\tdf = df[ df.Biotype != 'sRNA']\n\tdf = df[ df.Biotype != 'TR_C_gene']\n\tdf = df[ df.Biotype != 'TR_D_gene']\n\tdf = df[ df.Biotype != 'TR_J_gene']\n\tdf = df[ df.Biotype != 'TR_V_gene']\n\tdf = df[ df.Biotype != 'macro_lncRNA']\n\tdf = df[ df.Biotype != 'bidirectional_promoter_lncRNA']\n\tdf = df[ df.Biotype != '3prime_overlapping_ncRNA']\n\tdf = df[ df.Biotype != 'non_coding']\n\tdf = df[ df.Biotype != 'pseudogene']\n\tdf = df[ df.Biotype != 'TR_J_pseudogene']\n\tdf = df[ df.Biotype != 'IG_C_pseudogene']\n\tdf = df[ df.Biotype != 'IG_J_pseudogene']\n\tdf = df[ df.Biotype != 'IG_pseudogene']\n\tdf = df[ df.Biotype != 'TR_V_pseudogene']\n\tdf = df[ df.Biotype != 'polymorphic_pseudogene']\n\tdf = df[ df.Biotype != 'IG_V_pseudogene']\n\tdf = df[ df.Biotype != 'TEC']\n\tdf = df[ df.Biotype != 'Predictif']\n\tdf = df[ df.Biotype != 'ribozyme']\n\tdf = df[ df.Biotype != 'scRNA']\n\tdf = df[ df.Biotype != 'scaRNA']\n\tdf = df[ df.Biotype != 'snRNA']\n\tdf = df[ df.Biotype != 'snoRNA']\n\tdf = df[ df.Biotype != 'vaultRNA']\n\tdf = df[ df.Biotype != 'translated_processed_pseudogene']\n\treturn df", "def removeOutliers(self):\n #With the DSFPlate object, we can just use self.wells.pop() to remove outliers\n visited = []\n discard = []\n for well in self.wells:\n if well not in visited:\n reps = []\n reps += self.originalPlate.repDict[well]\n pairs = combinations(reps,2)\n distMatrix = [[0 for x in range(len(reps))] for y in range(len(reps))]\n for pair in pairs:\n dist = sqrDiffWellFluoro(self.wells[pair[0]].fluorescence,self.wells[pair[1]].fluorescence)\n distMatrix[reps.index(pair[0])][reps.index(pair[1])] = dist\n distMatrix[reps.index(pair[1])][reps.index(pair[0])] = dist\n keep = rh.discardBad(reps,distMatrix,SIMILARITY_THRESHOLD)\n for rep in reps:\n visited.append(rep)\n if rep not in keep:\n discard.append(rep)\n for well in discard:\n self.wells[well].fluorescence = None\n self.delCurves.append(well)\n return", "def remove_duplicated_url_entries(self):\n\n # based on the data in the WebSite table create a data frame with all the kvk which\n # we have already included. These can be removed from the data we have just read\n nr = self.url_df.index.size\n self.logger.info(\"Removing duplicated kvk/url combinies. 
Data read at start: {}\".format(nr))\n self.logger.debug(\"Getting all sql websides from database\")\n kvk_list = list()\n url_list = list()\n name_list = list()\n query = (self.CompanyTbl\n .select()\n .prefetch(self.WebsiteTbl)\n )\n for cnt, company in enumerate(query):\n kvk_nr = company.kvk_nummer\n naam = company.naam\n for web in company.websites:\n kvk_list.append(kvk_nr)\n url_list.append(web.url)\n name_list.append(naam)\n\n kvk_in_db = pd.DataFrame(\n data=list(zip(kvk_list, url_list, name_list)),\n columns=[KVK_KEY, URL_KEY, NAME_KEY])\n kvk_in_db.set_index([KVK_KEY, URL_KEY], drop=True, inplace=True)\n\n # drop all the kvk number which we already have loaded in the database\n self.logger.debug(\"Dropping all duplicated web sides\")\n kvk_to_remove = self.url_df.set_index([KVK_KEY, URL_KEY])\n kvk_to_remove = kvk_to_remove.reindex(kvk_in_db.index)\n kvk_to_remove = kvk_to_remove[~kvk_to_remove[NAME_KEY].isnull()]\n try:\n self.url_df = self.url_df.set_index([KVK_KEY, URL_KEY]).drop(index=kvk_to_remove.index)\n except KeyError:\n self.logger.debug(\"Nothing to drop\")\n else:\n self.url_df.reset_index(inplace=True)\n\n self.logger.debug(\"Getting all companies in Company table\")\n kvk_list = list()\n name_list = list()\n for company in self.CompanyTbl.select():\n kvk_list.append(int(company.kvk_nummer))\n name_list.append(company.naam)\n companies_in_db = pd.DataFrame(data=list(zip(kvk_list, name_list)),\n columns=[KVK_KEY, NAME_KEY])\n companies_in_db.set_index([KVK_KEY], drop=True, inplace=True)\n\n self.logger.debug(\"Dropping all duplicated companies\")\n comp_df = self.url_df.set_index([KVK_KEY, URL_KEY])\n comp_df.drop(index=companies_in_db.index, level=0, inplace=True)\n self.url_df = comp_df.reset_index()\n\n nr = self.url_df.index.size\n self.logger.debug(\"Removed duplicated kvk/url combies. 
Data at end: {}\".format(nr))", "def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]", "def deep_processing_rerun_all(self):\r\n sql = \"\"\"SELECT * FROM emails \r\n WHERE email_status = 'processing' \r\n AND clean_type = 1\"\"\"\r\n df = self.db.read_sql(sql)\r\n\r\n for i in range(df.index.size):\r\n rec = df.loc[i, :]\r\n self.deep_clean_one(rec[EMAIL], dealno=rec['dealno'])\r\n self.db.con.commit()\r\n print('Reprocessed {} records that were stuck in the processing status'.format(df.index.size))", "def clean_up(houses:pd.DataFrame) -> pd.DataFrame:\n houses= delete_columns(houses)\n houses= analyze_missing_values(houses)\n houses= add_seller_house(houses)\n houses= add_underscore(houses)\n houses= create_dummies(houses)\n houses= impute(houses)\n return houses", "def test_clean_run(self):\n Historical_ROAs_Parsed_Table(clear=True)\n with Historical_ROAs_Table(clear=True) as t:\n Historical_ROAs_Parser().run()\n assert t.get_count() > 2000000", "def setDifference(self, table2):\n results = set([])\n for rec in self.records:\n rec_tuple = tuple([v for (k, v) in rec.items()])\n results.add(rec_tuple)\n for rec in table2.records:\n rec_tuple = tuple([v for (k, v) in rec.items()])\n if rec_tuple in results:\n results.remove(rec_tuple)\n for item in results:\n print item", "def pruneSelfModifyingRelationships(self):\n modifiers = self.getConTextModeNodes(\"modifier\")\n nodes_to_remove = []\n for modifier in modifiers:\n modified_by = self.successors(modifier)\n if modified_by:\n for mod_by in modified_by:\n if self.getVerbose():\n print(mod_by, modifier, mod_by.encompasses(modifier))\n if mod_by.encompasses(modifier):\n nodes_to_remove.append(modifier)\n if self.getVerbose():\n print(\"removing the following self modifying nodes\", nodes_to_remove)\n self.remove_nodes_from(nodes_to_remove)", "def EliminateRows(self, rows):\n return _hypre.HypreParMatrix_EliminateRows(self, rows)", "def pruneSelfModifyingRelationships(markup):\n markupNew = markup.copy()\n modifiers = markup.getConTextModeNodes(\"modifier\")\n nodesToRemove = []\n for m in modifiers:\n modifiedBy = markup.successors(m)\n if( modifiedBy ):\n for mb in modifiedBy:\n if( TO.encompasses(mb,m) ):\n nodesToRemove.append(m)\n markupNew.remove_nodes_from(nodesToRemove)\n return markupNew", "def _clean_graph(self):\n for entry_node in self._entry_nodes:\n self._clean_graph_visit(entry_node.get_func_first_node(), {})", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def compress(self):\n gamma = -1\n A = self.A\n A_dict = self.A_dict\n A_dict_inv = self.A_dict_inv\n chi = tuple([i for i in range(len(self.p)) if self.p[i] != i])\n for alpha in self.omega:\n gamma += 1\n if gamma != alpha:\n # replace α by γ in coset table\n for x in A:\n beta = self.table[alpha][A_dict[x]]\n self.table[gamma][A_dict[x]] = beta\n self.table[beta][A_dict_inv[x]] == gamma\n # all the cosets in the table are live cosets\n self.p = list(range(gamma + 1))\n # delete the useless coloumns\n del self.table[len(self.p):]\n # re-define values\n for row in self.table:\n for j in 
range(len(self.A)):\n row[j] -= bisect_left(chi, row[j])", "def pruneModifierRelationships(self):\n modifiers = self.getConTextModeNodes(\"modifier\")\n for modifier in modifiers:\n modified_by = self.successors(modifier)\n if modified_by and len(modified_by) > 1:\n minm = min([(modifier.dist(mod_by), mod_by) for mod_by in modified_by])\n edgs = self.edges(modifier)\n edgs.remove((modifier, minm[1]))\n if self.getVerbose():\n print(\"deleting relationship(s)\", edgs)\n\n self.remove_edges_from(edgs)", "def remove_ei(remove_fields: np.ndarray, remove_values: np.ndarray):\n remove_fields = remove_fields[2:10]\n remove_values = remove_values[:, 2:10]\n return remove_fields, remove_values", "def prune_sequence(sequence_set, extended_set):\n tmp_set = set()\n for seq in sequence_set:\n # se una sotto-sequenza e' trovata viene ignorata, altrimenti e' aggiunta al set temporaneo\n found = False\n for ext in extended_set:\n if seq1_in_seq2(seq, ext, 0): # eps e' 0 perche' le sequenze sono identiche\n found = True\n break\n if not found:\n tmp_set.add(seq)\n # alla fine aggiungi tutto il set esteso, si puo' includere nel ciclo precedente\n for ext in extended_set:\n tmp_set.add(ext)\n return tmp_set", "def prune_anoms(self, inverse=False):\n\n E_seq = self.E_seq if not inverse else self.E_seq_inv\n e_s = self.e_s if not inverse else self.e_s_inv\n non_anom_max = self.non_anom_max if not inverse \\\n else self.non_anom_max_inv\n\n if len(E_seq) == 0:\n return\n\n E_seq_max = np.array([max(e_s[e[0]:e[1]+1]) for e in E_seq])\n E_seq_max_sorted = np.sort(E_seq_max)[::-1]\n E_seq_max_sorted = np.append(E_seq_max_sorted, [non_anom_max])\n\n i_to_remove = np.array([])\n for i in range(0, len(E_seq_max_sorted)-1):\n if (E_seq_max_sorted[i] - E_seq_max_sorted[i+1]) \\\n / E_seq_max_sorted[i] < self._p:\n i_to_remove = np.append(i_to_remove, np.argwhere(\n E_seq_max == E_seq_max_sorted[i]))\n else:\n i_to_remove = np.array([])\n i_to_remove[::-1].sort()\n\n if len(i_to_remove) > 0:\n E_seq = np.delete(E_seq, i_to_remove, axis=0)\n\n if len(E_seq) == 0 and inverse:\n self.i_anom_inv = np.array([])\n return\n elif len(E_seq) == 0 and not inverse:\n self.i_anom = np.array([])\n return\n\n indices_to_keep = np.concatenate([range(e_seq[0], e_seq[-1]+1)\n for e_seq in E_seq])\n\n if not inverse:\n mask = np.isin(self.i_anom, indices_to_keep)\n self.i_anom = self.i_anom[mask]\n else:\n mask_inv = np.isin(self.i_anom_inv, indices_to_keep)\n self.i_anom_inv = self.i_anom_inv[mask_inv]", "def experience_clean_row(row_of_data):\n experience = row_of_data.get('experience')\n z = list(set(remove_filler_words(experience)))\n return z", "def _simplify_dfas(dfas):\n changes = True\n while changes:\n changes = False\n for i, state_i in enumerate(dfas):\n for j in range(i + 1, len(dfas)):\n state_j = dfas[j]\n if state_i == state_j:\n #print \" unify\", i, j\n del dfas[j]\n for state in dfas:\n state.unifystate(state_j, state_i)\n changes = True\n break", "def reduce_rec(node):\n if node.is_leaf():\n return\n for edge in node.child_nodes:\n # replacing the subdiagram with a singular isomorphic one\n node.child_nodes[edge] = hashtable[node.child_nodes[edge].__hash__()]\n # and going down recursively along that subdiagram\n reduce_rec(node.child_nodes[edge])", "def _clean_up(ev):\n ev = ev[(ev.Currency == \"USD\") | (ev.Currency == \"EUR\")]\n ev = ev[ev.Importance == \"H\"]\n for text_column in ['Currency', 'Importance', 'Event']:\n ev[text_column] = ev[text_column].str.strip()\n return ev", "def removeLegacy(self, 
path=None):\n\n df = pd.read_csv(path, compression='gzip')\n print(df.shape)\n gamelist = pd.read_csv('Resources/Genres.csv.gz', usecols=['appid'])\n gamelist = pd.DataFrame(gamelist.appid.unique(), columns=['appid'])\n print(gamelist)\n filter_df = pd.merge(df, gamelist, on='appid', how='inner')\n filter_df = filter_df.dropna()\n filter_df = filter_df.sort_values(['steamid', 'appid'], ascending=[True, True])\n print('done')\n print(filter_df.shape)\n print(filter_df)\n print(np.setdiff1d(df['appid'].unique(), filter_df['appid'].unique()))\n filter_df.to_csv(path, compression='gzip', columns=['steamid', 'appid', 'rating'], index=None)", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def preprocess(df): \n \n df.drop_duplicates(subset=df.columns[0], inplace=True) #drop duplicate gene_names. \n df.set_index(keys=df.columns[0], inplace=True)\n df.dropna(how='all', inplace=True) #drop rows with all NAs\n df2 = df.select_dtypes(include=['float64']) + 0.001 #select numbers in DataFrame \n \n return df2", "def pruning(self):\n data = self.data.copy()\n for d in self.data:\n # cascade purning method. Inspired from \"Efficient Computation of Group Skyline Queries on MapReduce (FCU)\"\n if d in data:\n pastart = [self.drange[1] if i+self.radius>self.drange[1] else i+self.radius for i in d.getLocationMax()]\n pamax = [self.drange[1] for j in range(self.dim)]\n # prune data points that are obviously dominated by current data point\n pruned = (self.index.intersection(tuple(pastart+pamax),objects=True))\n for p in pruned:\n if p.object in data:\n data.remove(p.object)\n self.pruned = data", "def _remove_noise_in_o2m():\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False", "def delete_cand():\n if row:\n return row[0] + [[key + [left_i]]]", "def removeMDrizProducts(self):\n\n # Remove all PyDrizzle intermediate products\n self.assoc.clean(coeffs=True,final=False)\n\n # Remove median file created by MultiDrizzle\n if os.path.exists(self.medianfile):\n os.remove(self.medianfile)", "def cleaning(df, file=\"proteinGroups\"):\r\n columns = df.columns\r\n if file == \"proteinGroups\":\r\n if (\"Potential contaminant\" not in columns) or\\\r\n (\"Reverse\" not in columns) or\\\r\n (\"Only identified by site\" not in columns):\r\n print(\"Is this data already cleaned?\\nMandatory columns for cleaning not present in data!\")\r\n print(\"Returning provided dataframe!\")\r\n return df\r\n df = df[(df['Potential contaminant'].isnull()) &\r\n (df['Reverse'].isnull()) &\r\n (df['Only identified by site'].isnull())]\r\n df.drop(['Potential contaminant',\"Reverse\", 'Only identified by site'], axis=1, inplace=True)\r\n elif (file == \"Phospho (STY)\") or (file == \"evidence\") or (file == \"modificationSpecificPeptides\"):\r\n if (\"Potential contaminant\" not in columns) or\\\r\n (\"Reverse\" not in columns):\r\n print(\"Is this data already cleaned?\\nMandatory columns for cleaning not present in data!\")\r\n print(\"Returning provided dataframe!\")\r\n return 
df\r\n df = df[(df['Potential contaminant'].isnull()) &\r\n (df['Reverse'].isnull())]\r\n df.drop(['Potential contaminant',\"Reverse\"], axis=1, inplace=True)\r\n return df", "def _clean(self):\n map(self.__delitem__, self.keys())\n self._original = []\n self._columns = {}\n self._modified, self._deleted = {}, {}", "def reverse_transform(self):\n self.reaction_df['dG0'] = self.reaction_df['dG0_prime']\n\n for i, rxn in self.iterreactions():\n aq_cond = self.reaction_df.loc[i, ['pH', 'I', 'T']]\n self.reaction_df.at[i, 'dG0'] -= rxn.get_transform_ddG0(*aq_cond)", "def prune_influence_map_subj_obj(self):\n def get_rule_info(r):\n result = {}\n for ann in self.model.annotations:\n if ann.subject == r:\n if ann.predicate == 'rule_has_subject':\n result['subject'] = ann.object\n elif ann.predicate == 'rule_has_object':\n result['object'] = ann.object\n return result\n im = self.get_im()\n rules = im.nodes()\n edges_to_prune = []\n for r1, r2 in itertools.permutations(rules, 2):\n if (r1, r2) not in im.edges():\n continue\n r1_info = get_rule_info(r1)\n r2_info = get_rule_info(r2)\n if 'object' not in r1_info or 'subject' not in r2_info:\n continue\n if r1_info['object'] != r2_info['subject']:\n logger.info(\"Removing edge %s --> %s\" % (r1, r2))\n edges_to_prune.append((r1, r2))\n logger.info('Removing %d edges from influence map' %\n len(edges_to_prune))\n im.remove_edges_from(edges_to_prune)", "def removeAllCorrelations(self, removeReImCorrel = True):\n\t\tdim = len(self.coma)/2\n#\t#\tCMwrite(\"removeAllCorrelations\")\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\tif not i == j:\n\t\t\t\t\tself.coma[2*i ,2*j ] = 0.\t\t\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\t\t\t\tself.coma[2*i+1,2*j+1] = 0.\n\t\t\t\telif removeReImCorrel:\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}", "def column_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0] #a solved location in a column\n if location in location_dict.keys():\n \n #ensure that multiple groups can be in multiple locations using period_loc_frequency\n loc_freq = 0\n loc_freq -= 1 #subtract one for the current location usage\n loc_freq += period_loc_frequency[location]\n \n for other_col in column_dict[box]:\n if other_col in solved_values:\n if values[other_col] == location:\n loc_freq -= 1\n \n #make sure that too many locations haven't been used up yet\n if loc_freq < 0:\n print(\"error: too many groups in location\", location)\n \n #if the location is \"used up\", remove it as an option from the rest of the groups\n if loc_freq == 0:\n for other_col in column_dict[box]:\n try:\n values[other_col].remove(location) #remove the location from the other column units\n except:\n pass\n \n return values", "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n 
(decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def prune(record: cfg.OpenAPI, outformat: cfg.Format = None) -> NoReturn:\n content = record.oas\n # Cleanup\n content, _, _ = clean.remove_unused_components(content)\n content = clean.remove_empty_objects(content)\n # Output\n out(content, outformat or record.oastype)", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring 
before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)" ]
[ "0.6324132", "0.59182566", "0.5790216", "0.57542723", "0.57150894", "0.5654134", "0.55529314", "0.5501398", "0.54495615", "0.5417837", "0.5416808", "0.5410812", "0.53797144", "0.53654283", "0.5352083", "0.5328368", "0.53084546", "0.530087", "0.5300535", "0.5298115", "0.529656", "0.5293976", "0.527256", "0.5271022", "0.52537477", "0.52323747", "0.5232325", "0.52261007", "0.5222202", "0.52065635", "0.5204635", "0.5204167", "0.51828986", "0.5170891", "0.5170645", "0.5148652", "0.51482916", "0.5136792", "0.51321214", "0.51253533", "0.51211447", "0.5119196", "0.5109581", "0.51054806", "0.5099908", "0.5098097", "0.5097277", "0.50836885", "0.5081005", "0.5066483", "0.5062651", "0.5062651", "0.5057948", "0.50496453", "0.50392747", "0.5034259", "0.50318754", "0.5028552", "0.5022664", "0.50196695", "0.50195", "0.5016522", "0.50110245", "0.5009248", "0.5007355", "0.50065583", "0.50018823", "0.49939972", "0.4993693", "0.49819562", "0.49794263", "0.49792293", "0.49652997", "0.49645245", "0.49515918", "0.49505877", "0.4948431", "0.49484283", "0.49474576", "0.49473864", "0.494724", "0.49460712", "0.49442226", "0.49418324", "0.49349138", "0.4931562", "0.49291974", "0.49270356", "0.49235886", "0.4923551", "0.49215692", "0.49128953", "0.49063897", "0.49040008", "0.48965836", "0.4887202", "0.4884071", "0.48835927", "0.48809305", "0.48804224" ]
0.6166888
1
Convert epis reconstructed on the scanner.
def ConvertRtEpis(self): if self.verbose: print 'Convert EPIs to brik' for entry in self.entry_map['epi']: if ('epirt' in self.info[entry]['psdname'] or \ self.info[entry]['psdname'] == 'epi' or \ self.info[entry]['psdname'] == '*epfid2d1_64') and \ self.info[entry]['data_filetype'] == 'dicom': series = self.info[entry]['series'] if self.info[entry]['skip'] > 0: skip = '--skip=%s' % self.info[entry]['skip'] else: skip = '' cmd = 'convert_file %s %s %s brik' % \ (skip, entry, self.info[entry]['imgfile']) checkname = '%s+orig.BRIK' % (self.info[entry]['imgfile']) self.CheckExec(cmd, [checkname])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReconEpis(self):\n run = zeros(100)\n if self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)", "def ExtractFirstEpi(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'first_epi':\n epiname = self.info[entry]['imgfile']\n cmd = 'convert_file %s -f0 %s %s %s' % \\\n (self.flip_opts, entry,epiname, self.info[entry]['filetype'])\n fname = '%s%s' % (epiname, self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])\n self.info[entry]['imgfile'] = fname", "def into_epochs(self):\n #divide into epochs\n new_events = mne.make_fixed_length_events(self.raw, duration=2.)\n event_dict = {'divide':1}\n #reject data with extreme/flat amplitude\n reject_criteria = {'eeg' : 400e-6} # 400 µV\n flat_criteria = {'eeg' : 1e-6} # 1 µV\n\n# self.epochs = mne.Epochs(self.raw,new_events, reject=reject_criteria, flat=flat_criteria,\n# reject_by_annotation=False, preload=True)\n self.epochs = mne.Epochs(self.raw,new_events, reject_by_annotation=False, preload=True)\n# self.epochs.plot()\n return self.epochs", "def convert_to_evoros_input(self, enki_input):\n # ramp inclines\n 
ramp_inclines = [\n enki_input['incline1'],\n enki_input['incline2'],\n enki_input['incline3'],\n enki_input['incline4'],\n enki_input['incline5']\n ]\n\n # convert to Evo-ROS input\n evoros_input = {\n 'genome': PID_SETTINGS,\n 'enki_genome': ramp_inclines\n }\n return evoros_input", "def parse_eps_files(self):\n retrieved = self.retrieved\n retrieved_names = retrieved.base.repository.list_object_names()\n\n files = self.node.process_class._internal_retrieve_list\n if any(_ not in retrieved_names for _ in files):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n\n energy = None\n eps = ArrayData()\n for name in self.node.process_class._internal_retrieve_list:\n content = retrieved.base.repository.get_object_content(name)\n base = name.split('.')[0]\n\n try:\n data = np.loadtxt(io.StringIO(content))\n except ValueError:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES\n return\n if len(data.shape) != 2 or data.shape[0] == 0 or data.shape[1] != 2:\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_INVALID_FORMAT\n return\n\n x, y = data.T\n if energy is None:\n energy = x\n eps.set_array('energy', x)\n elif not np.allclose(x, energy):\n self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES_ENERGY_MISMATCH\n return\n\n eps.set_array(base, y)\n\n return eps", "def decode_pes(self, pes: bytes)-> PES.PES:\n pesdk = PES.PES()\n try:\n pesdk.stream_id, PES_packet_length = struct.unpack('>BH', pes[0:3])\n if pesdk.stream_id not in [33, 188, 190, 191, 240, 241, 242, 248, 255]:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 190 (0xBE) - padding_stream\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n if pesdk.stream_id >> 4 == 14:\n pesdk.stream_type = 'video-stream'\n pesdk.stream_number = (pesdk.stream_id & 15)\n elif pesdk.stream_id >> 5 == 6:\n pesdk.stream_type = 'audio-stream'\n pesdk.stream_number = (pesdk.stream_id & 31)\n b1, b2, PES_header_data_length = struct.unpack('>BBB', pes[3:6])\n pesdk.PES_scrambling_control = (b1 & 16) >> 4\n # PES_priority = bool((b1 & 8) >> 3)\n # data_alignment_indicator = bool((b1 & 4) >> 2)\n pesdk.copyright = bool((b1 & 2) >> 1)\n pesdk.original_or_copy = bool(b1 & 1)\n pesdk.PTS_DTS_flags = (b2 & 192) >> 6\n pesdk.ESCR_flag = bool((b2 & 32) >> 5)\n pesdk.ES_rate_flag = bool((b2 & 16) >> 4)\n pesdk.DSM_trick_mode_flag = bool((b2 & 8) >> 3)\n pesdk.additional_copy_info_flag = bool((b2 & 4) >> 2)\n pesdk.PES_CRC_flag = bool((b2 & 2) >> 1)\n pesdk.PES_extension_flag = bool(b2 & 1)\n pos = 6\n if pesdk.PTS_DTS_flags in [2, 3]:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos+5])\n pesdk.PTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n if pesdk.PTS_DTS_flags == 3:\n b1, b23, b45 = struct.unpack('>BHH', pes[pos:pos + 5])\n pesdk.DTS = (((b1 & 14) << 29) + ((b23 >> 1) << 15) + (b45 >> 1))\n pos += 5\n elif pesdk.stream_id == 190:\n # 190 (0xBE) - padding_stream\n pass\n else:\n # 33 (0x21) - unknown ?????\n # 188 (0xBC) - program_stream_map\n # 191 (0xBF) - private_stream_2\n # 240 (0xF0) - ECM\n # 241 (0xF1) - EMM\n # 242 (0xF2) - DSMCC_stream\n # 248 (0xF8) - ITU-T Rec. 
H.222.1 type E stream\n # 255 (0xFF) - program_stream_directory\n pass\n return pesdk\n except Exception as err:\n logging.warning('PES parsing error:' + str(err))\n return None", "def convert_season_episode(self, se_input):\n if type(se_input) == str:\n se_input = se_input[1:]\n se_input.replace(' ', '')\n\n e_ndx = se_input.index('E')\n\n #sometimes it looks like \"S14 E10\" and sometimes it's \"S14 Ep10\"\n if \"Ep\" in se_input:\n ep_offset = 2\n else:\n ep_offset = 1\n\n # return two ints\n return int(se_input[:e_ndx]), int(se_input[e_ndx+ep_offset:])\n\n else:\n # return it as \"S14 Ep10\"\n return \"S%s Ep%s\" % (se_input[0], se_input[1])", "def read(self, generic=False, to_xarray=False):\n return read_eps_l2(self.filename, generic, to_xarray)", "def _convert(self):\n root = cElementTree.fromstring(self.html)\n for el in root.getiterator():\n if el in self.visited:\n continue\n self.visited.update([el])\n if el.tag == 'p':\n parser = ParagraphParser(el)\n self.document_state.append(parser.tag)\n self.visited.update(el.getiterator())", "def _unpack_ies(buf):\n\t\t# each IE starts with an ID and a length\n\t\ties = []\n\t\toff = 0\n\t\tbuflen = len(buf)\n\t\t# logger.debug(\"lazy dissecting: %s\" % buf)\n\n\t\twhile off < buflen:\n\t\t\tie_id = buf[off]\n\t\t\ttry:\n\t\t\t\tparser = IEEE80211.ie_decoder[ie_id]\n\t\t\texcept KeyError:\n\t\t\t\t# some unknown tag, use standard format\n\t\t\t\tparser = IEEE80211.IE\n\n\t\t\tdlen = buf[off + 1]\n\t\t\t# logger.debug(\"IE parser is: %d = %s = %s\" % (ie_id, parser, buf[off: off+2+dlen]))\n\t\t\tie = parser(buf[off: off + 2 + dlen])\n\t\t\ties.append(ie)\n\t\t\toff += 2 + dlen\n\n\t\treturn ies", "def prepare_for_ESR(self):\r\n _debug('Anapico: prepare ESR')\r\n # This is all for the ANAPICO to use the external trigger. \r\n # BONUS for preparing the list with the external trigger. \r\n print('Testing query: ', self.query('*IDN?'))\r\n print('Source for Trigger?: ', self.query('TRIG:SEQ:SOUR?'))\r\n self.write('TRIG:SEQ:SOUR EXT') # Set the external trigger to ext\r\n print('Source for Trigger?: ', self.query('TRIG:SEQ:SOUR?'))\r\n print('First frequency?: ', self.query('SOUR:FREQ:STAR?'))\r\n print('Last frequency?: ', self.query('SOUR:FREQ:STOP?'))\r\n \r\n # Prepare the list mode\r\n self.write('SOUR:FREQ:MODE LIST') # Set the frequency mode to list\r\n print('Frequency mode ?: ', self.query('SOUR:FREQ:MODE?'))\r\n self.write('SOUR:POW:MODE LIST') # Set the power mode to list\r\n print('Power mode ?: ', self.query('SOUR:POW:MODE?'))\r\n self.write('SOUR:LIST:MODE AUTO') # Set the list mode to auto\r\n print('List mode ?: ', self.query('SOUR:LIST:MODE?'))\r\n# self.api.write('TRIG:SEQ:TYPE GATE') # An external trigger signal repeatedly starts and stops the waveform’s playback.\r\n self.write('TRIG:SEQ:TYPE POIN')# Upon triggering, only a single point of the sweep (list) is played.\r\n print('Trig type?: ', self.query('TRIG:SEQ:TYPE?'))\r\n \r\n # Set stuff for the modulation\r\n self.write('SOUR:PULM:SOUR EXT')# Set the pulse modulation to be external\r\n print('Pulse modulation source?: ', self.query('SOUR:PULM:SOUR?'))\r\n self.write('SOUR:PULM:STAT ON') # Switch the pulse modulation ON\r\n print('State of pulse modulation? ', self.query('SOUR:PULM:STAT?'))\r\n self.write('SOUR:PULM:POL NORM') # Polarity NORMal, in case it was INVerted\r\n print('Polarity of modulation?: ', self.query('SOUR:PULM:POL?')) \r\n # This is all for the ANAPICO to use the external trigger. \r\n # BONUS for preparing the list with the external trigger. 
\r", "def __init__(self, config):\n self.config = config\n self.outpath = prepDir(config.outpath)\n self.xslpath = config.xslpath\n self.imagespath = config.imagespath\n self.errors = []\n self.xeps = []\n files = []\n if config.xeps:\n for xep in config.xeps:\n if os.path.isfile(xep):\n files.append(os.path.abspath(xep))\n elif os.path.isdir(xep):\n fltr = os.path.join(os.path.abspath(xep), '*.xml')\n files += glob.glob(fltr)\n else:\n if os.path.isfile(\"xep-{0}.xml\".format(xep)):\n files.append(\n os.path.abspath(os.path.join(os.getcwd(), \"xep-{0}.xml\".format(xep))))\n else:\n # no xeps given, try all xml-files in curdir\n fls = glob.glob(os.path.join(os.getcwd(), '*.xml'))\n for fle in fls:\n files.append(os.path.abspath(fle))\n # try if we can find an existing XEP-table:\n if os.path.isfile(os.path.join(self.outpath, \"xeps.xml\")):\n self.xeptable = os.path.join(self.outpath, \"xeps.xml\")\n else:\n self.xeptable = None\n # read files to xeps\n for fle in sorted(set(files)):\n try:\n self.xeps.append(\n xeputils.xep.XEP(fle,\n outpath=self.outpath,\n xslpath=self.xslpath,\n imagespath=self.imagespath))\n except:\n e = \"Error while parsing {}\\n\".format(fle)\n e += \"FATAL: {} is not included\\n\".format(fle)\n e += traceback.format_exc()\n self.errors.append(e)", "def partid2eids(self, partid, etype): # -> None:\n ...", "def epitopes(record, info, ens_data):\n\n funcensGene = info.Consequence\n allowed_contigs = ens_data.contigs()\n epitopes = list()\n if 'missense' in funcensGene or 'frame' in funcensGene:\n gene = info.SYMBOL\n transcript = info.Feature\n # sequence = ens_data.transcript_by_id(info.Feature)\n mut_dna = info.HGVSc.split(':')[1] if len(info.HGVSc.split(':')) > 1 else ''\n mut_aa = info.HGVSp.split(':')[1] if len(info.HGVSp.split(':')) > 1 else ''\n chrom = record.CHROM.replace('chr', '') if 'chr' in record.CHROM else record.CHROM\n if chrom == 'M':\n chrom = 'MT'\n if chrom in allowed_contigs:\n # TODO this should return a list \n pos, flags, wtmer, mutmer = create_epitope_varcode(chrom,\n record.POS,\n record.REF,\n info.Allele,\n ens_data,\n transcript)\n epitopes.append(Epitope(transcript, gene, funcensGene, mut_dna, mut_aa, flags, wtmer, mutmer))\n else:\n print(\"Unable to infer epitope for contig {}\".format(chrom))\n return epitopes", "def convert_ere2eer(input_filename, output_filename):\n with codecs.open(input_filename, \"r\") as input_file:\n with codecs.open(output_filename, \"w\") as output_file:\n for line in input_file:\n line = line.strip().split('\\t')\n if len(line)<3:\n output_file.write('\\t'.join([str(c) for c in line])+'\\n')\n continue\n\n line = [line[0],line[2],line[1]]\n # print(line)\n output_file.write('\\t'.join([str(c) for c in line])+'\\n')", "def from_endf(cls, ev, resonances):\n file_obj = io.StringIO(ev.section[32, 151])\n\n # Determine whether discrete or continuous representation\n items = endf.get_head_record(file_obj)\n n_isotope = items[4] # Number of isotopes\n\n ranges = []\n for iso in range(n_isotope):\n items = endf.get_cont_record(file_obj)\n abundance = items[1]\n fission_widths = (items[3] == 1) # Flag for fission widths\n n_ranges = items[4] # Number of resonance energy ranges\n\n for j in range(n_ranges):\n items = endf.get_cont_record(file_obj)\n # Unresolved flags - 0: only scattering radius given\n # 1: resolved parameters given\n # 2: unresolved parameters given\n unresolved_flag = items[2]\n formalism = items[3] # resonance formalism\n\n # Throw error for unsupported formalisms\n if formalism in [0, 7]:\n 
error = 'LRF='+str(formalism)+' covariance not supported '\\\n 'for this formalism'\n raise NotImplementedError(error)\n\n if unresolved_flag in (0, 1):\n # Resolved resonance region\n resonance = resonances.ranges[j]\n erange = _FORMALISMS[formalism].from_endf(ev, file_obj,\n items, resonance)\n ranges.append(erange)\n\n elif unresolved_flag == 2:\n warn = 'Unresolved resonance not supported. Covariance '\\\n 'values for the unresolved region not imported.'\n warnings.warn(warn)\n\n return cls(ranges)", "def sensordaten_einlesen(self):\n self.caldata = []\n self.caldata_raw = np.genfromtxt(self.sensorfile, usecols = np.asarray(self.sensorspalte), skip_header = 1)\n for ele in self.caldata_raw:\n self.caldata.append(int(ele))\n self.Sensordata = Channel()", "def partid2eids(self, partid, etype=...):\n ...", "def partid2eids(self, partid, etype=...):\n ...", "def read(self, generic=False, to_xarray=False):\n return read_eps_l1b(self.filename, generic, to_xarray)", "def parse_eeg_file(path):\n if os.path.splitext(path)[-1].lower() != '.edf':\n NotImplementedError(\"Only EDFs are supported currently. More files coming.\")\n\n try: #edf\n edf_file = mne.io.read_raw_edf(path, stim_channel=None, verbose=False)\n except RuntimeError: #edf+\n edf_file = mne.io.read_raw_edf(path, preload=True, stim_channel=None, verbose=False)\n\n # TODO edf++\n\n eeg_data = {}\n eeg_data['meas_date'] = datetime.datetime.fromtimestamp(edf_file.info[\"meas_date\"])\n eeg_data['nchan'] = edf_file.info[\"nchan\"]\n eeg_data['sfreq'] = edf_file.info[\"sfreq\"]\n eeg_data['subject_info'] = edf_file.info[\"subject_info\"]\n eeg_data['ch_names'] = edf_file.ch_names\n\n return {\"eeg_\"+key: value for key, value in eeg_data.items()}", "def _emiss_ep(self,Eph):\n if self.weight_ep == 0.0:\n return np.zeros_like(Eph)\n\n gam = np.vstack(self._gam)\n eps = (Eph / mec2).decompose().value\n # compute integral with electron distribution\n emiss = c.cgs * trapz_loglog(np.vstack(self._nelec) * self._sigma_1(gam,eps),\n self._gam, axis=0).to(u.cm**2 / Eph.unit)\n return emiss", "def extendedConvert(self):\r\n devId = str(self.deviceId)\r\n if(devId == '28' or devId == '29'):\r\n answers = []\r\n #just add the counter value\r\n answers.append(self.fields[1])\r\n #find the engineering units converter\r\n enum = self.fields[0] & 0x3F\r\n #look up the scale and offset for that eeu\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n print('eeu:' + str(eeu))\r\n #convert from twos complement and adjust by scale/offset\r\n val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n self.units = [self.UNITS_COUNT, eeu[2]]\r\n elif(devId == '53' or devId == '54'):\r\n #strip off the first part of the answer which is the last part of the\r\n #serial number\r\n answers = [self.fields[1]]\r\n self.fields = answers\r\n elif(devId == '75' or devId == '76'):\r\n answers = []\r\n #find out the number of I/O points\r\n pointCount = self.fields[0] & 3\r\n #find out engineering units for 1st I/O\r\n enum = self.fields[1] & 0x3F\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu1 = eeu\r\n #new value = old value * scale + offset\r\n val = (self.convertSigned16(self.fields[3]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units = [eeu[2]]\r\n #see if there's two\r\n if pointCount == 2:\r\n #find out engineering units for 2nd I/O\r\n #and off first two bits\r\n enum = self.fields[0] >> 2\r\n eeu = self._eeumaps[str(enum)]\r\n self.eeu2 = eeu\r\n 
val = (self.convertSigned16(self.fields[2]) * eeu[1]) + eeu[0]\r\n answers.append(val)\r\n self.units.append(eeu[2])\r\n else:\r\n self.eeu2 = []\r\n #reset fields to hold the new answers\r\n self.fields = answers\r\n\r\n return", "def convert(self):\n return", "def elementStream():\n try:\n es = ExpatElementStream()\n return es\n except ImportError:\n if SuxElementStream is None:\n raise Exception(\"No parsers available :(\")\n es = SuxElementStream()\n return es", "def convert(self, sm):\n return self.visit(sm)", "def auto_convert(self):\n nodes_converted = []\n for node_type in self.conversion_spec_sheet:\n print('searching for: %s' % node_type)\n found_nodes = self.list_nodes(node_type)\n print('found: %s nodes' % len(found_nodes))\n for node in found_nodes:\n new_node = self.convert(node)\n nodes_converted.append([node, new_node])\n\n return nodes_converted", "def dnde_ee(_: PseudoScalarMediatorBase, egams, cme):\n return dnde_xx_to_p_to_ffg(egams, cme, me)", "def psea2HEC(pseq): # -> list[Unknown]:\n ...", "def processDuplicitous(self):\n\n ims = bytearray()\n key = ekey = b'' # both start same. when not same means escrows found\n while True: # break when done\n for ekey, edig in self.db.getLdeItemsNextIter(key=key):\n try:\n pre, sn = splitKeySN(ekey) # get pre and sn from escrow item\n # check date if expired then remove escrow.\n dtb = self.db.getDts(dgKey(pre, bytes(edig)))\n if dtb is None: # othewise is a datetime as bytes\n # no date time so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event datetime\"\n \" at dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed event datetime \"\n \"at dig = {}.\".format(bytes(edig)))\n\n # do date math here and discard if stale nowIso8601() bytes\n dtnow = datetime.datetime.now(datetime.timezone.utc)\n dte = fromIso8601(bytes(dtb))\n if (dtnow - dte) > datetime.timedelta(seconds=self.TimeoutLDE):\n # escrow stale so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Stale event escrow \"\n \" at dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Stale event escrow \"\n \"at dig = {}.\".format(bytes(edig)))\n\n # get the escrowed event using edig\n eraw = self.db.getEvt(dgKey(pre, bytes(edig)))\n if eraw is None:\n # no event so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event at.\"\n \"dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed evt at dig = {}.\"\n \"\".format(bytes(edig)))\n\n eserder = Serder(raw=bytes(eraw)) # escrowed event\n ims.extend(eserder.raw)\n\n # get sigs and attach\n sigs = self.db.getSigs(dgKey(pre, bytes(edig)))\n if not sigs: # otherwise its a list of sigs\n # no sigs so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event sigs at.\"\n \"dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed evt sigs at \"\n \"dig = {}.\".format(bytes(edig)))\n\n counter = Counter(code=CtrDex.ControllerIdxSigs,\n count=len(sigs))\n ims.extend(counter.qb64b)\n for sig in sigs: # stored in db as qb64b\n ims.extend(sig)\n\n # process event\n self.processOne(ims=ims) # default framed True\n\n # If process does NOT validate event with sigs, becasue it is\n # still out of order then process will attempt to re-escrow\n # and then raise OutOfOrderError (subclass of ValidationError)\n # so we can distinquish between ValidationErrors that are\n # re-escrow vs non re-escrow. 
We want process to be idempotent\n # with respect to processing events that result in escrow items.\n # On re-escrow attempt by process, Ooe escrow is called by\n # Kevery.self.escrowOOEvent Which calls\n # self.db.addOoe(snKey(pre, sn), serder.digb)\n # which in turn will not enter dig as dup if one already exists.\n # So re-escrow attempt will not change the escrowed ooe db.\n # Non re-escrow ValidationError means some other issue so unescrow.\n # No error at all means processed successfully so also unescrow.\n\n except LikelyDuplicitousError as ex:\n # still can't determine if duplicitous\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Kevery unescrow failed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Kevery unescrow failed: %s\\n\", ex.args[0])\n\n except Exception as ex: # log diagnostics errors etc\n # error other than likely duplicitous so remove from escrow\n self.db.delLde(snKey(pre, sn), edig) # removes one escrow at key val\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Kevery unescrowed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Kevery unescrowed: %s\\n\", ex.args[0])\n\n else: # unescrow succeeded, remove from escrow\n # We don't remove all escrows at pre,sn because some might be\n # duplicitous so we process remaining escrows in spite of found\n # valid event escrow.\n self.db.delLde(snKey(pre, sn), edig) # removes one escrow at key val\n logger.info(\"Kevery unescrow succeeded in valid event: \"\n \"event=\\n%s\\n\", json.dumps(eserder.ked, indent=1))\n\n if ekey == key: # still same so no escrows found on last while iteration\n break\n key = ekey # setup next while iteration, with key after ekey", "def load_scan_energies(self):\n\n raise NotImplementedError('Could not find a successfully load Orca scan:Parser is not implemented')", "def mapped_parser_item_iterator(input_stream, item_path):\n events = map(ijson_decimal_to_float, ijpython.parse(input_stream))\n return ijcommon.items(events, item_path)", "def _construct_qpe_evolution(self):\n\n a = QuantumRegister(self._num_ancillae, name='a')\n c = ClassicalRegister(self._num_ancillae, name='c')\n q = QuantumRegister(self._operator.num_qubits, name='q')\n qc = QuantumCircuit(a, q, c)\n\n # initialize state_in\n qc.data += self._state_in.construct_circuit('circuit', q).data\n\n # Put all ancillae in uniform superposition\n qc.u2(0, np.pi, a)\n\n # phase kickbacks via dynamics\n pauli_list = self._operator.reorder_paulis(grouping=self._paulis_grouping)\n if len(pauli_list) == 1:\n slice_pauli_list = pauli_list\n else:\n if self._expansion_mode == 'trotter':\n slice_pauli_list = pauli_list\n elif self._expansion_mode == 'suzuki':\n slice_pauli_list = Operator._suzuki_expansion_slice_pauli_list(\n pauli_list,\n 1,\n self._expansion_order\n )\n else:\n raise ValueError('Unrecognized expansion mode {}.'.format(self._expansion_mode))\n for i in range(self._num_ancillae):\n qc.data += self._operator.construct_evolution_circuit(\n slice_pauli_list, -2 * np.pi, self._num_time_slices, q, a, ctl_idx=i\n ).data\n # global phase shift for the ancilla due to the identity pauli term\n qc.u1(2 * np.pi * self._ancilla_phase_coef * (2 ** i), a[i])\n\n # inverse qft on ancillae\n self._iqft.construct_circuit('circuit', a, qc)\n\n # measuring ancillae\n qc.measure(a, c)\n\n self._circuit = qc", "def convert_points(pointsIN,epsgIN,epsgOUT):\n \n if(epsgIN != epsgOUT):\n \n coords_in = osr.SpatialReference()\n coords_in.ImportFromEPSG(epsgIN)\n coords_out = osr.SpatialReference() \n 
coords_out.ImportFromEPSG(epsgOUT) \n numPts = len(pointsIN)\n dimension = len(pointsIN[0])\n pointsOUT = []\n n=0\n while n<numPts:\n point = ogr.Geometry(type=ogr.wkbPoint)\n point.SetPoint(0, float(pointsIN[n][0]), float(pointsIN[n][1]))\n point.AssignSpatialReference(coords_in)\n point.TransformTo(coords_out)\n if dimension < 3:\n pointsOUT.append([float(point.GetX()),float(point.GetY())])\n else:\n pointsOUT.append([float(point.GetX()),float(point.GetY()),float(pointsIN[n][2])])\n \n n+=1\n \n return pointsOUT\n \n else:\n return pointsIN", "def _tosuperclass(self): \n self.ne_in = self.rsig['ne']['signal']\n self.ne = self.ne_in\n self.te_in = self.rsig['te']['signal']\n self.ti_in = self.rsig['ti']['signal']\n self.ni_in = np.zeros((self.nion, len(self.ne_in)),dtype=float)\n self.zeff_in = np.full(self.nrho, self.zeff)\n self.vt_in = np.zeros(len(self.ne_in),dtype=float)\n self.vt = np.zeros(len(self.ne_in),dtype=float)\n self._ion_densities()\n self.ni = self.ni_in\n self.te = self.te_in\n self.ti = self.ti_in\n \n # no need to smooth since they are already smoothed\n self._extrapolate()", "def _read_eeg(eeg_file):\r\n pass", "def prepare_for_ESR(self): \r\n # Not implemented yet\r\n return", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def processPartials(self):\n\n ims = bytearray()\n key = ekey = b'' # both start same. 
when not same means escrows found\n while True: # break when done\n for ekey, edig in self.db.getPseItemsNextIter(key=key):\n try:\n pre, sn = splitKeySN(ekey) # get pre and sn from escrow item\n # check date if expired then remove escrow.\n dtb = self.db.getDts(dgKey(pre, bytes(edig)))\n if dtb is None: # othewise is a datetime as bytes\n # no date time so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event datetime\"\n \" at dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed event datetime \"\n \"at dig = {}.\".format(bytes(edig)))\n\n # do date math here and discard if stale nowIso8601() bytes\n dtnow = datetime.datetime.now(datetime.timezone.utc)\n dte = fromIso8601(bytes(dtb))\n if (dtnow - dte) > datetime.timedelta(seconds=self.TimeoutPSE):\n # escrow stale so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Stale event escrow \"\n \" at dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Stale event escrow \"\n \"at dig = {}.\".format(bytes(edig)))\n\n # get the escrowed event using edig\n eraw = self.db.getEvt(dgKey(pre, bytes(edig)))\n if eraw is None:\n # no event so so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event at.\"\n \"dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed evt at dig = {}.\"\n \"\".format(bytes(edig)))\n\n eserder = Serder(raw=bytes(eraw)) # escrowed event\n ims.extend(eserder.raw)\n\n # get sigs and attach\n sigs = self.db.getSigs(dgKey(pre, bytes(edig)))\n if not sigs: # otherwise its a list of sigs\n # no sigs so raise ValidationError which unescrows below\n logger.info(\"Kevery unescrow error: Missing event sigs at.\"\n \"dig = %s\\n\", bytes(edig))\n\n raise ValidationError(\"Missing escrowed evt sigs at \"\n \"dig = {}.\".format(bytes(edig)))\n\n counter = Counter(code=CtrDex.ControllerIdxSigs, count=len(sigs))\n ims.extend(counter.qb64b)\n for sig in sigs: # stored in db as qb64b\n ims.extend(sig)\n\n # process event\n self.processOne(ims=ims) # default framed True\n\n # If process does NOT validate sigs or delegation seal (when delegated),\n # but there is still one valid signature then process will\n # attempt to re-escrow and then raise MissingSignatureError\n # or MissingDelegationSealError (subclass of ValidationError)\n # so we can distinquish between ValidationErrors that are\n # re-escrow vs non re-escrow. 
We want process to be idempotent\n # with respect to processing events that result in escrow items.\n # On re-escrow attempt by process, Pse escrow is called by\n # Kever.self.escrowPSEvent Which calls\n # self.db.addPse(snKey(pre, sn), serder.digb)\n # which in turn will not enter dig as dup if one already exists.\n # So re-escrow attempt will not change the escrowed pse db.\n # Non re-escrow ValidationError means some other issue so unescrow.\n # No error at all means processed successfully so also unescrow.\n\n except (MissingSignatureError, MissingDelegatingSealError) as ex:\n # still waiting on missing sigs or missing seal to validate\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Kevery unescrow failed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Kevery unescrow failed: %s\\n\", ex.args[0])\n\n except Exception as ex: # log diagnostics errors etc\n # error other than waiting on sigs or seal so remove from escrow\n self.db.delPse(snKey(pre, sn), edig) # removes one escrow at key val\n if logger.isEnabledFor(logging.DEBUG):\n logger.exception(\"Kevery unescrowed: %s\\n\", ex.args[0])\n else:\n logger.error(\"Kevery unescrowed: %s\\n\", ex.args[0])\n\n else: # unescrow succeeded, remove from escrow\n # We don't remove all escrows at pre,sn because some might be\n # duplicitous so we process remaining escrows in spite of found\n # valid event escrow.\n self.db.delPse(snKey(pre, sn), edig) # removes one escrow at key val\n logger.info(\"Kevery unescrow succeeded in valid event: \"\n \"event=\\n%s\\n\", json.dumps(eserder.ked, indent=1))\n\n if ekey == key: # still same so no escrows found on last while iteration\n break\n key = ekey # setup next while iteration, with key after ekey", "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n (epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n 
info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def read(self, epsg=None):\n if self.data is None:\n self.process.compute()\n self.data = self.process.output.data\n out_data = self.data\n if epsg and self.get_epsg() != epsg:\n out_data = reproject(self.data, epsg)\n return out_data", "def to_mne(self):\n try:\n import mne\n except ModuleNotFoundError:\n raise ModuleNotFoundError('MNE is not installed.')\n info = mne.create_info([str(self.name)], sfreq=self.sfreq, ch_types='misc')\n data = self.spikes[:, np.newaxis, :] # Add a singleton channel dimension\n # If events are given in strings, convert to integers first\n events = self.events\n if isinstance(events[0], str):\n events = [self.event_id[event] for event in events]\n events = np.column_stack([range(len(events)), np.zeros_like(events), events])\n events = events.astype(int)\n epochs = mne.EpochsArray(data, info, events=events,\n event_id=self.event_id, tmin=self.tmin)\n return epochs", "def _EpiInfo(self, info, path):\n\n epi_vals = {'tdim':self.hdr['tdim'], 'plane':self.hdr['plane'], \\\n 'SeriesNumber':self.hdr['subhdr']['SeriesNumber']}\n for key in self.epi_keys.keys():\n if self.epi_keys[key] != str(epi_vals[key]):\n# Return None, which will cause these data to be ignored.\n return None\n\n# Early versions of the EPIC software saved p-files for the setup epis.\n# Don't process these (or any epi with fewer than eight useable frames).\n if self.hdr['tdim'] < (8 + self.skip):\n return None\n\n info['slice_order'] = self.shdr.get('SliceOrder', 'altplus')\n if self.shdr['EffEchoSpacing'] is not None:\n info['echo_spacing'] = self.shdr['EffEchoSpacing']/1000.\n else:\n info['echo_spacing'] = 0.\n if info['data_filetype'] == 'dicom':\n# Entry is name of dirctory for dicom images.\n if not os.path.isdir(path):\n entry = os.path.dirname(path)\n else:\n entry = path\n else:\n# Otherwise it is the name of a directory containing p-files.\n entry = path\n\n if info['data_filetype'] == 'ge_data' and info['type'] is not None:\n# Found a pfile. 
Add it to the list.\n if entry not in self.pfiles and info['tdim'] > 2:\n self.pfiles.append(entry)\n self.entry_map['epi'].append(entry)\n if info['series'] not in self.epi_series:\n self.epi_series.append(info['series'])\n elif info['data_filetype'] == 'dicom' and \\\n info['psdname'] == 'epibold':\n# This is the initial EPI done during setup.\n info['outdir'] = self.episetup_dir\n info['type'] = 'first_epi'\n self.entry_map['first_epi'].append(entry)\n info['imgfile'] = '%s/first_epi_%d' % \\\n (self.episetup_dir, len(self.entry_map['first_epi']))\n elif ('epirt' in info['psdname'] or info['psdname'] == 'epi' or \\\n info['psdname'] == '*epfid2d1_64') and info['tdim'] > 2:\n# This is an epi reconstructed on the scanner.\n self.epi_series.append(info['series'])\n self.entry_map['epi'].append(entry)\n if not os.path.isdir(path):\n tmp_path = os.path.dirname(path)\n else:\n tmp_path = path\n self.epirt_paths.append(tmp_path)\n\n if self.fsl_flip:\n info['filetype'] = 'brik'\n else:\n info['filetype'] = self.tmplt['epi_file_format']\n\n info['TR'] = self.hdr['tsize']\n if self.tmplt['acq_tr'] is None:\n info['acq_tr'] = float(info['TR'])\n else:\n info['acq_tr'] = float(self.tmplt['acq_tr'])\n return OK", "def convert_alleles(self, alleles):\n raise NotImplementedError", "def Parse(self):\n prev_percent_read = 0\n for packet in TS.next_packet(self._filename):\n #check_packet_formedness(packet)\n pei = TS.get_transport_error_indicator(packet)\n pusi = TS.get_payload_start(packet)\n pid = TS.get_pid(packet)\n tsc = TS.get_tsc(packet)\n\n # per .ts packet handler\n if self.OnTSPacket:\n self.OnTSPacket(packet)\n\n # Update a progress callback\n self._read_size += TS.PACKET_SIZE\n percent_read = ((self._read_size / float(self._total_filesize)) * 100)\n new_percent_read = int(percent_read * 100)\n if new_percent_read != prev_percent_read and self.Progress:\n self.Progress(self._read_size, self._total_filesize, percent_read)\n prev_percent_read = new_percent_read\n\n adaptation_field_control = TS.get_adaptation_field_control(packet)\n continuity_counter = TS.get_continuity_counter(packet)\n\n # put together PES from payloads\n payload = TS.get_payload(packet)\n if pusi == True:\n if not ES.pes_packet_check_formedness(payload):\n if pid in self._elementary_streams:\n self._elementary_streams[pid] = None\n continue\n pes_id = ES.get_pes_stream_id(payload)\n self._elementary_streams[pid] = payload\n else:\n if pid in self._elementary_streams:\n # TODO: check packet sequence counter\n if not self._elementary_streams[pid]:\n self._elementary_streams[pid] = \"\"\n self._elementary_streams[pid] += payload\n else:\n # TODO: throw. 
this situaiton means out of order packets\n pass\n if pid in self._elementary_streams and ES.pes_packet_complete(self._elementary_streams[pid]):\n # TODO: handle packet contents here (callback)\n es = self._elementary_streams[pid]\n if self.OnESPacket:\n header_size = ES.get_pes_header_length(es)\n self.OnESPacket(pid, es, header_size)", "def read_psmecalist( istream , isEig=False ):\n\n mtlist=[] # this will be the output list\n\n # read everything\n alltxt = NP.genfromtxt( istream, delimiter='\\n' , dtype=str)\n try: \n istream.close()\n except:\n tmp=1\n\n # loop through all tensors\n n = len(alltxt)\n\n # check for desired output type\n if isEig:\n for i in range(0,n):\n mtlist.append( psmeca2EigMT( alltxt[i] ) )\n else:\n for i in range(0,n):\n mtlist.append( psmeca2SymMT( alltxt[i] ) )\n\n \n return mtlist, alltxt", "def classify(self, ep):\n # just here for defining the interface; work is done in subclasses\n pass", "def transform():", "def batchInverse(self):\n \n os.chdir(self.mainDir) \n # go to the main directory\n # find folders\n folder = os.listdir(u'.')\n\n # get folders starts with 'S' - freesurfer directory\n subfolders = [f for f in folder if f[:1] == self.foldername]\n\n for subject in subfolders:\n\n curdir = os.path.join(self.mainDir, subject)\n os.chdir(curdir)\n\n # 1) locate EEG files\n eegfiles = glob.glob('*-epo.fif')\n\n # 2) locate the Forward solution and read it\n fname_fwd = os.path.join(curdir, subject) +'-fwd.fif'\n fwd = mne.read_forward_solution(fname_fwd)\n\n # 3) for each eegfiles perform inverse mapping\n for file in eegfiles:\n\n # 4) read epochs\n eegfilePath = os.path.join(curdir,file)\n epochs = mne.read_epochs(eegfilePath, proj=True, preload=True, verbose=None)\n epochs.set_eeg_reference(ref_channels = \"average\", projection=True)\n\n\n # tmin, tmax = epochs.time_as_index([-1, -0.5])\n # 5) calculate noise covariance matrix\n noise_cov = mne.compute_covariance(epochs, keep_sample_mean=True, tmin=self.tmin, tmax=self.tmax)\n evoked = epochs.average().pick_types(eeg=True)\n \n # 6) make inverse operator\n info = evoked.info\n inverse_operator = make_inverse_operator(info, fwd, noise_cov, loose=0.2, depth=None)\n\n # 7) apply inverse solution\n snr = 3.\n lambda2 = 1. 
/ snr ** 2\n stc = apply_inverse(evoked, inverse_operator, lambda2, method= self.method)\n\n # 8) save the source result\n fname2save = file.split('-')[0]\n stc.save(fname2save, ftype='stc', verbose=None)\n\n\n print(\">>> Inverse mapping is complete <<<\")\n \n # go back to the main directory\n os.chdir(self.mainDir)", "def transform(self):", "def _convert(val, acceptable_types):\n return parse_expression(val, acceptable_types, raise_type=ParseError)", "def vel_eci2ecef(vel_eci: np.ndarray, \r\n time: datetime) -> np.ndarray:\r\n gst = att.Time(time).sidereal_time(\"apparent\", \"greenwich\").radian\r\n gst = np.atleast_1d(gst)\r\n assert gst.ndim == 1 and isinstance(gst[0], float) # must be in radians\r\n \r\n vel_eci = np.atleast_2d(vel_eci)\r\n assert vel_eci.shape[0] == gst.size, 'length of time does not match number of ECI positions'\r\n\r\n N, trip = vel_eci.shape \r\n if vel_eci.ndim > 2 or trip != 3:\r\n raise ValueError(\"eci triplets must be shape (N,3)\")\r\n\r\n vel_ecef = np.empty_like(vel_eci)\r\n\r\n for i in range(N):\r\n vel_ecef[i, :] = _rottrip(gst[i]) @ vel_eci[i, :]\r\n\r\n return vel_ecef.squeeze()", "def as_exons(self,input={}):\n # handle potentially applied input argument\n self._handle_input_subdict(input)\n # parse data in the AbgpGeneLocusDir\n self.parseinputgff()\n self.rungetorf()\n # we need abgp_geneconfirmation.geneconfirmation first!\n geneconfirmation( { self._create_auto_key(): self.input } )\n\n # get only the CDS-type of tracks that define the coding sequence\n genecdstracks = filtergffs4fmethod( self._obtain_gene_gff(), GFF_CDS_FMETHOD ) \n\n if len(genecdstracks) == 1:\n # deal with SingleExonOnOrf -> TSS + donor\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n return [ SingleExonOnOrf(tss,genecdstracks[-1][4],orf,gff={}) ]\n\n elif len(genecdstracks) == 0:\n # no tracks !?\n return []\n elif not self.input['orfid-genestructure']:\n # not mappable on Orfs / or no genestructure provided\n return []\n else:\n # list with exons,introns to return \n exons = []\n introns = []\n exonsandintrons = []\n\n # deal with FirstExonOnOrf -> TSS + donor\n try:\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n except:\n print self.input.keys(), self.input['proteinfref']\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][0])\n\n tss = self._gene_cds_track_2_tss( genecdstracks[0], orf )\n donor = self._gene_cds_track_2_donor( genecdstracks[0], orf )\n donor.phase = ( genecdstracks[0][4]-genecdstracks[0][3]-1 ) % 3\n exons.append( FirstExonOnOrf(tss,donor,orf,gff={}) )\n exonsandintrons.append( exons[-1] )\n\n # deal with internal ExonOnOrf(s): -> acceptor + donor\n for pos in range(1,len(genecdstracks)-1):\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][pos])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[pos], orf )\n accep.phase = exons[-1].donor.phase\n donor = self._gene_cds_track_2_donor( genecdstracks[pos], orf )\n donor.phase = ( genecdstracks[pos][4]-genecdstracks[pos][3]-1+accep.phase ) % 3\n exons.append( ExonOnOrf(accep,donor,orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf, exons[-2].orf,\n exons[-1].acceptor, exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( 
exons[-1] )\n\n # deal with FinalExonOnOrf -> acceptor + StopCodon\n orf = self.input['orfs'].get_orf_by_id(self.input['orfid-genestructure'][-1])\n accep = self._gene_cds_track_2_acceptor( genecdstracks[-1], orf )\n accep.phase = exons[-1].donor.phase\n exons.append( FinalExonOnOrf(accep,genecdstracks[-1][4],orf,gff={}) )\n sharednts = get_shared_nucleotides_at_splicesite(\n exons[-1].orf,exons[-2].orf,\n exons[-1].acceptor,exons[-2].donor,\n )\n intron = IntronConnectingOrfs(\n exons[-2].donor, exons[-1].acceptor, sharednts,\n exons[-2].orf, exons[-1].orf,\n )\n introns.append(intron)\n exonsandintrons.append( introns[-1] )\n exonsandintrons.append( exons[-1] )\n\n # return list of exons&introns\n return exonsandintrons", "def _convert_psd(self, ascii_format, ifo):\n command = [\"convert_psd_ascii2xml\",\n \"--fname-psd-ascii\", f\"{ascii_format}\",\n \"--conventional-postfix\",\n \"--ifo\", f\"{ifo}\"]\n \n pipe = subprocess.Popen(command, \n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n out, err = pipe.communicate()\n self.logger.info(command, production = self.production)\n if err:\n self.production.status = \"stuck\"\n if hasattr(self.production.event, \"issue_object\"):\n raise PipelineException(f\"An XML format PSD could not be created.\\n{command}\\n{out}\\n\\n{err}\",\n issue=self.production.event.issue_object,\n production=self.production.name)\n else:\n raise PipelineException(f\"An XML format PSD could not be created.\\n{command}\\n{out}\\n\\n{err}\",\n production=self.production.name)", "def __iter__(self):\n return self.cli.essids.essids().__iter__()", "def converters(self) -> Iterator[Tuple[str, Type[ConverterAPI]]]:", "def eeg_handler(self, unused_addr, args, ch1, ch2, ch3, ch4, ch5, ch6):\n\n\t\tself.data[0, self.eegcount] = ch2\n\t\tself.data[1, self.eegcount] = ch3\n\n\t\tself.eegcount += 1\n\n\t\tif self.eegcount >= self.settings.buffer_size - 1:\n\n\t\t\tself.eegcount = 0\n\t\t\told_data = self.data\n\t\t\tfor i in range(self.settings.num_channels):\n\t\t\t\tself.output_data[i] = self.eeg_to_tones(old_data[i])\n\n\t\t\tself.stream.start_stream()\n\n\t\t\twhile self.stream.is_active():\n\t\t\t\ttime.sleep(self.settings.buffer_size /\n\t\t\t\t\t\t self.settings.playback_rate)\n\n\t\t\tself.stream.stop_stream()\n\t\t\told_data = []", "def eog_removal(self):\n print('ch_names are: ' + str(self.raw.ch_names))\n ch_name = input(\"Enter a channel for eog detection. Best if the channel is near eyes, like Fp1 and Fp2. All channels will be named like 'CH_1': \")\n eog_projs, eog_events = mne.preprocessing.compute_proj_eog(self.raw, n_grad=0, n_mag=0, n_eeg=1, ch_name=ch_name, reject = None)\n projs = eog_projs\n self.epochs.add_proj(projs)\n self.epochs.apply_proj()\n return self.epochs", "def test_epsf_build(self):\n\n size = 25\n oversampling = 4.\n stars = extract_stars(self.nddata, self.init_stars, size=size)\n epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=20,\n progress_bar=False)\n epsf, fitted_stars = epsf_builder(stars)\n\n ref_size = (size * oversampling) + 1\n assert epsf.data.shape == (ref_size, ref_size)\n\n y0 = int((ref_size - 1) / 2)\n z = epsf.data[y0, :]\n ampl, peak, sigma = gaussian1d_moments(z)\n assert_allclose(ampl, 0.002487, rtol=1e-4)\n assert_allclose(peak, y0, rtol=1e-3)\n assert_allclose(sigma, oversampling * self.stddev, rtol=1e-5)", "def test_xml_epsilons(xml_parser_disp_details):\n\n epsilon = xml_parser_disp_details.get_epsilon()\n epsilon_ion = xml_parser_disp_details.get_epsilon_ion()\n test = np.array([[13.05544887 ,-0. 
, 0. ],\n [-0. ,13.05544887 , -0. ],\n [ 0. , 0. , 13.05544887]])\n np.testing.assert_allclose(epsilon, test)\n test = np.array([[0. ,-0. , 0. ],\n [-0. ,0. , -0. ],\n [ 0. , 0. , 0.]])\n np.testing.assert_allclose(epsilon_ion, test)", "def events_to_input(self, events, position):\n return self._event_to_input(events[position])", "def RE2e(Type=\"DFA\"):\n CC10, CC11 = state('CC10'), state('CC11')\n for i in sigma:\n CC10.transit[i] = CC10\n CC11.transit[i] = CC11\n for i in sigma_AW:\n CC10.transit[i] = CC11\n for i in sigma_B:\n CC11.transit[i] = CC10\n if Type == \"pDFA\":\n CC1 = pDFA('CC1', sigma, [CC10, CC11], CC10, [CC10])\n else:\n CC1 = DFA('CC1', sigma, [CC10, CC11], CC10, [CC10])\n if (SIZEOF):\n EM_size[\"RE2e\"] = asizeof.asizeof(CC1)\n return CC1", "def __init__(self, config):\n spi = SPI(-1, baudrate=config.baudrate,\n sck=config.sck, mosi=config.mosi, miso=config.miso)\n self._epd = epaper2in9.EPD(spi, config.cs, config.dc,\n config.rst1, config.busy)\n self._epd.init()\n self._buffer = Buffer(epaper2in9.EPD_WIDTH, epaper2in9.EPD_HEIGHT)", "def prepare_for_ESR(self): \r\n return self._api.prepare_for_ESR()", "def transform(self, data):", "def sonify_eog_artifacts(self, artifacts):\r\n channel = int(input(\"Enter the channel number for which you want to do the EOG sonification: \\n 1 -> FC5,\\n 2 -> FC1,\\n \\\r\n3 -> C3,\\n 4 -> CP5,\\n 5 -> CP1,\\n 6 -> FC2,\\n 7 -> FC6,\\n 8 -> C4,\\n 9 -> CP2,\\n 10 -> CP6\\nInsert number:\"))\r\n fs = 440 # A4 note\r\n data = np.sin(2. * np.pi * fs * self.time)\r\n coeff = np.zeros(self.n_samples)\r\n samp_freq_conversion = int(np.ceil(self.audio_sample_rate / self.eeg_sampling_frequency))\r\n duration = int(0.2 * self.eeg_sampling_frequency) # where self.eeg_sampling_frequency is 1 sec\r\n i = 0\r\n for samp in artifacts[channel-1]:\r\n if samp != 0:\r\n for j in range(i*samp_freq_conversion, (i+duration)*samp_freq_conversion): \r\n if j < self.n_samples:\r\n coeff[j] = 1\r\n print('EOG artifacts sonification: ', i, ' out of ', len(artifacts[channel-1]))\r\n i += 1\r\n print('EOG artifacts sonification: ', i, ' out of ', len(artifacts[channel-1]))\r\n data = data * coeff\r\n return data", "def get_ephemeris(rundate, sat_name):\n file_key = \"slr_ephemeris\"\n ephemeris_data = get_satellite_vars(sat_name)\n provider_list = config.tech.prediction_providers.list\n # Find the latest version of the observation file\n versions = config.files.glob_variable(file_key, \"version\", r\"\\d+\", file_vars=ephemeris_data)\n\n try:\n ephemeris_data[\"version\"] = sorted(versions)[-1]\n providers = config.files.glob_variable(file_key, \"provider\", r\"\\w+\", file_vars=ephemeris_data)\n for provider in provider_list:\n if provider in providers:\n ephemeris_data[\"provider\"] = provider\n break\n else:\n log.fatal(f\"No valid provider found: {', '.join(providers)}\")\n except IndexError:\n log.info(\"No ephemeris data found\")\n log.info(f\"Download manually from https://cddis.nasa.gov/archive/slr/cpf_predicts/{rundate.year}/{sat_name}\")\n log.fatal(f\"Please save missing file as '{config.files.path(file_key)}' !\")\n eph_parser = parsers.parse_key(file_key, file_vars=ephemeris_data)\n eph = calculate_initial_values(eph_parser.as_dict(), rundate)\n\n return eph", "def __iter__(self):\n return iter(self.parses)", "def _get_converted_data(self):\n pass", "def eeg_to_tones(self, data):\n\n\t\t_, _, signal = s.spectrogram(data, self.settings.sample_rate)\n\n\t\tmaxsignal = np.max(signal)\n\t\tsignal = np.mean(signal / maxsignal,\n\t\t\t\t\t\t 
axis=1)[:self.settings.maximum_frequency]\n\n\t\tendindex = self.settings.maximum_frequency - 1\n\t\tstartindex = endindex - self.settings.num_max_frequencies\n\n\t\thighest_freqs = self.settings.f[np.argpartition(\n\t\t\tsignal, self.settings.num_max_frequencies)][startindex:endindex]\n\t\tsignal = signal[np.argpartition(signal, self.settings.num_max_frequencies)][\n\t\t\t\t\t\t\t\t\t\tstartindex:endindex]\n\n\t\tprint(highest_freqs)\n\t\tvalues = np.sum([np.sin(2 * np.pi * np.arange(self.settings.playback_rate * self.settings.cycle_duration)\n\t\t\t\t\t\t\t\t* highest_freqs[i] / self.settings.playback_rate) * signal[i] for i in range(self.settings.num_max_frequencies)], axis=0).astype(np.float32)\n\t\treturn values", "def add_episode(self, ep):\n #make da season\n ses = self._add_season(ep)\n dvdses = self._add_season(ep, dvd=True) \n self._add_episode(ep, ses)\n self._add_episode(ep, dvdses, dvd=True)", "def epix(self):\n return self._epix", "def inference_preprocess(self):\n return", "def to_epsilon_nfa(self):\n self._initialize_enfa()\n s_initial = self._set_and_get_initial_state_in_enfa()\n s_final = self._set_and_get_final_state_in_enfa()\n self._process_to_enfa(s_initial, s_final)\n return self._enfa", "def _get_data(ch_decim=1):\n # Read evoked\n evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0))\n evoked.info[\"bads\"] = [\"MEG 2443\"]\n with evoked.info._unlock():\n evoked.info[\"lowpass\"] = 16 # fake for decim\n evoked.decimate(12)\n evoked.crop(0.0, 0.3)\n picks = mne.pick_types(evoked.info, meg=True, eeg=False)\n picks = picks[::ch_decim]\n evoked.pick_channels([evoked.ch_names[pick] for pick in picks])\n evoked.info.normalize_proj()\n\n noise_cov = mne.read_cov(fname_cov)\n noise_cov[\"projs\"] = []\n noise_cov = regularize(noise_cov, evoked.info, rank=\"full\", proj=False)\n return evoked, noise_cov", "def convert_from_evoros_result(self, evoros_result):\n error = np.abs(np.array(evoros_result['Actual Speed']) - np.array(evoros_result['Goal Speed']))\n\n # convert to Enki result\n enki_result = {\n 'error': error\n }\n return enki_result", "def snips_result_to_eobject(result: SnipsResult) -> EObject:\n assert mm_root is not None\n if not result:\n raise ValueError(\"SnipsResult argument should evaluate to True\")\n\n eclass = mm_root.getEClassifier(result.name)\n eobject = eclass()\n for slot in result:\n eattribute = _getEAttribute(eclass, slot.name)\n\n value = eattribute.eType.from_string(str(slot))\n if eattribute.many:\n getattr(eobject, eattribute.name).append(value)\n else:\n setattr(eobject, eattribute.name, value)\n return eobject", "def parse(cls, path: str) -> List[QuoteModel]:\n for ingestor in cls.ingestors:\n if ingestor.can_ingest(path):\n try:\n return ingestor.parse(path)\n except CannotIngestException as e:\n return e\n except Exception as e:\n raise Exception(\"Unable to parse file\")\n else:\n raise CannotIngestException(\"File cannot be ingested\")", "def preprocess(self):", "def convert_H_eV(en_H):\n return en_H/eV_H", "def read_input(E):\n # ---------- INSERT CODE BELOW ----------\n edge_list = []\n\n for _ in range(E):\n src, dst, cost = input('').rstrip('\\r\\n').split()\n edge_list.append((int(src),int(dst),int(cost)))\n \n return edge_list\n # ---------- INSERT CODE ABOVE ----------", "def generate_calliope_conversion(transformers, timeframe, es_name):\n\n # setting this up here in case no transformer is used, there is still something needed to be yield\n\n conversion, loc = dict(), dict()\n\n for transformer in transformers:\n if 
transformer.uid.name.lower() == 'conversion':\n # conflict with calliope parent tech. Name change only affects\n # yaml and native calliope post processing. Tessif will sort out the previous name.\n transformer_name = f'{transformer.uid.carrier}_{transformer.uid.name}'\n else:\n transformer_name = transformer.uid.name\n\n if transformer.timeseries:\n msg = (\n f\"Transformer '{transformer.uid.name}' has a timeseries given. \"\n f\"Calliope can only consider timeseries for sources and sinks. \"\n )\n logger.warning(msg)\n\n # giving the uid information that cant get recreated on any other way\n uid = transformer.uid\n uid = f'{uid.name}.{uid.region}.{uid.sector}.{uid.carrier}.{uid.node_type}'\n\n conversion[f'{transformer_name}'] = dict(\n essentials=dict(\n name=uid,\n # only needed for visualisation in native calliope tools\n color=str('#99ccff'),\n parent='conversion', # overwritten to conversion_plus if multi input/output\n carrier_in=list(transformer.inputs)[0],\n carrier_out=list(transformer.outputs)[0],\n ),\n )\n\n flows = dict({'constraints': {\n 'energy_con': True,\n 'energy_prod': True,\n }})\n costs = dict(costs=dict())\n\n # transformer to conversion\n if len(transformer.outputs) == 1 and len(transformer.inputs) == 1:\n input_ = list(transformer.inputs)[0]\n output_ = list(transformer.outputs)[0]\n flows['constraints'].update(parse_flow_parameters(\n transformer, output_, len(timeframe)))\n costs['costs'] = parse_cost_parameters(transformer, output_)\n\n eff = transformer.conversions[(f'{input_}', f'{output_}')]\n if type(eff) != float and type(eff) != int:\n eff = np.array(eff).astype(float)\n timeseries = pd.DataFrame({'': timeframe, f'{transformer_name}': eff})\n timeseries.to_csv(\n os.path.join(\n write_dir, 'Calliope', f'{es_name}', 'timeseries_data', f'{transformer_name}_eff.csv'),\n index=False)\n eff = f'file={transformer_name}_eff.csv:{transformer_name}'\n\n flows['constraints'].update({\n 'energy_cap_min': float(transformer.flow_rates[output_].max),\n 'energy_eff': eff,\n })\n\n if transformer.expandable[f'{output_}']:\n if transformer.expandable[f'{input_}']:\n exp_cost = transformer.expansion_costs[f'{output_}'] + transformer.expandable[f'{input_}'] / eff\n limit_in = transformer.expansion_limits[f'{input_}'].max * eff\n limit_out = transformer.expansion_limits[f'{output_}'].max\n energy_cap_max = min(limit_in, limit_out)\n flows['constraints'].update({\n 'energy_cap_max': energy_cap_max,\n })\n else:\n exp_cost = transformer.expansion_costs[f'{output_}']\n costs['costs']['monetary'].update(\n {\n 'interest_rate': 0,\n 'energy_cap': exp_cost,\n }\n )\n if transformer.expandable[f'{input_}'] and not transformer.expandable[f'{output_}']:\n exp_cost = transformer.expandable[f'{input_}'] / eff\n energy_cap_max = transformer.expansion_limits[f'{input_}'].max * eff\n flows['constraints'].update({\n 'energy_cap_max': energy_cap_max,\n })\n costs['costs']['monetary'].update(\n {\n 'interest_rate': 0,\n 'energy_cap': exp_cost,\n }\n )\n\n conversion[f'{transformer_name}'].update(flows)\n conversion[f'{transformer_name}'].update(costs)\n\n # transformer to conversion_plus\n if len(transformer.outputs) > 1 or len(transformer.inputs) > 1:\n conv_plus = generate_calliope_conversion_plus(\n transformer, len(timeframe))\n conversion[f'{transformer_name}']['essentials'].update(conv_plus['essentials'])\n conversion[f'{transformer_name}']['constraints'].update(conv_plus['constraints'])\n conversion[f'{transformer_name}']['costs'].update(conv_plus['costs'])\n\n if 'energy_cap_min' in 
flows['constraints'].keys():\n if flows['constraints']['energy_cap_min'] == float('inf'):\n flows['constraints'].update({'energy_cap_min': 0})\n\n # creating the location in which the storage is called\n loc.update(dict({\n f'{transformer_name} location': {\n 'coordinates': {'lat': float(transformer.uid.latitude), 'lon': float(transformer.uid.longitude)},\n 'techs': {f'{transformer_name}': None},\n }}))\n\n yield conversion, loc", "def get_ePSF(self, psf_params, origin=[0,0], shape=[20,20], filter='F140W', get_extended=False):\n sh = shape\n y0, x0 = np.array(sh)/2.-1\n \n xd = x0+origin[1]\n yd = y0+origin[0]\n \n xc, yc = int(x0), int(y0)\n \n psf_xy = self.get_at_position(x=xd, y=yd, filter=filter)\n \n yp, xp = np.indices(sh)\n \n dx = xp-psf_params[1]-x0\n dy = yp-psf_params[2]-y0\n \n if get_extended:\n extended_data = self.extended_epsf[filter]\n else:\n extended_data = None\n \n output_psf = self.eval_ePSF(psf_xy, dx, dy, extended_data=extended_data)*psf_params[0]\n \n return output_psf", "def E2f(E):\n f=E/c['h']/u['eV']\n return f", "def _convert(self):\n logger.info(\"Converting conformers to density\")\n logger.debug(\"Masking\")\n self._transformer.reset(full=True)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self._transformer.mask(self._rmask)\n mask = self._transformer.xmap.array > 0\n self._transformer.reset(full=True)\n\n nvalues = mask.sum()\n self._target = self.xmap.array[mask]\n logger.debug(\"Density\")\n nmodels = len(self._coor_set)\n self._models = np.zeros((nmodels, nvalues), float)\n for n, coor in enumerate(self._coor_set):\n self.conformer.coor = coor\n self.conformer.b = self._bs[n]\n self._transformer.density()\n model = self._models[n]\n model[:] = self._transformer.xmap.array[mask]\n np.maximum(model, self.options.bulk_solvent_level, out=model)\n self._transformer.reset(full=True)", "def convert_exon_data(opts, exon_recs):\n er0 = exon_recs[0]\n ti = TxInfo(ac=er0[\"tx_ac\"],\n origin=opts.origin,\n hgnc=None,\n cds_se_i=None,\n exons_se_i=\";\".join(\n [\"{},{}\".format(int(ex[\"tx_start\"]) - 1, ex[\"tx_end\"]) for ex in exon_recs])\n )\n es = ExonSet(\n tx_ac=er0[\"tx_ac\"],\n alt_ac=er0[\"ref_ac\"],\n method=\"splign\",\n strand=-1 if er0[\"strand\"] == \"-\" else 1,\n exons_se_i=\";\".join(\n [\"{},{}\".format(int(ex[\"g_start\"]) - 1, ex[\"g_end\"]) for ex in exon_recs])\n )\n return (ti, es)", "def _read_expression_direct(cls):\n\n expression_data = {}\n expression_columns = cls._get_columns(EXPRESSION_MANIFEST)\n expression_psvs = cls._get_component_psvs(EXPRESSION_MANIFEST)\n\n for expression_psv in expression_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(expression_psv))):\n row_dict = dict(zip(expression_columns, row.strip().split(b'|')))\n expression_data.setdefault(\n row_dict[\"cellkey\"].decode(), {})[row_dict[\"featurekey\"].decode()] = \\\n float(row_dict[\"exrpvalue\"])\n\n return expression_data", "def s2_epsg_code(self):\n ul, lr = self.ul_lr\n epsg_old = self.epsg\n if epsg_old != 4326:\n lon, lat = ImageIO.transform_point(ul, epsg_old)\n else:\n lat, lon = ul\n lon_mod = int(lon / 6)\n\n lon_code = str(30 + lon_mod if lon < 0 else 31 - lon_mod).zfill(2)\n epsg = \"327\" if lat < 0 else \"326\"\n return int(epsg + lon_code)", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += 
self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals", "def scan_e(self, sym: bool=True) -> None:\n decimal = False\n num = False\n end = None\n if sym:\n self.expect('^')\n self.next()\n if self.match('-'):\n print(f\"Caractére inexperado ('-') na posição {self._ind}: expoente negativo deve estar cercado por parênteses.\")\n quit()\n\n if self.match('('):\n self.next()\n end = ')'\n self.expect('-')\n self.e = '-'\n self.next()\n\n while self.match(*tuple('1234567890.')):\n if self.match('.'):\n if not decimal:\n decimal = True\n else:\n print(f\"Caractére inesperado: segundo ponto decimal na posição {self._ind}.\")\n quit()\n else:\n num = True\n self.e += self.get()\n self.next()\n\n if not num:\n print(f\"Nenhum dígito no expoente, na posição {self._ind}.\")\n quit()\n\n if end is not None:\n self.expect(end)", "def _extract_data_from_feed(self):\n for eco in self.snyk_data:\n if eco == \"java\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Maven.\")\n self._add_default_obj_for_eco(\"maven\")\n self._parse_data(self.snyk_data[eco], \"maven\")\n elif eco == \"js\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Npm.\")\n self._add_default_obj_for_eco(\"npm\")\n self._parse_data(self.snyk_data[eco], \"npm\")\n elif eco == \"python\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Pypi.\")\n self._add_default_obj_for_eco(\"pypi\")\n self._parse_data(self.snyk_data[eco], \"pypi\")\n elif eco == \"golang\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Golang.\")\n self._add_default_obj_for_eco(\"golang\")\n self._parse_golang_data(self.snyk_data[eco], \"golang\")\n else:\n logger.info(\"Ignoring the ecosystem {} from the feed\".format(eco))", "def iter_unpack(raw):\n return struct.iter_unpack(EVENT_FORMAT, raw)", "def _perform_data_conversion(self):\n self.data = []\n for value in self.elements_to_convert:\n try:\n timestamp_epoch = value['time_utc']\n year = parse_date_utc(timestamp_epoch).year\n value = parse_float(value['measures'][\n 'smoothed_variation_GIA_annual_&_semi_annual_removed'], nullable=False)\n self.data.append(SeaLevelRiseMeasure(timestamp_epoch=timestamp_epoch, year=year, value=value))\n except (ValueError, AttributeError, KeyError, IndexError, TypeError):\n _id = value.get('_id', 'Unknown ID')\n self.logger.exception('An error occurred while parsing data. SeaLevelRiseMeasure with ID \"%s\" will not '\n 'be converted.' 
% _id)\n if self.data:\n # Ensuring that all values are greater than or equal to 0\n min_value = abs(self.data[0].value)\n for value in self.data:\n value.value += min_value", "def read_eps_l2(filename, generic=False, to_xarray=False):\n eps_file = read_eps(filename)\n ptype = eps_file.mphr[\"PRODUCT_TYPE\"]\n fmv = int(eps_file.mphr[\"FORMAT_MAJOR_VERSION\"])\n\n if ptype in [\"SMR\", \"SMO\"]:\n\n if fmv == 12:\n data, metadata = read_smx_fmv_12(eps_file)\n else:\n raise RuntimeError(\"L2 SM format version not supported.\")\n\n data[\"time\"] = jd2dt(data.pop(\"jd\"))\n\n rename_coords = {\"longitude\": \"lon\", \"latitude\": \"lat\"}\n\n for k, v in rename_coords.items():\n data[v] = data.pop(k)\n\n # convert spacecraft_id to internal sat_id\n sat_id = np.array([4, 3, 5])\n metadata[\"sat_id\"] = sat_id[metadata[\"spacecraft_id\"] - 1]\n\n # add/rename/remove fields according to generic format\n if generic:\n data = conv_epsl2szx_generic(data, metadata)\n\n # convert dict to xarray.Dataset or numpy.ndarray\n if to_xarray:\n for k in data.keys():\n if len(data[k].shape) == 1:\n dim = [\"obs\"]\n elif len(data[k].shape) == 2:\n dim = [\"obs\", \"beam\"]\n\n data[k] = (dim, data[k])\n\n coords = {}\n coords_fields = [\"lon\", \"lat\", \"time\"]\n for cf in coords_fields:\n coords[cf] = data.pop(cf)\n\n data = xr.Dataset(data, coords=coords, attrs=metadata)\n else:\n # collect dtype info\n dtype = []\n for var_name in data.keys():\n if len(data[var_name].shape) == 1:\n dtype.append((var_name, data[var_name].dtype.str))\n elif len(data[var_name].shape) > 1:\n dtype.append((var_name, data[var_name].dtype.str,\n data[var_name].shape[1:]))\n\n ds = np.empty(data[\"time\"].size, dtype=np.dtype(dtype))\n for k, v in data.items():\n ds[k] = v\n data = ds\n else:\n raise ValueError(\"Format not supported. 
Product type {:1}\"\n \" Format major version: {:2}\".format(ptype, fmv))\n\n return data, metadata", "def convert(self, events: np.ndarray) -> dict:\n logging.debug(\"converting %s events\", len(events[0]))\n\n dataset = {}\n\n for (\n four_momenta_ids,\n inv_mass_name,\n ) in self._registered_inv_masses.items():\n if len(four_momenta_ids) == 1:\n index = self._convert_ids_to_indices(four_momenta_ids)[0]\n\n dataset[inv_mass_name] = np.square(\n np.array(self._reaction_info.final_state_masses[index])\n )\n\n else:\n four_momenta = np.sum(\n events[self._convert_ids_to_indices(four_momenta_ids), :],\n axis=0,\n )\n\n dataset[inv_mass_name] = tfa_kin.mass_squared(\n np.array(four_momenta)\n ).numpy()\n\n for subsys, angle_names in self._registered_subsystems.items():\n topology = [\n np.sum(events[self._convert_ids_to_indices(x), :], axis=0)\n for x in subsys.final_states\n ]\n if subsys.recoil_state:\n topology = [\n topology,\n np.sum(\n events[\n self._convert_ids_to_indices(subsys.recoil_state),\n :,\n ],\n axis=0,\n ),\n ]\n if subsys.parent_recoil_state:\n topology = [\n topology,\n np.sum(\n events[\n self._convert_ids_to_indices(\n subsys.parent_recoil_state\n ),\n :,\n ],\n axis=0,\n ),\n ]\n\n values = tfa_kin.nested_helicity_angles(topology)\n\n # the last two angles is always what we are interested\n dataset[angle_names[0]] = values[-2].numpy()\n dataset[angle_names[1]] = values[-1].numpy()\n\n return dataset", "def _parse_esn_data (self, netflix_page_data):\n # we generate an esn from device strings for android\n import subprocess\n try:\n manufacturer = subprocess.check_output([\"/system/bin/getprop\", \"ro.product.manufacturer\"])\n if manufacturer:\n esn = 'NFANDROID1-PRV-'\n input = subprocess.check_output([\"/system/bin/getprop\", \"ro.nrdp.modelgroup\"])\n if not input:\n esn = esn + 'T-L3-'\n else:\n esn = esn + input.strip(' \\t\\n\\r') + '-'\n esn = esn + '{:5}'.format(manufacturer.strip(' \\t\\n\\r').upper())\n input = subprocess.check_output([\"/system/bin/getprop\" ,\"ro.product.model\"])\n esn = esn + input.strip(' \\t\\n\\r').replace(' ', '=').upper()\n self.log(msg='Android generated ESN:' + esn)\n return esn\n except OSError as e:\n self.log(msg='Ignoring exception for non Android devices')\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n return netflix_page_data.get('esn', '')\n\n esn = ''\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'esnGeneratorModel' in dict(item).keys():\n esn = item['esnGeneratorModel']['data']['esn']\n return esn", "def test_mouse_sym_to_ens(self):\n\n mapper = EnsemblMapper(\n from_type='symbol',\n to_type='ensembl',\n host=HOST,\n from_organism='mmusculus')\n mapped = mapper.map_ids(['Trp53', 'Brca1'])\n\n assert mapped == ['ENSMUSG00000059552', 'ENSMUSG00000017146']", "def vae_decoder(self):\n return stax.serial(\n stax.Dense(self.hidden_dims[1], W_init=stax.randn()),\n stax.Relu,\n stax.Dense(self.hidden_dims[0], W_init=stax.randn()),\n stax.Relu,\n stax.Dense(self.out_dim, W_init=stax.randn()),\n stax.exp\n )", "def parse(self):", "def _dinamic_decode(self):\n raise NotImplementedError" ]
[ "0.5776446", "0.5423936", "0.5320594", "0.5170984", "0.51104087", "0.5029466", "0.5025242", "0.50205046", "0.48759243", "0.4801463", "0.47459334", "0.47000486", "0.46995685", "0.4694864", "0.46835348", "0.467961", "0.4648427", "0.46379155", "0.46379155", "0.4625023", "0.45730945", "0.45613006", "0.4542094", "0.45276928", "0.45153186", "0.45150012", "0.44994313", "0.44905585", "0.44898614", "0.4485389", "0.4479791", "0.4471952", "0.44581792", "0.44578424", "0.4448294", "0.44442677", "0.44271353", "0.442248", "0.44221961", "0.4418681", "0.44107115", "0.43924373", "0.4389978", "0.43892527", "0.4388255", "0.43863922", "0.4381319", "0.4379081", "0.4374471", "0.4371017", "0.43699938", "0.4359905", "0.43541077", "0.4347735", "0.43457252", "0.43420407", "0.43319058", "0.43289685", "0.43283945", "0.43277803", "0.43274668", "0.4326532", "0.43219152", "0.43215048", "0.43167898", "0.43154502", "0.43137664", "0.4305549", "0.4305129", "0.43038267", "0.4302215", "0.42999384", "0.4298649", "0.42794442", "0.42759532", "0.42693755", "0.4267975", "0.42664394", "0.42627794", "0.42610574", "0.42600995", "0.42594686", "0.42591962", "0.42507687", "0.42415166", "0.42407757", "0.42338267", "0.42312288", "0.42216146", "0.4215425", "0.4213095", "0.4209538", "0.42049092", "0.42043164", "0.4202021", "0.41984436", "0.41980723", "0.4185281", "0.41818345", "0.41782534" ]
0.6517192
0
Correct for motion and call SliceTimeCorrect.
def CorrectMotion(self):
    if self.verbose:
        print "Correct for motion"
    for entry in self.entry_map['epi']:
        info = self.info[entry]
        if os.path.exists(info['imgfile_m'] + info['suffix']):
            return
        # Always use brik for 3dDeconvolve.
        suffix = '+orig'
        epifile = '%s%s' % (info['imgfile'], suffix)
        prefix = info['imgfile_m']
        base_entry = info['base_entry']
        if info['base'] == 'start':
            # Use the first frame specified in template file. Defaults
            # to zero.
            base = info['motion_ref_frame']
        else:
            # Use the last frame.
            base = self.info[base_entry]['tdim'] - info['skip']-1
        base = ('%d' % base).replace(' ','')
        # Correct for slice-timing.
        self.SliceTimeCorrect(info, epifile)
        plane = info['plane']
        anat_tgt = info['anat_tgt']
        # anat_entry = self.anat_entry[plane]
        if info['catmats']:
            # Include additonal transformation in motion correction such
            # that final image is in register with the fieldmap, which has
            # been registered to the structural image that will be used for
            # spatial normalization.
            self.MotcorCatenate(info, base, anat_tgt)
        else:
            # Assume fieldmap is in register with the structural.
            self.Motcor(info, base)
        if info.get('fmapname', None) is None:
            # No fieldmap correction.
            if self.fsl_flip:
                # Flip the way fslview likes it.
                self.FSLFlip(info['imgfile_m'], info['imgfile_final'])
            elif info['suffix'] == '.nii':
                # Copy motion-corrected images from /tmp to output directory
                outfile = info['imgfile_final'] + info['suffix']
                cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)
                self.CheckExec(cmd, [outfile], force=True)
                cmd = '/bin/rm %s+orig*' % info['imgfile_m']
                self.CheckExec(cmd, [], force=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_drift_correction(self, pos):\n\n\t\tprint(\"function not supported yet\")", "def refframe_correct(self, ra, dec, obstime, sobjs=None):\n # Correct Telescope's motion\n refframe = self.par['calibrations']['wavelengths']['refframe']\n if refframe in ['heliocentric', 'barycentric'] \\\n and self.par['calibrations']['wavelengths']['reference'] != 'pixel':\n msgs.info(\"Performing a {0} correction\".format(self.par['calibrations']['wavelengths']['refframe']))\n # Calculate correction\n radec = ltu.radec_to_coord((ra, dec))\n vel, vel_corr = wave.geomotion_correct(radec, obstime,\n self.spectrograph.telescope['longitude'],\n self.spectrograph.telescope['latitude'],\n self.spectrograph.telescope['elevation'],\n refframe)\n # Apply correction to objects\n msgs.info('Applying {0} correction = {1:0.5f} km/s'.format(refframe, vel))\n if (sobjs is not None) and (sobjs.nobj != 0):\n # Loop on slits to apply\n gd_slitord = self.slits.slitord_id[np.logical_not(self.extract_bpm)]\n for slitord in gd_slitord:\n indx = sobjs.slitorder_indices(slitord)\n this_specobjs = sobjs[indx]\n # Loop on objects\n for specobj in this_specobjs:\n if specobj is None:\n continue\n specobj.apply_helio(vel_corr, refframe)\n\n # Apply correction to wavelength image\n self.vel_corr = vel_corr\n self.waveimg *= vel_corr\n\n else:\n msgs.info('A wavelength reference frame correction will not be performed.')", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def drift_correction(self, pos=None, fix_triggered=False):\n\t\t\n\t\tif pos == None:\n\t\t\tpos = self.dispsize[0] / 2, self.dispsize[1] / 2\n\t\tif fix_triggered:\n\t\t\treturn self.fix_triggered_drift_correction(pos)\t\t\n\t\tself.draw_drift_correction_target(pos[0], pos[1])\n\t\tpressed = False\n\t\twhile not pressed:\n\t\t\tpressed, presstime = self.kb.get_key()\n\t\t\tif pressed:\n\t\t\t\tif pressed == 'escape' or pressed == 'q':\n\t\t\t\t\tprint(\"libeyetribe.EyeTribeTracker.drift_correction: 'q' or 'escape' pressed\")\n\t\t\t\t\treturn self.calibrate()\n\t\t\t\tgazepos = self.sample()\n\t\t\t\tif ((gazepos[0]-pos[0])**2 + (gazepos[1]-pos[1])**2)**0.5 < self.pxerrdist:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tself.errorbeep.play()\n\t\treturn False", "def update_dead_reckoning(self):\n now = time.time()\n time_diff_s = now - self._last_observation_s\n self._last_observation_s = now\n\n self._prediction_step(time_diff_s)", "def non_causal_timecrop(self, length):\n assert length < self.time_length\n\n cut = (self.time_length - length) / 2\n\n _, i_start = _find_nearest(self.times, cut)\n _, i_end = _find_nearest(self.times, self.time_length - cut)\n\n h = np.fft.ifftshift(np.fft.fftshift(self.in_time)[..., i_start:i_end])\n\n new_response = self.from_time(self.fs, h)\n\n if new_response.time_length != length:\n w = f\"Could not precisely shrink to {length}s with fs = {self.fs}\"\n warnings.warn(w)\n\n return new_response", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range,\n speed_sp=self.MAX_SPEED,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this\n # line is correct 
as is).", "def recalc_spd(*args):\n return _ida_frame.recalc_spd(*args)", "def acoustic_lasting(self):\n for i in range(self._number_of_events):\n if self._number_of_events == 1:\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n else:\n if i == 0:\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i+1].pitch_classes_octave\n\n elif i == self._number_of_events - 1:\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i-1].pitch_classes_octave\n else:\n self._events[i].corrected_pitch_classes += self._events[i+1].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i].pitch_classes_octave\n self._events[i].corrected_pitch_classes += self._events[i-1].pitch_classes_octave\n\n self._events[i].corrected_pitch_classes = self.sort_single_event_notes(self._events[i].corrected_pitch_classes)\n self._events[i].corrected_pitch_classes = remove_identical(self._events[i].corrected_pitch_classes)", "def _update_transition(self, dt, time, direction): #pylint:disable-msg=C0103,C0301\r\n pass", "def sos_correction(self, ratio):\n\n # Correct velocities\n self.u_mps = self.u_mps * ratio\n self.v_mps = self.v_mps * ratio", "def handleUpdateTimer(self):\n self.mustRun(task = self.position,\n ret_signal = self.positionUpdate)", "def runTask1(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSweep()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSuctionOff()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.reset()\r\n\t\tself._motion.terminate()", "def revolver(self):\r\n\t\tself.__revuelto=True", "def test2(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tself._motion.terminate()", "def _calibrate(self, t_send, t_recv, server_timestamp):\n pass", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def update(self, dt):", "def update(self, dt):", "def set_up_orbit_correctors(ps_beg, delay, id_slice1, 
ds_slice, zplot, id_slices, U_core, lambdaref):\n SXSS = Chicane(3.2716, 0.362, 0.830399, delay[0])\n HXSS = Chicane(3.2, 0.3636, 0.5828, delay[1])\n\n OC2 = [CORR08, D1_SXSS, SXSS, D2_SXSS, QUAD09, CORR09]\n OC3 = [CORR15, D1_HXSS, HXSS, D2_HXSS, QUAD16, CORR16]\n\n ps_end1 = beam_transportation(ps_beg, U_core[0])\n\n # ps_end1 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the first undulator section.\n\n # The id of the slice on the axis in the second undulator section\n on_axis_id_U2 = int(id_slice1+delay[0]/ds_slice+ (8*110)*lambdaref/ds_slice) # The last part is slippage\n\n print(on_axis_id_U2)\n\n ps_end_slice1 = beam_property_along_s(ps_end1, id_slices)[0:4, :]\n ps_on_axis_2 = np.ravel(ps_end_slice1[:, on_axis_id_U2])\n\n # print(ps_on_axis_2)\n\n OC2_optimized = analyze_orbit_corrector(OC2[0], OC2[-1], OC2[1:-1], ps_on_axis_2)\n print(OC2_optimized)\n CORR08_new = Orbit_Corrector(OC2[0].length, OC2_optimized[0], OC2_optimized[2])\n CORR09_new = Orbit_Corrector(OC2[-1].length, OC2_optimized[1], OC2_optimized[3])\n\n # The whole U2 with optimized orbit correctors\n U2_new = [CORR08_new] + OC2[1:-1] + [CORR09_new] + U_core[1]\n ps_end2 = beam_transportation(ps_end1, U2_new)\n\n # ps_end2 is a 4-by-N array. N is the number of macro-particles. It is the full\n # 4D phase space distribution at the end of the second undulator section.\n\n # The id of the slice on the axis in the third undulator section\n on_axis_id_U3 = int(id_slice1+(delay[0]+delay[1])/ds_slice +(14*110*lambdaref)/ds_slice) # The last term is the slipage\n\n print(on_axis_id_U3)\n\n ps_end_slice2 = beam_property_along_s(ps_end2, id_slices)[0:4, :]\n ps_on_axis_3 = np.ravel(ps_end_slice2[ :, on_axis_id_U3])\n\n # print(ps_on_axis_3)\n\n OC3_optimized = analyze_orbit_corrector(OC3[0], OC3[-1], OC3[1:-1], ps_on_axis_3)\n print(OC3_optimized)\n CORR15_new = Orbit_Corrector(OC3[0].length, OC3_optimized[0], OC3_optimized[2])\n CORR16_new = Orbit_Corrector(OC3[-1].length, OC3_optimized[1], OC3_optimized[3])\n\n U3_new = [CORR15_new] + OC3[1:-1] + [CORR16_new] + U_core[2]\n\n Undulator_Beamline = U_core[0]+U2_new+U3_new\n\n return Undulator_Beamline", "def update_apc22(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n pos = self.pos+delta_t/2.0*(self.vel+vel)\n vel = self.vel+delta_t/2.0*(self.force(pos, vel, self.time+delta_t,\n drag=False)+kap[1])\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = 
(self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap", "def _update_active_rides_fast(self, time: datetime) -> None:\n pass", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def on_correct_utterance(self, event):\n if not self.quitting:\n number = event.utterance_number\n instance = event.instance_name\n self.the_mediator.correct_utterance(instance, number)", "def update(self):\n super().update()\n self.decelerate()\n #check for a collisison with all rock types\n self.checkForRockCollisions()\n #when the ship gets hit by a rock, it enters a period of invulnerability. 
we need to make sure that period ends at the proper time\n self.checkGracePeriodDuration()\n #movement stuff\n if (self.isAcceleratingForward):\n self.accelerateForwards()\n if (self.isRotatingLeft):\n self.rotateLeft()\n if (self.isRotatingRight):\n self.rotateRight()", "def motion_correct(mov, max_iters=5, shift_threshold=1., reslice=slice(None,None), in_place=True, verbose=True, compute_kwargs={}, apply_kwargs={}):\n if not in_place:\n mov = mov.copy()\n mov = mov[reslice]\n \n all_vals = []\n for it in range(max_iters):\n if verbose:\n print('Iteration {}'.format(it)); sys.stdout.flush()\n template,vals = compute_motion(mov, **compute_kwargs)\n mov = apply_motion_correction(mov, vals, **apply_kwargs)\n maxshifts = np.abs(vals[:,[0,1]]).max(axis=0)\n all_vals.append(vals)\n if verbose:\n print('Shifts: {}'.format(str(maxshifts))); sys.stdout.flush()\n if np.all(maxshifts < shift_threshold):\n break\n\n # combine values from iterations\n all_vals = np.array(all_vals)\n return_vals = np.empty([all_vals.shape[1],all_vals.shape[2]])\n return_vals[:,[0,1]] = all_vals[:,:,[0,1]].sum(axis=0)\n return_vals[:,2] = all_vals[-1,:,2]\n\n return mov,template,return_vals", "def update(self):\n self.syncSpriteCoordinates()\n self.moveBasedOnCurrentMomentum()\n #self.decelerate()\n self.checkCanvasBoundsAndWrap()", "def routine(self):\n # Read in time table data\n\n _, t1, t2, t3, t4, _, _ = self.times\n _, p1, p2, p3, p4, _, _ = self.positions\n \n # Carry out routine\n self.throttle_valve_set(p1)\n self.valve_pulse(1, t1)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n \n self.throttle_valve_set(p2) \n self.purge(t2)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n \n \n ## Check selected method for t3 in main recipe table,\n ## Carry out an operation based on the selection\n mode = self.settings['t3_method']\n self.throttle_valve_set(p3)\n \n if mode == 'Shutter':\n self.shutter_pulse(t3)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n elif mode == 'PV':\n self.valve_pulse(2, t3)\n if self.interrupt_measurement_called:\n self.shutoff()\n return \n elif mode == 'RF':\n power = self.seren.settings['recipe_power']\n self.plasma_dose(t3, power)\n if self.interrupt_measurement_called:\n self.shutoff()\n return\n# elif mode == 'PV/Purge':\n# '''Run sub_cyc number of subroutine cycles.\n# Subroutine consists of a valve pulse and a purge period.'''\n# for _ in range(sub_cyc):\n# self.valve_pulse(2, sub_t0)\n# self.purge(sub_t1)\n \n \n self.throttle_valve_set(p4)\n self.purge(t4)\n if self.interrupt_measurement_called:\n self.shutoff()\n return", "def target_velocity(self, time):\n pass", "def target_velocity(self, time):\n pass", "def on_mouse_move(self, event):\n\n # self.view = 1 * np.eye(4, dtype=np.float32)\n # self.model = 1 * np.eye(4, dtype=np.float32)\n\n # self.translate -= event.delta[1]\n # self.translate = max(-1, self.translate)\n # print(event.delta[1])\n # print(self.translate)\n # self.view = translate((0, 0, -self.translate))\n # self.game_program['u_view'] = self.view\n # self.game_program['u_size'] = 5 / self.translate\n # self.view = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.view\n # self.model = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.model\n # print(self.view)\n\n # self.game_program['u_model'] = self.model\n # self.game_program['u_view'] = self.view\n\n x, y = event.pos\n #print(x, y)\n self.x_offset, self.y_offset = x - self.last_x, - (y - self.last_y)\n self.last_x, self.last_y = x, y\n 
self.x_offset *= self.sensitivity\n self.y_offset *= self.sensitivity\n\n self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def _correct_overflow(timestamps, detectors, overflow_ch, overflow):\n index_overflows = np.where((detectors == overflow_ch))[0]\n for n, (idx1, idx2) in enumerate(zip(index_overflows[:-1],\n index_overflows[1:])):\n timestamps[idx1:idx2] += (n + 1)*overflow\n timestamps[idx2:] += (n + 2)*overflow", "def on_timer(self, event):\n \n o = Unicorn()\n data = o.get_data(rt)\n k = len(data[0])\n y[:, :-k] = y[:, k:]\n y[:, -k:] = remap((data), -40, 40, -1, 1 ) \n t2 = _thread.start_new_thread(printT, ())\n #y2 = np.array([lfilter(b, a, y[i]) for i in range(17)])\n self.program['a_position'].set_data(y.ravel().astype(np.float32))\n self.update()", "def stabilize(td):\n\n def correct_saccade1(data):\n data.x -= np.rint(3.5 * data.ts / 105e3).astype(np.uint16)\n data.y -= np.rint(7 * data.ts / 105e3).astype(np.uint16)\n return data\n\n def correct_saccade2(data):\n data.x -= np.rint(3.5 + 3.5 * (data.ts - 105e3) / 105e3).astype(\n np.uint16)\n data.y -= np.rint(7 - 7 * (data.ts - 105e3) / 105e3).astype(np.uint16)\n return data\n\n def correct_saccade3(data):\n data.x -= np.rint(7 - 7 * (data.ts - 210e3) / 105e3).astype(np.uint16)\n return data\n\n copy = np.piecewise(td.data,\n [td.data.ts <= 105e3,\n (td.data.ts > 105e3) & (td.data.ts <= 210e3),\n (td.data.ts > 210e3)],\n [correct_saccade1, correct_saccade2,\n correct_saccade3]).view(np.recarray)\n\n # after saccades, we might end up with invalid x and y values, have to\n # correct these\n x_vals = copy.x\n y_vals = copy.y\n copy.x = np.piecewise(x_vals,\n [x_vals >= 65000,\n (x_vals < 65000) & (x_vals >= td.width),\n x_vals < td.width],\n [0, td.width - 1, lambda x: x])\n copy.y = np.piecewise(y_vals,\n [y_vals >= 65000,\n (y_vals < 65000) & (y_vals >= td.height),\n y_vals < td.height],\n [0, td.height - 1, lambda y: y])\n\n return copy", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep()\n self.arm_motor.run_to_rel_pos(\n speed_sp=900, position_sp=-5100)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n print('motor is no longer running')\n ev3.Sound.beep()\n self.arm_motor.position = 0", "def cool(self):\n self.t = self.t - 1", "def timerange_change():\n global transformer_top\n assert transformer_top is not None\n global transformer_bottom\n assert transformer_bottom is not None\n global label_encoders_per_modality\n assert label_encoders_per_modality is not None\n global DEVICE\n assert DEVICE is not None\n global USE_LOCAL_CONDITIONING\n assert USE_LOCAL_CONDITIONING is not None\n global partial_sample_model\n assert partial_sample_model is not None\n\n layer = str(request.args.get('layer'))\n temperature = request.args.get('temperature', type=float)\n start_index_top = request.args.get('start_index_top', type=int)\n uniform_sampling = bool(strtobool(\n request.args.get('uniform_sampling', type=str,\n default=\"False\")))\n\n # try to retrieve local conditioning map in the request's JSON payload\n (class_conditioning_top_map, class_conditioning_bottom_map,\n input_conditioning_top, input_conditioning_bottom) = (\n parse_conditioning(request)\n )\n 
global_instrument_family_str = str(\n request.args.get('instrument_family_str'))\n global_pitch = request.args.get('pitch', type=int)\n global_class_conditioning = {\n 'pitch': global_pitch,\n 'instrument_family_str': global_instrument_family_str\n }\n if (not USE_LOCAL_CONDITIONING\n or not transformer_bottom.local_class_conditioning):\n class_conditioning_bottom = global_class_conditioning.copy()\n class_conditioning_tensors_bottom = make_conditioning_tensors(\n class_conditioning_bottom,\n label_encoders_per_modality)\n class_conditioning_bottom_map = None\n else:\n class_conditioning_bottom = class_conditioning_tensors_bottom = None\n\n top_code, bottom_code = parse_codes(request)\n\n # extract frame to operate on\n end_index_top = start_index_top + transformer_top.shape[1]\n top_code_frame = top_code[..., start_index_top:end_index_top]\n\n upsampling_ratio_time = (transformer_bottom.shape[1]\n // transformer_top.shape[1])\n start_index_bottom = upsampling_ratio_time * start_index_top\n end_index_bottom = start_index_bottom + transformer_bottom.shape[1]\n bottom_code_frame = bottom_code[..., start_index_bottom:end_index_bottom]\n generation_mask_batched = parse_mask(request).to(DEVICE)\n\n time_indexes_top = make_time_indexes(start_index_top,\n top_code.shape[-1],\n transformer_top.shape[-1])\n time_indexes_bottom = make_time_indexes(start_index_bottom,\n bottom_code.shape[-1],\n transformer_bottom.shape[-1])\n\n if layer == 'bottom':\n if not uniform_sampling:\n bottom_code_resampled_frame = partial_sample_model(\n model=transformer_bottom,\n condition=top_code_frame,\n batch_size=1,\n codemap_size=transformer_bottom.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_bottom,\n local_class_conditioning_map=class_conditioning_bottom_map,\n initial_code=bottom_code_frame,\n mask=generation_mask_batched,\n time_indexes_source=time_indexes_top,\n time_indexes_target=time_indexes_bottom,\n )\n else:\n bottom_code_resampled_frame = bottom_code_frame.masked_scatter(\n generation_mask_batched,\n torch.randint_like(bottom_code_frame,\n high=transformer_bottom.n_class_target)\n )\n\n bottom_code_resampled = bottom_code\n bottom_code_resampled[..., start_index_bottom:end_index_bottom] = (\n bottom_code_resampled_frame)\n\n # create JSON response\n response = make_response(top_code, bottom_code_resampled,\n input_conditioning_top,\n input_conditioning_bottom)\n elif layer == 'top':\n if (not USE_LOCAL_CONDITIONING\n or not transformer_top.local_class_conditioning):\n # try to retrieve conditioning from http arguments\n class_conditioning_top = global_class_conditioning.copy()\n class_conditioning_tensors_top = make_conditioning_tensors(\n class_conditioning_top,\n label_encoders_per_modality)\n class_conditioning_top_map = None\n else:\n class_conditioning_top = class_conditioning_tensors_top = None\n\n if not uniform_sampling:\n if transformer_top.self_conditional_model:\n condition = top_code_frame\n else:\n condition = None\n top_code_resampled_frame = partial_sample_model(\n model=transformer_top,\n condition=condition,\n device=DEVICE,\n batch_size=1,\n codemap_size=transformer_top.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_top,\n local_class_conditioning_map=class_conditioning_top_map,\n initial_code=top_code_frame,\n mask=generation_mask_batched,\n time_indexes_source=time_indexes_top,\n time_indexes_target=time_indexes_top,\n )\n else:\n top_code_resampled_frame = top_code_frame.masked_scatter(\n 
generation_mask_batched,\n torch.randint_like(top_code_frame,\n high=transformer_top.n_class_target)\n )\n\n top_code_resampled = top_code\n top_code_resampled[..., start_index_top:end_index_top] = (\n top_code_resampled_frame)\n\n upsampling_ratio_frequency = (transformer_bottom.shape[0]\n // transformer_top.shape[0])\n generation_mask_bottom_batched = (\n generation_mask_batched\n .repeat_interleave(upsampling_ratio_frequency, -2)\n .repeat_interleave(upsampling_ratio_time, -1)\n )\n bottom_code_resampled_frame = partial_sample_model(\n model=transformer_bottom,\n condition=top_code_resampled_frame,\n device=DEVICE,\n batch_size=1,\n codemap_size=transformer_bottom.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_bottom,\n local_class_conditioning_map=class_conditioning_bottom_map,\n initial_code=bottom_code_frame,\n mask=generation_mask_bottom_batched,\n time_indexes_source=time_indexes_top,\n time_indexes_target=time_indexes_bottom,\n )\n\n # update conditioning map\n bottom_mask = generation_mask_bottom_batched[0]\n new_conditioning_map_bottom = {\n modality: masked_fill(modality_conditioning,\n bottom_mask,\n class_conditioning_bottom[modality])\n for modality, modality_conditioning\n in input_conditioning_bottom.items()\n }\n\n bottom_code_resampled = bottom_code\n bottom_code_resampled[..., start_index_bottom:end_index_bottom] = (\n bottom_code_resampled_frame)\n\n # create JSON response\n response = make_response(top_code_resampled, bottom_code_resampled,\n input_conditioning_top,\n new_conditioning_map_bottom)\n\n return response", "def test_fix_metadata_correct_time(self):\n fixed_cube = self.fix.fix_metadata([self.cube])[0]\n time_coord = fixed_cube.coord('time')\n np.testing.assert_allclose(time_coord.points, [0, 1])\n assert time_coord.bounds is None", "def photometric_calibration():\n pass", "def arm_calibration(self):\n # DONE: 3. Implement the arm calibration movement by fixing the code below (it has many bugs). It should to this:\n # Command the arm_motor to run forever in the positive direction at max speed.\n # Create an infinite while loop that will block code execution until the touch sensor's is_pressed value is True.\n # Within that loop sleep for 0.01 to avoid running code too fast.\n # Once past the loop the touch sensor must be pressed. So stop the arm motor quickly using the brake stop action.\n # Make a beep sound\n # Now move the arm_motor 14.2 revolutions in the negative direction relative to the current location\n # Note the stop action and speed are already set correctly so we don't need to specify them again\n # Block code execution by waiting for the arm to finish running\n # Make a beep sound\n # Set the arm encoder position to 0 (the last line below is correct to do that, it's new so no bug there)\n\n # Code that attempts to do this task but has MANY bugs (nearly 1 on every line). 
Fix them!\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action='brake')\n ev3.Sound.beep().wait()\n # time.sleep(2)\n # arm_motor.stop(stop_action='brake')\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range, speed_sp=900)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this line is correct as is).", "def test_rolling_before_analysis(self):\n cheese = TomoCheese.from_demo_images()\n cheese.analyze()\n original_roi_1 = copy.copy(cheese.module.rois[\"1\"].pixel_value)\n for img in cheese.dicom_stack:\n img.roll(direction=\"x\", amount=20)\n cheese.analyze()\n new_roi_1 = cheese.module.rois[\"1\"].pixel_value\n assert math.isclose(original_roi_1, new_roi_1, abs_tol=3)", "def post_process(self):\n\t\ti_s = 0\n\t\ti_e = 0\n\t\tif self.trans_t_dict[0][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[0][1] = self.fake_start_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[0][1] = self.noise_itv[0][1] # start_offset\n\t\t\tself.trans_t_dict[0][2] = 0.1\n\t\tif self.trans_t_dict[len(self.trans_t_dict)-1][1] == 0:\n\t\t\tif len(self.noise_itv) == 0:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.fake_end_offset\n\t\t\telse:\n\t\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][1] = self.noise_itv[-1][0] # end_offset\n\t\t\tself.trans_t_dict[len(self.trans_t_dict)-1][2] = 0.1\n\n\t\twhile i_s < len(self.trans_t_dict):\n\t\t\twhile i_s < len(self.trans_t_dict) and self.trans_t_dict[i_s][1] != 0:\n\t\t\t\ti_s += 1\n\t\t\tif i_s == len(self.trans_t_dict):\n\t\t\t\ti_e = len(self.trans_t_dict)\n\t\t\tif i_s < len(self.trans_t_dict):\n\t\t\t\ti_s -= 1\n\t\t\t\ti_e = i_s + 1\n\t\t\t\twhile i_e < len(self.trans_t_dict) and self.trans_t_dict[i_e][1] == 0:\n\t\t\t\t\ti_e += 1\n\t\t\t\tif i_e == len(self.trans_t_dict):\n\t\t\t\t\tbreak\n\n\t\t\t\t# incorperate the noise inverval\n\t\t\t\ts_time = self.trans_t_dict[i_s][1]\n\t\t\t\te_time = self.trans_t_dict[i_e][1]\n\t\t\t\t\"\"\"\n\t\t\t\tfor ts in self.noise_itv:\n\t\t\t\t\tif len(ts) == 2:\t\t\t\t\t\t\n\t\t\t\t\t\ttime1 = ts[0]\n\t\t\t\t\t\ttime2 = ts[1]\n\t\t\t\t\t\tif s_time < time1 and time2 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time1)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttime0 = ts[0]\n\t\t\t\t\t\tif s_time < time0 and time0 < e_time:\n\t\t\t\t\t\t\te_time = min(e_time, time0)\n\t\t\t\t\"\"\"\n\t\t\t\tchar_len = 0\n\t\t\t\tfor i in range(i_s, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i][0])\n\t\t\t\t# ratio = float(self.trans_t_dict[i_e][1]-self.trans_t_dict[i_s][1]) / float(char_len)\n\t\t\t\tratio = float(e_time - s_time) / float(char_len)\n\t\t\t\tchar_len = 0\n\t\t\t\t# s_time = self.trans_t_dict[i_s][1]\n\t\t\t\tfor i in range(i_s+1, i_e):\n\t\t\t\t\tchar_len += len(self.trans_t_dict[i-1][0])\n\t\t\t\t\tself.trans_t_dict[i][1] = s_time + char_len * ratio\n\t\t\t\t\tself.trans_t_dict[i][2] = len(self.trans_t_dict[i][0]) * ratio\n\t\t\ti_s = i_e", "def auto_correction(self):\n\n if self.mi_standard_fb is not None:\n try: \n is_st_fb_running = self.mi_standard_fb.is_running()\n except Exception as e:\n logger.warning(\"error during status of st. FB reading: \" + str(e))\n is_st_fb_running = False\n \n if self.mi_standard_fb is not None and is_st_fb_running:\n logger.info(\"auto_correction: St.FB is running. Stop Ad. 
FB\")\n self.stop_feedback()\n return 0\n \n is_nan = self.ref_orbit_calc()\n if is_nan:\n logger.warning(\"auto_correction: nan in the ref orbit. Pause 1 sec\")\n time.sleep(1)\n return\n\n start = time.time()\n stop_flag = self.correct()\n print(\"correct: \", time.time() - start)\n time.sleep(0.01)\n if not stop_flag:\n start = time.time()\n self.apply_kicks()\n print(\"apply_kicks: \", time.time() - start)\n self.sase_hist.append(self.target_filtered[-1])\n self.le_warn.clear()\n else:\n logger.warning(\"auto_correction: stop_flag = True. Pause 1 sec\")\n self.le_warn.clear()\n self.le_warn.setText(\"Stop flag. Kicks are not applied\")\n self.le_warn.setStyleSheet(\"color: red\")\n\n time.sleep(1)", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = 
np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def _step(self, board, elapsedTime):\n\t\tpass", "def auto_resting(self):\n self.image_list = self.animation_dict[self.direction]\n self.image = self.image_list[self.index]\n\n if self.rect.y % 32 != 0:\n self.correct_position(self.rect.y)\n if self.rect.x % 32 != 0:\n self.correct_position(self.rect.x)\n\n if (self.current_time - self.move_timer) > 2000:\n direction_list = ['up', 'down', 'left', 'right']\n random.shuffle(direction_list)\n direction = direction_list[0]\n self.begin_auto_moving(direction)\n self.move_timer = self.current_time", "def step(self, time_delta: float, game_data):\n if self.get_invincible(): # if become invincible\n if time.time() - self._invincible_time > 10: # if the time past 10 seconds\n self.set_invincible(False) # remove the buff\n\n if self.get_velocity()[0] != 0: # if moved horizontally\n self._image_time += time_delta # count time while move\n # set the order of image id while moving, generally it's 3-2-1-2-3-...\n if self._image_time <= 0.1: # if time past 0.1 second\n self._image = '3' # set image id to '3'\n elif 0.1 < self._image_time <= 0.2 or 0.3 < self._image_time <= 0.4:\n # if time past 0.2 or 0.4 second\n self._image = '2' # set image id to '2'\n elif 0.2 < self._image_time <= 0.3: # if time past 0.3 second\n self._image = '1' # set image id to '1'\n elif self._image_time > 0.4: # reset the cycle\n self._image = '3'\n self._image_time = 0\n else: # if not moving\n self._image = 'default' # set it with default image", "def calibrate(self):\n\t\tLTOGRIGHT = []\n\t\tLTOGUP = []\n\t\tRTOGRIGHT = []\n\t\tRTOGUP = []\n\t\tstart = time.time()\n\t\tcalibration_time = 5.0\n\t\twhile time.time() - start < calibration_time:\n\t\t\tevents = pygame.event.get()\n\t\t\tfor event in events:\n\t\t\t\tif event.type == pygame.JOYAXISMOTION:\n\t\t\t\t\tLTOGRIGHT.append(self.joystick.get_axis(self.LTOGRIGHT))\n\t\t\t\t\tLTOGUP.append(-self.joystick.get_axis(self.LTOGUP))\n\t\t\t\t\tRTOGRIGHT.append(self.joystick.get_axis(self.RTOGRIGHT))\n\t\t\t\t\tRTOGUP.append(-self.joystick.get_axis(self.RTOGUP))\n\n\t\t# calibration sets highest value equal to 1.0\n\t\tself.calibration[0] = 1.0/max(LTOGRIGHT)\n\t\tself.calibration[1] = -1.0/min(LTOGRIGHT)\n\t\tself.calibration[2] = -1.0/min(LTOGUP)\n\t\tself.calibration[3] = 1.0/max(LTOGUP)\n\t\tself.calibration[4] = 1.0/max(RTOGRIGHT)\n\t\tself.calibration[5] = -1.0/min(RTOGRIGHT)\n\t\tself.calibration[6] = -1.0/min(RTOGUP)\n\t\tself.calibration[7] = 1.0/max(RTOGUP)", "def apply_motion_correction(mov, shifts, interpolation=None, crop=None, in_place=False, verbose=True):\n if interpolation is None and cv2 is not None:\n 
interpolation = cv2.INTER_LINEAR\n\n if not in_place:\n mov=mov.copy()\n\n if mov.ndim==2:\n mov = mov[None,...]\n\n if type(shifts) in [str] and mov.filename:\n shifts,crop_ = retrieve_motion_correction_data(shifts, mov.filename)\n if crop is None:\n crop = crop_\n\n if shifts.ndim==1:\n shifts = shifts[None,...]\n\n if shifts.ndim==2 and shifts.shape[1]==3:\n shifts = shifts[:,:2]\n\n assert shifts.ndim==2 and shifts.shape[1]==2\n\n t,h,w=mov.shape\n if verbose:\n print('Applying shifts:')\n pbar = ProgressBar(maxval=len(mov)).start()\n for i,frame in enumerate(mov):\n sh_x_n, sh_y_n = shifts[i]\n if cv2 is not None:\n M = np.float32([[1,0,sh_x_n],[0,1,sh_y_n]]) \n mov[i] = cv2.warpAffine(frame,M,(w,h),flags=interpolation)\n elif cv2 is None:\n M = np.float32([[1,0,sh_y_n],[0,1,sh_x_n],[0,0,1]]) \n transform = sktf.AffineTransform(matrix=M)\n mov[i] = sktf.warp(frame, transform)\n if verbose:\n pbar.update(i)\n\n if verbose:\n pbar.finish()\n\n if crop:\n if crop == True:\n ymax = int(min([0, min(shifts[:,0])]) or None)\n xmax = int(min([0, min(shifts[:,1])]) or None)\n ymin = int(max(shifts[:,0]))\n xmin = int(max(shifts[:,1]))\n elif isinstance(crop, PF_numeric_types):\n crop = int(crop)\n ymax,xmax = -crop,-crop\n ymin,xmin = crop,crop\n mov = mov[:, ymin:ymax, xmin:xmax]\n\n return mov.squeeze()", "def always_touching(self):\n assert int(self.snake[0].real - self.snake[1].real) in [1, 0, -1] and int(\n self.snake[0].real - self.snake[1].real) in [1, 0, -1]", "def _doCalibration(self):\n self._cmdCalibration(2)", "def chacha(self):\n self.right()\n time.sleep(2)\n self.stop()\n self.back()\n time.sleep(1)\n self.servo(1000)\n time.sleep(1)\n self.stop()\n self.fwd()\n time.sleep(1)\n self.stop()\n self.servo(2000)\n time.sleep(1)\n self.stop()\n self.left()\n time.sleep(2)\n self.stop()", "def run_go_to_star(self):\n if self.altitude != self.tele_altitude or self.azimuth != self.tele_azimuth:\n self.azimuth_update()\n self.altitude_update()", "def punched(self):\n if not self.dizzy:\n self.dizzy = 1\n self.original = self.image\n Chimp.count_punch += 1", "def CorrectStokes(self, t, fluid):\n\n\t\tself.SetTime(t + fluid.dt)\n\t\t\n\t\tself.F = (self.Tether - self.X)\n\t\tf = self.F.sum(axis=0) / self.Nb\n\n\t\tself.X += f\n\n\t\tfluid.u += f / fluid.dt", "def compute_tide_corrections(\n x: np.ndarray, y: np.ndarray, delta_time: np.ndarray,\n DIRECTORY: str | pathlib.Path | None = None,\n MODEL: str | None = None,\n ATLAS_FORMAT: str = 'netcdf',\n GZIP: bool = False,\n DEFINITION_FILE: str | pathlib.Path | None = None,\n EPSG: str | int = 3031,\n EPOCH: list | tuple = (2000, 1, 1, 0, 0, 0),\n TYPE: str or None = 'drift',\n TIME: str = 'UTC',\n METHOD: str = 'spline',\n EXTRAPOLATE: bool = False,\n CUTOFF: int | float=10.0,\n APPLY_FLEXURE: bool = False,\n FILL_VALUE: float = np.nan,\n **kwargs\n ):\n\n # check that tide directory is accessible\n if DIRECTORY is not None:\n DIRECTORY = pathlib.Path(DIRECTORY).expanduser()\n if not DIRECTORY.exists():\n raise FileNotFoundError(\"Invalid tide directory\")\n\n # validate input arguments\n assert TIME in ('GPS', 'LORAN', 'TAI', 'UTC', 'datetime')\n assert METHOD in ('bilinear', 'spline', 'linear', 'nearest')\n\n # get parameters for tide model\n if DEFINITION_FILE is not None:\n model = pyTMD.io.model(DIRECTORY).from_file(\n pathlib.Path(DEFINITION_FILE).expanduser())\n else:\n model = pyTMD.io.model(DIRECTORY, format=ATLAS_FORMAT,\n compressed=GZIP).elevation(MODEL)\n\n # determine input data type based on variable dimensions\n if not 
TYPE:\n TYPE = pyTMD.spatial.data_type(x, y, delta_time)\n # reform coordinate dimensions for input grids\n # or verify coordinate dimension shapes\n if (TYPE.lower() == 'grid') and (np.size(x) != np.size(y)):\n x,y = np.meshgrid(np.copy(x),np.copy(y))\n elif (TYPE.lower() == 'grid'):\n x = np.atleast_2d(x)\n y = np.atleast_2d(y)\n elif TYPE.lower() in ('time series', 'drift'):\n x = np.atleast_1d(x)\n y = np.atleast_1d(y)\n\n # converting x,y from EPSG to latitude/longitude\n try:\n # EPSG projection code string or int\n crs1 = pyproj.CRS.from_epsg(int(EPSG))\n except (ValueError,pyproj.exceptions.CRSError):\n # Projection SRS string\n crs1 = pyproj.CRS.from_string(EPSG)\n # output coordinate reference system\n crs2 = pyproj.CRS.from_epsg(4326)\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n lon, lat = transformer.transform(x.flatten(), y.flatten())\n\n # assert delta time is an array\n delta_time = np.atleast_1d(delta_time)\n # convert delta times or datetimes objects to timescale\n if (TIME.lower() == 'datetime'):\n timescale = pyTMD.time.timescale().from_datetime(\n delta_time.flatten())\n else:\n timescale = pyTMD.time.timescale().from_deltatime(delta_time,\n epoch=EPOCH, standard=TIME)\n # number of time points\n nt = len(timescale)\n\n # read tidal constants and interpolate to grid points\n if model.format in ('OTIS','ATLAS','TMD3'):\n amp,ph,D,c = pyTMD.io.OTIS.extract_constants(lon, lat, model.grid_file,\n model.model_file, model.projection, type=model.type,\n method=METHOD, extrapolate=EXTRAPOLATE, cutoff=CUTOFF,\n grid=model.format, apply_flexure=APPLY_FLEXURE)\n # use delta time at 2000.0 to match TMD outputs\n deltat = np.zeros((nt), dtype=np.float64)\n elif (model.format == 'netcdf'):\n amp,ph,D,c = pyTMD.io.ATLAS.extract_constants(lon, lat, model.grid_file,\n model.model_file, type=model.type, method=METHOD,\n extrapolate=EXTRAPOLATE, cutoff=CUTOFF, scale=model.scale,\n compressed=model.compressed)\n # use delta time at 2000.0 to match TMD outputs\n deltat = np.zeros((nt), dtype=np.float64)\n elif (model.format == 'GOT'):\n amp,ph,c = pyTMD.io.GOT.extract_constants(lon, lat, model.model_file,\n method=METHOD, extrapolate=EXTRAPOLATE, cutoff=CUTOFF,\n scale=model.scale, compressed=model.compressed)\n # delta time (TT - UT1)\n deltat = timescale.tt_ut1\n elif (model.format == 'FES'):\n amp,ph = pyTMD.io.FES.extract_constants(lon, lat, model.model_file,\n type=model.type, version=model.version, method=METHOD,\n extrapolate=EXTRAPOLATE, cutoff=CUTOFF, scale=model.scale,\n compressed=model.compressed)\n # available model constituents\n c = model.constituents\n # delta time (TT - UT1)\n deltat = timescale.tt_ut1\n\n # calculate complex phase in radians for Euler's\n cph = -1j*ph*np.pi/180.0\n # calculate constituent oscillation\n hc = amp*np.exp(cph)\n\n # predict tidal elevations at time and infer minor corrections\n if (TYPE.lower() == 'grid'):\n ny,nx = np.shape(x)\n tide = np.ma.zeros((ny,nx,nt),fill_value=FILL_VALUE)\n tide.mask = np.zeros((ny,nx,nt),dtype=bool)\n for i in range(nt):\n TIDE = pyTMD.predict.map(timescale.tide[i], hc, c,\n deltat=deltat[i], corrections=model.format)\n MINOR = pyTMD.predict.infer_minor(timescale.tide[i], hc, c,\n deltat=deltat[i], corrections=model.format)\n # add major and minor components and reform grid\n tide[:,:,i] = np.reshape((TIDE+MINOR), (ny,nx))\n tide.mask[:,:,i] = np.reshape((TIDE.mask | MINOR.mask), (ny,nx))\n elif (TYPE.lower() == 'drift'):\n tide = np.ma.zeros((nt), fill_value=FILL_VALUE)\n tide.mask = 
np.any(hc.mask,axis=1)\n tide.data[:] = pyTMD.predict.drift(timescale.tide, hc, c,\n deltat=deltat, corrections=model.format)\n minor = pyTMD.predict.infer_minor(timescale.tide, hc, c,\n deltat=deltat, corrections=model.format)\n tide.data[:] += minor.data[:]\n elif (TYPE.lower() == 'time series'):\n nstation = len(x)\n tide = np.ma.zeros((nstation,nt), fill_value=FILL_VALUE)\n tide.mask = np.zeros((nstation,nt),dtype=bool)\n for s in range(nstation):\n TIDE = pyTMD.predict.time_series(timescale.tide, hc[s,None,:], c,\n deltat=deltat, corrections=model.format)\n MINOR = pyTMD.predict.infer_minor(timescale.tide, hc[s,None,:], c,\n deltat=deltat, corrections=model.format)\n tide.data[s,:] = TIDE.data[:] + MINOR.data[:]\n tide.mask[s,:] = (TIDE.mask | MINOR.mask)\n # replace invalid values with fill value\n tide.data[tide.mask] = tide.fill_value\n\n # return the ocean or load tide correction\n return tide", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def _step(self) -> None:\n self._update_gradient_at(DimerPoint.left)\n\n self._optimise_rotation()\n trns_result = self._translate()\n\n if trns_result == _StepResult.skipped_translation:\n self._converged_translation = True\n\n return None", "def set_flow_corrected(self):\n self.exh.temp_v_press_fit = (\n np.polyfit(self.exh.pressure_drop[0:4],\n self.exh.T_array[0:4], 2) ) \n self.flow_data.T_hx = np.polyval(self.exh.temp_v_press_fit,\n self.flow_data.pressure_drop) \n self.flow_data.flow = ( self.flow_data.flow_trash *\n self.flow_data.T_hx / self.flow_data.T )", "def _update_moved(self):\n self._RAS_textbox.setPlainText('{:.2f}, {:.2f}, {:.2f}'.format(\n *self._ras))\n self._VOX_textbox.setPlainText('{:3d}, {:3d}, {:3d}'.format(\n *self._current_slice))\n self._intensity_label.setText('intensity = {:.2f}'.format(\n self._base_data[tuple(self._current_slice)]))", "def before_tick(self, time):\n pass", "def evaluate(self, time, view) -> ControlPoint:\n ...", "def dst(self, dt):", "def turn_until_clear(self):\n 
print(\"Rotating until lane is clear\")\n #make sure robot is looking straight\n self.servo(self.MIDPOINT)\n while self.read_distance() < self.SAFE_DISTANCE:\n self.left(primary=40,counter=-40)\n time.sleep(.05)\n #stop motion before ending method\n self.stop()", "def compute_tide_corrections(x, y, delta_time, DIRECTORY=None, MODEL=None,\n EPSG=3031, EPOCH=(2000,1,1,0,0,0), TYPE='drift', TIME='UTC',\n METHOD='spline', EXTRAPOLATE=False, FILL_VALUE=np.nan):\n\n #-- select between tide models\n if (MODEL == 'CATS0201'):\n grid_file = os.path.join(DIRECTORY,'cats0201_tmd','grid_CATS')\n model_file = os.path.join(DIRECTORY,'cats0201_tmd','h0_CATS02_01')\n model_format = 'OTIS'\n model_EPSG = '4326'\n model_type = 'z'\n elif (MODEL == 'CATS2008'):\n grid_file = os.path.join(DIRECTORY,'CATS2008','grid_CATS2008')\n model_file = os.path.join(DIRECTORY,'CATS2008','hf.CATS2008.out')\n model_format = 'OTIS'\n model_EPSG = 'CATS2008'\n model_type = 'z'\n elif (MODEL == 'CATS2008_load'):\n grid_file = os.path.join(DIRECTORY,'CATS2008a_SPOTL_Load','grid_CATS2008a_opt')\n model_file = os.path.join(DIRECTORY,'CATS2008a_SPOTL_Load','h_CATS2008a_SPOTL_load')\n model_format = 'OTIS'\n model_EPSG = 'CATS2008'\n model_type = 'z'\n elif (MODEL == 'TPXO9-atlas'):\n model_directory = os.path.join(DIRECTORY,'TPXO9_atlas')\n grid_file = 'grid_tpxo9_atlas.nc.gz'\n model_files = ['h_q1_tpxo9_atlas_30.nc.gz','h_o1_tpxo9_atlas_30.nc.gz',\n 'h_p1_tpxo9_atlas_30.nc.gz','h_k1_tpxo9_atlas_30.nc.gz',\n 'h_n2_tpxo9_atlas_30.nc.gz','h_m2_tpxo9_atlas_30.nc.gz',\n 'h_s2_tpxo9_atlas_30.nc.gz','h_k2_tpxo9_atlas_30.nc.gz',\n 'h_m4_tpxo9_atlas_30.nc.gz','h_ms4_tpxo9_atlas_30.nc.gz',\n 'h_mn4_tpxo9_atlas_30.nc.gz','h_2n2_tpxo9_atlas_30.nc.gz']\n model_format = 'netcdf'\n model_type = 'z'\n SCALE = 1.0/1000.0\n elif (MODEL == 'TPXO9-atlas-v2'):\n model_directory = os.path.join(DIRECTORY,'TPXO9_atlas_v2')\n grid_file = 'grid_tpxo9_atlas_30_v2.nc.gz'\n model_files = ['h_q1_tpxo9_atlas_30_v2.nc.gz','h_o1_tpxo9_atlas_30_v2.nc.gz',\n 'h_p1_tpxo9_atlas_30_v2.nc.gz','h_k1_tpxo9_atlas_30_v2.nc.gz',\n 'h_n2_tpxo9_atlas_30_v2.nc.gz','h_m2_tpxo9_atlas_30_v2.nc.gz',\n 'h_s2_tpxo9_atlas_30_v2.nc.gz','h_k2_tpxo9_atlas_30_v2.nc.gz',\n 'h_m4_tpxo9_atlas_30_v2.nc.gz','h_ms4_tpxo9_atlas_30_v2.nc.gz',\n 'h_mn4_tpxo9_atlas_30_v2.nc.gz','h_2n2_tpxo9_atlas_30_v2.nc.gz']\n model_format = 'netcdf'\n model_type = 'z'\n SCALE = 1.0/1000.0\n elif (MODEL == 'TPXO9-atlas-v3'):\n model_directory = os.path.join(DIRECTORY,'TPXO9_atlas_v3')\n grid_file = 'grid_tpxo9_atlas_30_v3.nc.gz'\n model_files = ['h_q1_tpxo9_atlas_30_v3.nc.gz','h_o1_tpxo9_atlas_30_v3.nc.gz',\n 'h_p1_tpxo9_atlas_30_v3.nc.gz','h_k1_tpxo9_atlas_30_v3.nc.gz',\n 'h_n2_tpxo9_atlas_30_v3.nc.gz','h_m2_tpxo9_atlas_30_v3.nc.gz',\n 'h_s2_tpxo9_atlas_30_v3.nc.gz','h_k2_tpxo9_atlas_30_v3.nc.gz',\n 'h_m4_tpxo9_atlas_30_v3.nc.gz','h_ms4_tpxo9_atlas_30_v3.nc.gz',\n 'h_mn4_tpxo9_atlas_30_v3.nc.gz','h_2n2_tpxo9_atlas_30_v3.nc.gz',\n 'h_mf_tpxo9_atlas_30_v3.nc.gz','h_mm_tpxo9_atlas_30_v3.nc.gz']\n model_format = 'netcdf'\n TYPE = 'z'\n SCALE = 1.0/1000.0\n elif (MODEL == 'TPXO9.1'):\n grid_file = os.path.join(DIRECTORY,'TPXO9.1','DATA','grid_tpxo9')\n model_file = os.path.join(DIRECTORY,'TPXO9.1','DATA','h_tpxo9.v1')\n model_format = 'OTIS'\n model_EPSG = '4326'\n model_type = 'z'\n elif (MODEL == 'TPXO8-atlas'):\n grid_file = os.path.join(DIRECTORY,'tpxo8_atlas','grid_tpxo8atlas_30_v1')\n model_file = os.path.join(DIRECTORY,'tpxo8_atlas','hf.tpxo8_atlas_30_v1')\n model_format = 'ATLAS'\n model_EPSG = '4326'\n 
model_type = 'z'\n elif (MODEL == 'TPXO7.2'):\n grid_file = os.path.join(DIRECTORY,'TPXO7.2_tmd','grid_tpxo7.2')\n model_file = os.path.join(DIRECTORY,'TPXO7.2_tmd','h_tpxo7.2')\n model_format = 'OTIS'\n model_EPSG = '4326'\n model_type = 'z'\n elif (MODEL == 'TPXO7.2_load'):\n grid_file = os.path.join(DIRECTORY,'TPXO7.2_load','grid_tpxo6.2')\n model_file = os.path.join(DIRECTORY,'TPXO7.2_load','h_tpxo7.2_load')\n model_format = 'OTIS'\n model_EPSG = '4326'\n model_type = 'z'\n elif (MODEL == 'AODTM-5'):\n grid_file = os.path.join(DIRECTORY,'aodtm5_tmd','grid_Arc5km')\n model_file = os.path.join(DIRECTORY,'aodtm5_tmd','h0_Arc5km.oce')\n model_format = 'OTIS'\n model_EPSG = 'PSNorth'\n model_type = 'z'\n elif (MODEL == 'AOTIM-5'):\n grid_file = os.path.join(DIRECTORY,'aotim5_tmd','grid_Arc5km')\n model_file = os.path.join(DIRECTORY,'aotim5_tmd','h_Arc5km.oce')\n model_format = 'OTIS'\n model_EPSG = 'PSNorth'\n model_type = 'z'\n elif (MODEL == 'AOTIM-5-2018'):\n grid_file = os.path.join(DIRECTORY,'Arc5km2018','grid_Arc5km2018')\n model_file = os.path.join(DIRECTORY,'Arc5km2018','h_Arc5km2018')\n model_format = 'OTIS'\n model_EPSG = 'PSNorth'\n model_type = 'z'\n elif (MODEL == 'GOT4.7'):\n model_directory = os.path.join(DIRECTORY,'GOT4.7','grids_oceantide')\n model_files = ['q1.d.gz','o1.d.gz','p1.d.gz','k1.d.gz','n2.d.gz',\n 'm2.d.gz','s2.d.gz','k2.d.gz','s1.d.gz','m4.d.gz']\n c = ['q1','o1','p1','k1','n2','m2','s2','k2','s1','m4']\n model_format = 'GOT'\n SCALE = 1.0/100.0\n elif (MODEL == 'GOT4.7_load'):\n model_directory = os.path.join(DIRECTORY,'GOT4.7','grids_loadtide')\n model_files = ['q1load.d.gz','o1load.d.gz','p1load.d.gz','k1load.d.gz',\n 'n2load.d.gz','m2load.d.gz','s2load.d.gz','k2load.d.gz',\n 's1load.d.gz','m4load.d.gz']\n c = ['q1','o1','p1','k1','n2','m2','s2','k2','s1','m4']\n model_format = 'GOT'\n SCALE = 1.0/1000.0\n elif (MODEL == 'GOT4.8'):\n model_directory = os.path.join(DIRECTORY,'got4.8','grids_oceantide')\n model_files = ['q1.d.gz','o1.d.gz','p1.d.gz','k1.d.gz','n2.d.gz',\n 'm2.d.gz','s2.d.gz','k2.d.gz','s1.d.gz','m4.d.gz']\n c = ['q1','o1','p1','k1','n2','m2','s2','k2','s1','m4']\n model_format = 'GOT'\n SCALE = 1.0/100.0\n elif (MODEL == 'GOT4.8_load'):\n model_directory = os.path.join(DIRECTORY,'got4.8','grids_loadtide')\n model_files = ['q1load.d.gz','o1load.d.gz','p1load.d.gz','k1load.d.gz',\n 'n2load.d.gz','m2load.d.gz','s2load.d.gz','k2load.d.gz',\n 's1load.d.gz','m4load.d.gz']\n c = ['q1','o1','p1','k1','n2','m2','s2','k2','s1','m4']\n model_format = 'GOT'\n SCALE = 1.0/1000.0\n elif (MODEL == 'GOT4.10'):\n model_directory = os.path.join(DIRECTORY,'GOT4.10c','grids_oceantide')\n model_files = ['q1.d.gz','o1.d.gz','p1.d.gz','k1.d.gz','n2.d.gz',\n 'm2.d.gz','s2.d.gz','k2.d.gz','s1.d.gz','m4.d.gz']\n c = ['q1','o1','p1','k1','n2','m2','s2','k2','s1','m4']\n model_format = 'GOT'\n SCALE = 1.0/100.0\n elif (MODEL == 'GOT4.10_load'):\n model_directory = os.path.join(DIRECTORY,'GOT4.10c','grids_loadtide')\n model_files = ['q1load.d.gz','o1load.d.gz','p1load.d.gz','k1load.d.gz',\n 'n2load.d.gz','m2load.d.gz','s2load.d.gz','k2load.d.gz',\n 's1load.d.gz','m4load.d.gz']\n c = ['q1','o1','p1','k1','n2','m2','s2','k2','s1','m4']\n model_format = 'GOT'\n SCALE = 1.0/1000.0\n elif (MODEL == 'FES2014'):\n model_directory = os.path.join(DIRECTORY,'fes2014','ocean_tide')\n model_files = ['2n2.nc.gz','eps2.nc.gz','j1.nc.gz','k1.nc.gz',\n 'k2.nc.gz','l2.nc.gz','la2.nc.gz','m2.nc.gz','m3.nc.gz','m4.nc.gz',\n 'm6.nc.gz','m8.nc.gz','mf.nc.gz','mks2.nc.gz','mm.nc.gz',\n 
'mn4.nc.gz','ms4.nc.gz','msf.nc.gz','msqm.nc.gz','mtm.nc.gz',\n 'mu2.nc.gz','n2.nc.gz','n4.nc.gz','nu2.nc.gz','o1.nc.gz','p1.nc.gz',\n 'q1.nc.gz','r2.nc.gz','s1.nc.gz','s2.nc.gz','s4.nc.gz','sa.nc.gz',\n 'ssa.nc.gz','t2.nc.gz']\n c = ['2n2','eps2','j1','k1','k2','l2','lambda2','m2','m3','m4','m6','m8',\n 'mf','mks2','mm','mn4','ms4','msf','msqm','mtm','mu2','n2','n4',\n 'nu2','o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']\n model_format = 'FES'\n TYPE = 'z'\n SCALE = 1.0/100.0\n elif (MODEL == 'FES2014_load'):\n model_directory = os.path.join(DIRECTORY,'fes2014','load_tide')\n model_files = ['2n2.nc.gz','eps2.nc.gz','j1.nc.gz','k1.nc.gz',\n 'k2.nc.gz','l2.nc.gz','la2.nc.gz','m2.nc.gz','m3.nc.gz','m4.nc.gz',\n 'm6.nc.gz','m8.nc.gz','mf.nc.gz','mks2.nc.gz','mm.nc.gz',\n 'mn4.nc.gz','ms4.nc.gz','msf.nc.gz','msqm.nc.gz','mtm.nc.gz',\n 'mu2.nc.gz','n2.nc.gz','n4.nc.gz','nu2.nc.gz','o1.nc.gz','p1.nc.gz',\n 'q1.nc.gz','r2.nc.gz','s1.nc.gz','s2.nc.gz','s4.nc.gz','sa.nc.gz',\n 'ssa.nc.gz','t2.nc.gz']\n c = ['2n2','eps2','j1','k1','k2','l2','lambda2','m2','m3','m4','m6',\n 'm8','mf','mks2','mm','mn4','ms4','msf','msqm','mtm','mu2','n2',\n 'n4','nu2','o1','p1','q1','r2','s1','s2','s4','sa','ssa','t2']\n model_format = 'FES'\n model_type = 'z'\n SCALE = 1.0/100.0\n else:\n raise Exception(\"Unlisted tide model\")\n\n #-- converting x,y from EPSG to latitude/longitude\n crs1 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(EPSG))\n crs2 = pyproj.CRS.from_string(\"epsg:{0:d}\".format(4326))\n transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)\n lon,lat = transformer.transform(x.flatten(), y.flatten())\n\n #-- assert delta time is an array\n delta_time = np.atleast_1d(delta_time)\n #-- calculate leap seconds if specified\n if (TIME.upper() == 'GPS'):\n GPS_Epoch_Time = pyTMD.time.convert_delta_time(0, epoch1=EPOCH,\n epoch2=(1980,1,6,0,0,0), scale=1.0)\n GPS_Time = pyTMD.time.convert_delta_time(delta_time, epoch1=EPOCH,\n epoch2=(1980,1,6,0,0,0), scale=1.0)\n #-- calculate difference in leap seconds from start of epoch\n leap_seconds = pyTMD.time.count_leap_seconds(GPS_Time) - \\\n pyTMD.time.count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n elif (TIME.upper() == 'TAI'):\n #-- TAI time is ahead of GPS time by 19 seconds\n GPS_Epoch_Time = pyTMD.time.convert_delta_time(-19.0, epoch1=EPOCH,\n epoch2=(1980,1,6,0,0,0), scale=1.0)\n GPS_Time = pyTMD.time.convert_delta_time(delta_time-19.0, epoch1=EPOCH,\n epoch2=(1980,1,6,0,0,0), scale=1.0)\n #-- calculate difference in leap seconds from start of epoch\n leap_seconds = pyTMD.time.count_leap_seconds(GPS_Time) - \\\n pyTMD.time.count_leap_seconds(np.atleast_1d(GPS_Epoch_Time))\n else:\n leap_seconds = 0.0\n\n #-- convert time to days relative to Jan 1, 1992 (48622mjd)\n t = pyTMD.time.convert_delta_time(delta_time - leap_seconds, epoch1=EPOCH,\n epoch2=(1992,1,1,0,0,0), scale=(1.0/86400.0))\n\n #-- read tidal constants and interpolate to grid points\n if model_format in ('OTIS','ATLAS'):\n amp,ph,D,c = extract_tidal_constants(lon, lat, grid_file, model_file,\n model_EPSG, TYPE=model_type, METHOD=METHOD, EXTRAPOLATE=EXTRAPOLATE,\n GRID=model_format)\n deltat = np.zeros_like(t)\n elif (model_format == 'netcdf'):\n amp,ph,D,c = extract_netcdf_constants(lon, lat, model_directory,\n grid_file, model_files, TYPE=model_type, METHOD=METHOD, \n EXTRAPOLATE=EXTRAPOLATE, SCALE=SCALE)\n deltat = np.zeros_like(t)\n elif (model_format == 'GOT'):\n amp,ph = extract_GOT_constants(lon, lat, model_directory, model_files,\n METHOD=METHOD, 
EXTRAPOLATE=EXTRAPOLATE, SCALE=SCALE)\n #-- interpolate delta times from calendar dates to tide time\n delta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])\n deltat = calc_delta_time(delta_file, t)\n elif (model_format == 'FES'):\n amp,ph = extract_FES_constants(lon, lat, model_directory, model_files,\n TYPE=model_type, VERSION=MODEL, METHOD=METHOD,\n EXTRAPOLATE=EXTRAPOLATE, SCALE=SCALE)\n #-- interpolate delta times from calendar dates to tide time\n delta_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])\n deltat = calc_delta_time(delta_file, t)\n\n #-- calculate complex phase in radians for Euler's\n cph = -1j*ph*np.pi/180.0\n #-- calculate constituent oscillation\n hc = amp*np.exp(cph)\n\n #-- predict tidal elevations at time and infer minor corrections\n if (TYPE.lower() == 'grid'):\n ny,nx = np.shape(x); nt = len(t)\n tide = np.ma.zeros((ny,nx,nt),fill_value=FILL_VALUE)\n tide.mask = np.zeros((ny,nx,nt),dtype=np.bool)\n for i in range(nt):\n TIDE = predict_tide(t[i], hc, c,\n DELTAT=deltat[i], CORRECTIONS=model_format)\n MINOR = infer_minor_corrections(t[i], hc, c,\n DELTAT=deltat[i], CORRECTIONS=model_format)\n #-- add major and minor components and reform grid\n tide[:,:,i] = np.reshape((TIDE+MINOR), (ny,nx))\n tide.mask[:,:,i] = np.reshape((TIDE.mask | MINOR.mask), (ny,nx))\n else:\n npts = len(t)\n tide = np.ma.zeros((npts), fill_value=FILL_VALUE)\n tide.mask = np.any(hc.mask,axis=1)\n tide.data[:] = predict_tide_drift(t, hc, c,\n DELTAT=deltat, CORRECTIONS=model_format)\n minor = infer_minor_corrections(t, hc, c,\n DELTAT=deltat, CORRECTIONS=model_format)\n tide.data[:] += minor.data[:]\n #-- replace invalid values with fill value\n tide.data[tide.mask] = tide.fill_value\n\n #-- return the tide correction\n return tide", "def correct_proper_motion(self, invert=False, mjd=None):\n\n # If mjd not set directly, check that it was set from FITS headers in get_cutout method\n if self.mjd is None:\n if mjd is None:\n raise FITSException(\"Date could not be inferred from header, supply with epoch keyword.\")\n else:\n self.mjd = mjd\n\n obstime = Time(self.mjd, format='mjd')\n\n simbad = Simbad.query_region(self.position, radius=180 * u.arcsec)\n\n # Catch SIMBAD failure either from None return of query or no stellar type matches in region\n try:\n simbad = simbad.to_pandas()\n pm_types = ['*', '**', 'PM*', 'EB*', 'Star', 'PSR', 'Pulsar', 'Flare*']\n simbad = simbad[(simbad['OTYPE'].isin(pm_types)) | (simbad['SP_TYPE'].str.len() > 0)]\n\n assert len(simbad) > 0\n\n except (ValueError, AssertionError):\n logger.debug(\"No high proper-motion objects within 180 arcsec.\")\n self.correct_pm = False\n\n return\n\n # Treat non-existent proper motion parameters as extremely distant objects\n simbad['PMRA'].fillna(0, inplace=True)\n simbad['PMDEC'].fillna(0, inplace=True)\n simbad['PLX_VALUE'].fillna(0.01, inplace=True)\n\n newtime = Time(self.radio.mjd, format='mjd')\n pmra = simbad['PMRA'].values * u.mas / u.yr\n pmdec = simbad['PMDEC'].values * u.mas / u.yr\n\n dist = Distance(parallax=simbad['PLX_VALUE'].values * u.mas)\n\n simbad['j2000pos'] = SkyCoord(\n ra=simbad['RA_d'].values * u.deg,\n dec=simbad['DEC_d'].values * u.deg,\n frame='icrs',\n distance=dist,\n pm_ra_cosdec=pmra,\n pm_dec=pmdec,\n obstime='J2000',\n )\n\n datapos = simbad.j2000pos.apply(lambda x: x.apply_space_motion(obstime))\n newpos = simbad.j2000pos.apply(lambda x: x.apply_space_motion(newtime))\n\n simbad_cols = {\n 'MAIN_ID': 'Object',\n 'OTYPE': 'Type',\n 'SP_TYPE': 'Spectral 
Type',\n 'DISTANCE_RESULT': 'Separation (arcsec)',\n }\n simbad = simbad.rename(columns=simbad_cols)\n simbad = simbad[simbad_cols.values()].copy()\n simbad['PM Corrected Separation (arcsec)'] = np.round(newpos.apply(\n lambda x: x.separation(self.position).arcsec), 3)\n\n # Only display PM results if object within 15 arcsec\n if simbad['PM Corrected Separation (arcsec)'].min() > 15:\n logger.debug(\"No PM corrected objects within 15 arcsec\")\n self.correct_pm = False\n\n return\n\n self.simbad = simbad.sort_values('PM Corrected Separation (arcsec)')\n logger.info(f'SIMBAD results:\\n {self.simbad.head()}')\n\n nearest = self.simbad['PM Corrected Separation (arcsec)'].idxmin()\n\n self.oldpos = datapos[nearest]\n self.pm_coord = newpos[nearest]\n\n near_object = self.simbad.loc[nearest].Object\n msg = f'{near_object} proper motion corrected to <{self.pm_coord.ra:.4f}, {self.pm_coord.dec:.4f}>'\n logger.info(msg)\n\n missing = simbad[simbad['PM Corrected Separation (arcsec)'].isna()]\n if len(missing) > 0:\n msg = f\"Some objects missing PM data, and may be a closer match than presented:\\n {missing}\"\n logger.warning(msg)\n\n return", "def _perturbInPlaceHard(self):\n die", "def timerCallback(self,evprent):\n self._odom_list.waitForTransform('map', 'base_footprint', rospy.Time(0), rospy.Duration(1.0))\n (position, orientation) = self._odom_list.lookupTransform('map','base_footprint', rospy.Time(0)) #finds the position and oriention of two objects relative to each other (hint: this returns arrays, while Pose uses lists)\n self._current.position.x = position[0]\n self._current.position.y = position[1]\n\n self._current.orientation.x = orientation[0]\n self._current.orientation.y = orientation[1]\n self._current.orientation.z = orientation[2]\n self._current.orientation.w = orientation[3]\n q = [self._current.orientation.x,\n self._current.orientation.y,\n self._current.orientation.z,\n self._current.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)", "def update(self, dayHour, comicId):\n #TODO test\n self.loadComic(comicId)\n\n #Sprint \"Predictor.update(\", comicId, \")\"\n\n\n for i in range(1):\n # Stop weeding?\n stopWeeding = False\n\n if self.__predictorData == None:\n print \"Line 252 (NONE)\"\n\n if (self.__predictorData.isWeeding()):\n tgm = time.gmtime()\n sec = time.mktime(time.strptime(str(tgm.tm_year) +' '+ str(tgm.tm_mon) +' '+ str(tgm.tm_mday) +' '+ str(tgm.tm_hour) +' '+ str(tgm.tm_min) +' '+ str(tgm.tm_sec), '%Y %m %d %H %M %S'))\n # print \"weeding times:\", self.__predictorData.getWeedingStart(), sec\n if (sec >= self.__predictorData.getWeedingStart() + 7*24*self.hourScale):\n self.stopWeeding(dayHour)\n stopWeeding = True\n\n if (self.__predictorData.isWeeding() or stopWeeding):\n # Weeding\n uRanges = self.__predictorData.getUpdateRanges()\n if (len(uRanges) > 0):\n if (self.__predictorData.addDayHour(dayHour)):\n break\n # if (self.inURange(self.incDayHour(dayHour, -self.rangeWidth), uRanges[-1])):\n # uRangeLast = self.incDayHour(uRanges[-1]['position'], uRanges[-1]['width'])\n # #self.__predictorData.addUpdateRange(self.addUpdateRange(incDayHour(uRangeLast, Predictor.rangeWidth + 1)))\n # self.__predictorData.addUpdateRange(self.incDayHour(uRangeLast, Predictor.rangeWidth + 1))\n # break \n self.__predictorData.addUpdateRange(self.blankUpdateRange(dayHour))\n else:\n # Regular update\n if (not self.__predictorData.addDayHour(dayHour)):\n # Add dayHour to nearest range\n updateRanges = self.__predictorData.getUpdateRanges()\n 
dist = {}\n dh = self.dayHourToHours(dayHour)\n for i, uRange in enumerate(updateRanges):\n ur_pos_hours = self.dayHourToHours(uRange['position'])\n ur_width = uRange['width']\n hours = self.dayHourToHours(dayHour)\n dist[min(abs(ur_pos_hours - ur_width - hours), abs(ur_pos_hours + ur_width - hours))] = i\n self.__predictorData.addDayHourToURange(dayHour, dist[min(dist.keys())])\n\n \n self.__predictorData.setUpdateRanges(self.calculateURPositions(self.__predictorData.getUpdateRanges()))\n self.__predictorData.setSchedule(self.calculateScheduleUR(self.__predictorData.getUpdateRanges()))\n \n self.updatePredictorList(self.__predictorData.getSchedule(), comicId)\n self.saveComic(comicId)", "def act_func_part4_v2_t(action_raw, action_raw_idx, raw_state_limits, stptLmt, ob_this_raw, logger, is_show_debug):\n IAT_STPT_IDX = 4;\n OCP_IDX = 7;\n PMV_IDX = 6;\n ACTIONS = [[-2.0],\n [-1.0],\n [-0.5],\n [0.0],\n [0.5],\n [1.0],\n [2.0],];\n\n ocp = ob_this_raw[OCP_IDX];\n pmv = ob_this_raw[PMV_IDX];\n iat_stpt_ob = ob_this_raw[IAT_STPT_IDX]; \n act_num = len(ACTIONS);\n # Process based on PMV\n action_prcd = action_raw;\n action_prcd_idx = action_raw_idx;\n if ocp == 1:\n if pmv < -0.5 and action_raw[0] < 0:\n # If pmv < -0.5 and the action is to reduce setpoint\n # Choose one action to increase the setpoint\n action_prcd_idx = np.random.randint(3, act_num);\n action_prcd = ACTIONS[action_prcd_idx];\n elif pmv > 0.5 and action_raw[0] > 0:\n # If pmv > 0.5 and the action is to increase setpoint\n # Choose one action to reduce the setpoint\n action_prcd_idx = np.random.randint(0, 4);\n action_prcd = ACTIONS[action_prcd_idx];\n # Get final action\n iat_stpt_act_raw = iat_stpt_ob + action_prcd[0];\n action_ret = [max(min(iat_stpt_act_raw, stptLmt[0][1]), stptLmt[0][0])];\n action_ret_idx = action_prcd_idx;\n # Log\n if action_raw_idx != action_ret_idx:\n if is_show_debug:\n logger.debug('Action function: raw action %s has been changed to %s for '\n 'the PMV %s.'%(action_raw_idx, action_ret_idx, pmv));\n return (action_ret, action_ret_idx);", "def update(self, dt):\n event = super(Kick, self).update(dt)\n if event is not None:\n return event\n\n if not self._motion.is_running():\n if not self._motion_completed:\n effectors = \"LLeg\"\n delta_pos1 = np.asarray([0.15, 0.0, 0.0, 0.0, -0.03, 0.0])\n\n if self._support_leg == \"left\":\n effectors = \"RLeg\"\n delta_pos1[4] *= -1\n\n delta_pos = [[delta_pos1,\n np.asarray([0.09, 0.0, 0.00, 0.0, 0.0, 0.0])]]\n times = [[0.2, 0.3]]\n self._motion.interpolate_positions(effectors, delta_pos, times, NaoMotionController.FRAME_TORSO)\n\n self._motion_completed = True\n return\n\n return \"done\"", "def update_for_odor_detection(self, dt, odor, wind_uvecs, masks):\n x_wind_unit = wind_uvecs['x']\n y_wind_unit = wind_uvecs['y']\n mask_startmode = masks['startmode']\n mask_castfor = masks['castfor']\n\n mask_gt_upper = odor >= self.param['odor_thresholds']['upper']\n mask_candidates = mask_gt_upper & (mask_startmode | mask_castfor)\n dice_roll = scipy.full((self.size,),scipy.inf)\n dice_roll[mask_candidates] = scipy.rand(mask_candidates.sum())\n\n # Convert probabilty/sec to probabilty for time step interval dt\n odor_probability_upper = 1.0 - (1.0 - self.param['odor_probabilities']['upper'])**dt\n mask_change = dice_roll < odor_probability_upper\n self.mode[mask_change] = self.Mode_FlyUpWind\n\n # Compute new heading error for flies which change mode\n heading_error_std = self.param['heading_error_std']\n self.heading_error[mask_change] = 
heading_error_std*scipy.randn(mask_change.sum())\n\n # Set x and y velocities for the flies which just changed to FlyUpWind.\n x_unit_change, y_unit_change = rotate_vecs(\n x_wind_unit[mask_change],\n y_wind_unit[mask_change],\n self.heading_error[mask_change]\n )\n speed = self.param['flight_speed'][mask_change]\n self.x_velocity[mask_change] = -speed*x_unit_change\n self.y_velocity[mask_change] = -speed*y_unit_change", "def _reset(self) -> ts.TimeStep:", "def update_apc23(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n beta1 = (3.0*(self.time-self.get_old(0,2))+delta_t)/(6.0*self.time-self.get_old(0,2))\n beta2 = -delta_t**2/(6.0*(self.time+delta_t-self.get_old(0,2))*(self.time-self.get_old(0,2)))\n\n print self.force(pos, vel, self.time+delta_t), kap[1], self.get_old(0,1)\n\n pos = self.pos+delta_t*((1.0-beta1-beta2)*vel+beta1*self.vel+beta2*self.get_old(0, 0))\n vel = self.vel+delta_t*((1.0-beta1-beta2)*self.force(pos, vel, self.time+delta_t,\n drag=False)+beta1*kap[1]+beta2*self.get_old(0,1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap", "def target_position(self, time):\n \"\"\"\n start_pos = self.points[self.cur_start]\n seg_time = time - self.last_checkpoint_time\n\n #The arguement of target-velocity dosent matter\n cur_pos = self.target_velocity(time)*seg_time + start_pos\n\n \n # or time > (self.total_time / 4)*(self.cur_start + 1)\n cur_pos_norm = length(cur_pos - start_pos)\n\n next_corner = self.points[(self.cur_start + 1)%4]\n \n seg_norm = length(next_corner - start_pos)\n print(\"cur_pos : \", cur_pos, \"segment: \", self.cur_start, seg_norm - cur_pos_norm)\n\n if cur_pos_norm >= seg_norm:\n self.cur_start = (self.cur_start + 1) % 4\n self.last_checkpoint_time = time\n return cur_pos\n \"\"\"\n\n #Possibly use rospy.sleep()\n total_time = self.total_time\n\n\n if time < total_time/4:\n return self.path1.target_position(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time < total_time/2:\n return self.path2.target_position(time - (total_time/4 + 0.5))\n # return self.path2.target_position(time - (total_time/4 ))\n\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= total_time/4*3:\n return self.path3.target_position(time - (total_time/2 + 1))\n # 
return self.path3.target_position(time - (total_time/2))\n\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n else:\n return self.path4.target_position(time - (total_time/4*3 + 1.5))\n # return self.path4.target_position(time - (total_time/4*3))", "def __update_current_motion_start(self) -> None:\n if len(self.past_movements) < 2:\n return\n if self.past_movements[-1] * self.past_movements[-2] < 0:\n self.current_motion_start_element = self.counterpoint[-2]", "def sweep(self):\n position = 0\n countdown = False\n\n for i in range(14):\n\n if position == 14:\n countdown = True\n if countdown:\n position -= 2\n else:\n position += 2\n\n self.servo.ChangeDutyCycle(position)\n GPIO.output(self.pin, False)\n sleep(0.1)", "def update_for_odor_loss(self, t, dt, odor, wind_uvecs, masks):\n\n x_wind_unit = wind_uvecs['x']\n y_wind_unit = wind_uvecs['y']\n mask_flyupwd = masks['flyupwd']\n mask_castfor = masks['castfor']\n\n mask_lt_lower = odor <= self.param['odor_thresholds']['lower']\n mask_candidates = mask_lt_lower & mask_flyupwd\n dice_roll = scipy.full((self.size,),scipy.inf)\n dice_roll[mask_candidates] = scipy.rand(mask_candidates.sum())\n\n # Convert probabilty/sec to probabilty for time step interval dt\n odor_probability_lower = 1.0 - (1.0 - self.param['odor_probabilities']['lower'])**dt\n mask_change = dice_roll < odor_probability_lower\n self.mode[mask_change] = self.Mode_CastForOdor\n\n # Lump together flies changing to CastForOdor mode with casting flies which are\n # changing direction (e.g. time to make cast direction change)\n mask_change |= mask_castfor & (t > (self.t_last_cast + self.dt_next_cast))\n\n # Computer new heading errors for flies which change mode\n self.heading_error[mask_change] = self.param['heading_error_std']*scipy.randn(mask_change.sum())\n\n # Set new cast intervals and directions for flies chaning to CastForOdor or starting a new cast\n cast_interval = self.param['cast_interval']\n self.dt_next_cast[mask_change] = scipy.random.uniform(\n cast_interval[0],\n cast_interval[0],\n (mask_change.sum(),)\n )\n self.t_last_cast[mask_change] = t\n self.cast_sign[mask_change] = scipy.random.choice([-1,1],(mask_change.sum(),))\n\n # Set x and y velocities for new CastForOdor flies\n x_unit_change, y_unit_change = rotate_vecs(\n x_wind_unit[mask_change],\n -y_wind_unit[mask_change],\n self.heading_error[mask_change]\n )\n speed = self.param['flight_speed'][mask_change]\n self.x_velocity[mask_change] = self.cast_sign[mask_change]*speed*x_unit_change\n self.y_velocity[mask_change] = self.cast_sign[mask_change]*speed*y_unit_change", "def data_move_case_zero(tik_instance, ub_ori, ub_trans,\n is_last, num_outer_axis, num_loop_time,\n loop_time, loop_col, loop_len):\n with tik_instance.if_scope(tik.all(loop_time ==\n self.dst_shape[-4] //\n loop_col - 1,\n self.dst_shape[-4] % loop_col ==\n 0)):\n is_last.set_as(1)\n num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \\\n self.dst_shape[-2] * self.dst_shape[-1]\n src_ub_index = 0\n if self.src_shape[-1] % CUBE_SIZE != 0 or \\\n (self.src_shape[-1] - loop_len * CUBE_SIZE) // \\\n self.num_data > MAX_STRIDE_BLK:\n if self.src_shape[-2] % CUBE_SIZE != 0:\n with tik_instance.if_scope(num_loop_time ==\n self.dst_shape[-3] - 1):\n with tik_instance.for_range(0,\n self.src_shape[-2] %\n CUBE_SIZE) as num_cube_col:\n src_gm_index = num_outer_axis *\\\n self.src_shape[-1] * \\\n self.src_shape[-2] + \\\n (num_loop_time * CUBE_SIZE +\n num_cube_col) *\\\n self.src_shape[-1] + loop_time *\\\n loop_col * CUBE_SIZE\n 
tik_instance.data_move(ub_ori[loop_len *\n CUBE_SIZE *\n num_cube_col],\n self.src_gm[src_gm_index],\n 0, 1,\n loop_len * self.num_byte //\n 2, 0, 0)\n with tik_instance.else_scope():\n with tik_instance.for_range(0, CUBE_SIZE) \\\n as num_cube_col:\n src_gm_index = num_outer_axis * \\\n self.src_shape[-1] * \\\n self.src_shape[-2] + \\\n (num_loop_time * CUBE_SIZE +\n num_cube_col) *\\\n self.src_shape[-1] + loop_time *\\\n loop_col * CUBE_SIZE\n tik_instance.data_move(ub_ori[loop_len *\n CUBE_SIZE *\n num_cube_col],\n self.src_gm[src_gm_index],\n 0, 1,\n loop_len * self.num_byte //\n 2, 0, 0)\n else:\n with tik_instance.for_range(0, CUBE_SIZE) as num_cube_col:\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + \\\n (num_loop_time * CUBE_SIZE +\n num_cube_col) * self.src_shape[-1] + \\\n loop_time * loop_col * CUBE_SIZE\n tik_instance.data_move(ub_ori[loop_len * CUBE_SIZE *\n num_cube_col],\n self.src_gm[src_gm_index],\n 0, 1,\n loop_len * self.num_byte //\n 2, 0, 0)\n else:\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + num_loop_time *\\\n CUBE_SIZE * self.src_shape[-1] +\\\n loop_time * loop_col * CUBE_SIZE\n if self.src_shape[-2] % CUBE_SIZE != 0:\n with tik_instance.if_scope(num_loop_time ==\n self.dst_shape[-3] - 1):\n tik_instance.data_move(ub_ori[src_ub_index],\n self.src_gm[src_gm_index], 0,\n self.src_shape[-2] % CUBE_SIZE,\n loop_len * self.num_byte // 2,\n (self.src_shape[-1] -\n loop_len * CUBE_SIZE) //\n self.num_data,\n 0)\n with tik_instance.else_scope():\n tik_instance.data_move(ub_ori[src_ub_index],\n self.src_gm[src_gm_index],\n 0, CUBE_SIZE,\n loop_len * self.num_byte // 2,\n (self.src_shape[-1] -\n loop_len * CUBE_SIZE) //\n self.num_data,\n 0)\n else:\n tik_instance.data_move(ub_ori[src_ub_index],\n self.src_gm[src_gm_index],\n 0, CUBE_SIZE,\n loop_len * self.num_byte // 2,\n (self.src_shape[-1] - loop_len *\n CUBE_SIZE) // self.num_data, 0)\n self.data_rearrange_case_four(tik_instance, ub_ori,\n ub_trans, num_loop_time,\n loop_len, is_last)\n\n if((self.dst_shape[-3] - 1) * self.dst_shape[-1] *\n self.dst_shape[-2] // self.num_data > MAX_STRIDE_BLK):\n with tik_instance.for_range(0, loop_len) as \\\n num_col_cube:\n dst_gm_index = num_outer_axis * num_data_one_loop + \\\n num_loop_time * self.dst_shape[-1] * \\\n self.dst_shape[-2] + \\\n (loop_time * loop_col + num_col_cube) * \\\n self.dst_shape[-1] * self.dst_shape[-2] * \\\n self.dst_shape[-3]\n tik_instance.data_move(self.dst_gm[dst_gm_index],\n ub_trans[num_col_cube *\n CUBE_SIZE *\n (CUBE_SIZE + 1)],\n 0, 1,\n self.dst_shape[-1] *\n self.dst_shape[-2] //\n self.num_data,\n 0, 0)\n else:\n dst_gm_index = num_outer_axis * num_data_one_loop + \\\n num_loop_time * self.dst_shape[-1] * \\\n self.dst_shape[-2] + loop_time * \\\n loop_col * self.dst_shape[-1] * \\\n self.dst_shape[-2] * \\\n self.dst_shape[-3]\n tik_instance.data_move(self.dst_gm[dst_gm_index],\n ub_trans[0],\n 0, loop_len,\n self.dst_shape[-1] *\n self.dst_shape[-2] // self.num_data,\n self.num_byte // 2,\n (self.dst_shape[-3] - 1) *\n self.dst_shape[-1] *\n self.dst_shape[-2] // self.num_data)", "def calibrate(self):\n self.mode = Mode.calibrating\n yaw_sensor = yaw_button()\n while not yaw_sensor.is_pressed():\n self.move_left()\n for _ in range(75):\n self.move_right()\n\n pitch_sensor = pitch_button()\n while not pitch_sensor.is_pressed():\n self.move_up()\n for _ in range(21):\n self.move_down()\n\n self.pitch = 0.0\n self.yaw = 0.0\n self.mode = Mode.waiting", "def update(self):\n 
if self.dizzy:\n self._spin()\n else:\n self._walk()", "def quarter_frame(self) -> None:\n if self._linear_ctr_reload_flag:\n self._linear_ctr = self.linear_ctr_reload_value\n elif self._linear_ctr > 0:\n self._linear_ctr -= 1\n\n if not self.control_flag:\n self._linear_ctr_reload_flag = False", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def calibrateAutoGuider(self, exposureLengthSeconds):\n\n print \"Calibrating autoguider orientation...\"\n self.camera.GuiderCalibrate(exposureLengthSeconds)\n while self.camera.GuiderRunning:\n sys.stdout.write(\".\")\n time.sleep(1)\n print\n print \"Calibration finished\"", "def undo_calibration_using_diagnostics(x,y,z,cd):\n undo_calibration(x, y, z, [cd[\"x_offset\"],cd[\"x_scale\"],cd[\"y_offset\"],cd[\"y_scale\"],cd[\"z_offset\"],cd[\"z_scale\"]])", "def _twist_callback(self, cmd):\n self.set_velocity(cmd.linear.x, cmd.angular.z)", "def right_twist(self):\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()\n self.turn_by_deg(180)\n #time.sleep(.1)\n self.stop()", "def run_update_step(self, time, pids, hole_rating, observations):\n\t\treturn NotImplemented", "def reduce_velocity(self):\n if self.controls[\"make_velocity_0\"]:\n # print(self.controls[\"bar_move_velocity\"])\n self.controls[\"bar_move_velocity\"] = 0", "def update_apc11(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n pos = self.pos+delta_t*self.vel\n vel = self.vel+delta_t*kap[1]\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n force = self.force(pos,\n vel,\n self.time+delta_t, drag=False)\n\n pos = self.pos+delta_t*vel\n vel = self.vel+delta_t*force\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n vel = self.vel+col.delta_t*kap[1]\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest = True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n \n self.time += delta_t\n\n return (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)", "def update(self, delta_time):\r\n self.check_keys()\r\n\r\n \r\n # TODO: Tell everything to advance or move forward one step in time\r\n \r\n for Large_asteriod in self.rocks:\r\n Large_asteriod.advance()\r\n Large_asteriod.wrap()\r\n #this is where I call my functions\r\n \r\n # TODO: Check for collisions\r\n for bullet in self.bullets:\r\n bullet.advance()\r\n bullet.wrap()\r\n \r\n \r\n \r\n self.check_collisions() \r\n self.cleanup_deadstuff() \r\n self.ship.advance()\r\n self.ship.wrap()\r\n self.frame_count += 1", "def apply_model(self, original, t1, t2, resolution_scaling_factor=1):\n img = Image()\n img.time_stamp = t2\n\n if t1 == t2:\n img.initialize_with_image(original)\n return img\n\n calc_shift_fnc = self.calculate_shift\n orig_get_fnc = original.get\n interp_fnc = my_math.linear_interpolation\n\n def generate(y, x):\n \"\"\"Function describing the transformed image\"\"\"\n realy = y / resolution_scaling_factor\n realx = x / 
resolution_scaling_factor\n\n # move to time t2\n posy = y + calc_shift_fnc(realy, realx, t2, 0) - \\\n calc_shift_fnc(realy, realx, t1, 0)\n posx = x + calc_shift_fnc(realy, realx, t2, 1) - \\\n calc_shift_fnc(realy, realx, t1, 1)\n\n x_left = int(posx) # math.floor(pos[0])\n x_right = x_left + 1 # math.ceil(pos[0])\n y_down = int(posy) # math.floor(pos[1])\n y_up = y_down + 1 # math.ceil(pos[1])\n\n v11 = orig_get_fnc(y_down, x_left, resolution_scaling_factor)\n v12 = orig_get_fnc(y_down, x_right, resolution_scaling_factor)\n v21 = orig_get_fnc(y_up, x_left, resolution_scaling_factor)\n v22 = orig_get_fnc(y_up, x_right, resolution_scaling_factor)\n\n return interp_fnc(y_down, x_left, y_up, x_right, v11, v12, v21, v22,\n posy, posx)\n\n img.image_data = np.fromfunction(np.vectorize(generate),\n (original.shape()[0]*resolution_scaling_factor,\n original.shape()[1]*resolution_scaling_factor))\n\n if resolution_scaling_factor != 1:\n img.image_data = skimage.transform.resize(img.image_data,\n original.shape(),\n preserve_range=True)\n\n return img", "def autonomousPeriodic(self):\n '''\n for i in self.dataSet:\n if i[4][0] < self.timer.get() and self.timer.get() <= i[4][1]:\n self.drive.arcadeDrive(i[0],i[1])\n self.SV1.set(i[3])\n self.sd.putValue(\"Camera\",i[5])\n else:\n self.drive.arcadeDrive(0,0)\n '''\n\n #self.auto = self.sd.getNumber(\"auto\",0)\n #test\n #if(self.auto != 1):\n if self.auto == 6:\n if self.autoState == 0:\n if self.gyro.getAngle() >= -41 and self.gyro.getAngle() <= 0:\n self.drive.arcadeDrive(0.5,-0.4)\n else:\n self.autoState = 1\n self.EC1.reset()\n if self.autoState == 1:\n if self.EC1.getDistance() <= 282 and self.EC1.getDistance() >= 0:\n self.drive.arcadeDrive(0.6,0)\n else:\n self.autoState = 2\n if self.autoState == 2:\n if self.gyro.getAngle() >= -41 and self.gyro.getAngle() <= 0:\n self.drive.arcadeDrive(0.5,0.4)\n else:\n self.autoState = 3\n self.EC1.reset()\n if self.autoState == 3:\n if self.EC1.getDistance() <= 120 and self.EC1.getDistance() >= 0:\n self.drive.arcadeDrive(0,6,0)\n else:\n self.autoState = 4\n if self.autoState == 4:\n if self.EC2.getDistance() <= 831 and self.EC2.getDistance() >= 0: #shoulder\n self.S1.set(-0.25)\n self.S2.set(-0.25)\n else:\n self.autoState = 5\n if self.autoState == 5:\n if self.EC2.getDistance() >= 831 and self.EC2.getDistance() <= 887:\n self.goldenArrowhead.set(False)\n self.S1.set(-0.25)\n self.S2.set(-0.25)\n else:\n self.autoState = 6", "def update(self, deltatime):\n pass", "def update(self, index, ttime):\n if self.dead:\n return 1\n self.pos[0] += self.vector[0]\n self.pos[1] += self.vector[1]\n self.rect.x += self.vector[0]\n self.rect.y += self.vector[1]\n if sum(self.vector):\n if self.vector[0] < 0:\n self.vector[0] += 1\n elif self.vector[0] > 0:\n self.vector[0] -= 1\n if self.vector[1] < 0:\n self.vector[1] += 1\n elif self.vector[1] > 0:\n self.vector[1] -= 1\n if self.game.Player.collides(self.rect) and not sum(self.vector):\n self.game.Invent.add(self.name)\n self.game.Player.headDraw(self.name, self.game.Player.player_r)\n self.dead = 1", "def update(self, seconds):\n # Gravity\n self.calcGravity(seconds)\n \n # Move left/right\n self.rect.x += self.change_x\n \n # See if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n # If we are moving right,\n # set our right side to the left side of the item we hit\n if self.change_x > 0:\n self.rect.right = block.rect.left\n elif self.change_x < 0:\n # Otherwise if we are 
moving left, do the opposite.\n self.rect.left = block.rect.right\n \n # Move up/down\n self.rect.y += self.change_y\n \n # update arm position\n self.arm.update(seconds)\n \n # Check and see if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n \n # Reset our position based on the top/bottom of the object.\n if self.change_y > 0:\n self.rect.bottom = block.rect.top\n elif self.change_y < 0:\n self.rect.top = block.rect.bottom\n \n # Stop our vertical movement\n self.change_y = 0", "def updatePos(self):\n self.timeDriving +=1\n self.pos[0] += self.vx\n self.pos[1] += self.vy", "def moveBasedOnRetreatAction(self, time_passed):\n cpos = self.toScreenCoordinate()\n mpos = pygame.mouse.get_pos()\n toMouse = Vector2.from_points(cpos,mpos)\n toMouse.normalize()\n rheading = -toMouse\n \n heading = self.heading\n angle_between = heading.angle_between(rheading)\n if angle_between>=-30 and angle_between<=30:\n return\n \n distance = time_passed * self.speed\n movement = rheading * distance\n x = movement.get_x()\n y = movement.get_y()\n if not self.checkCollision(x, y) and self.checkValidCoord(x, y):\n self.move(x, y)", "def update(self, current):\n\n self.times[self.index] = time.time()\n\n self.current = current\n\n # for circular PID, keep values within [-pi, pi]\n if self.circular:\n while self.current > 2*math.pi:\n self.current = self.current - 2*math.pi\n while self.current < 0:\n self.current = self.current + 2*math.pi\n\n # COMPUTE PROPORTIONAL\n\n self.error_p = self.__target - self.current\n\n # for circular PID, keep error values within [-pi, pi]\n if self.circular: \n while self.error_p > math.pi:\n self.error_p = self.error_p - 2*math.pi\n while self.error_p < -math.pi:\n self.error_p = self.error_p + 2*math.pi\n\n self.errors[self.index] = self.error_p\n if callable(self.debug_callback):\n self.debug_callback(self.errors)\n\n # COMPUTE INTEGRAL\n\n # time step here is only the diff between current and past sample\n time_step = self.times[self.index] - self.times[(self.index - 1) % self.samples]\n # impose upper bound on time step (to avoid jump from 0 to unix time)\n time_step = min(time_step, 0.1)\n self.error_i += self.error_p * time_step\n self.error_i = max(min(self.error_i, self.I_LIMIT), -self.I_LIMIT)\n\n # COMPUTE DIFFERENTIAL\n\n # time_step here is over all self.samples=5 samples\n time_step = self.times[self.index] - self.times[(self.index + 1) % self.samples]\n # impose lower bound on time step (to avoid divide by zero error)\n time_step = max(time_step, 0.001)\n self.error_d = (self.errors[self.index] \\\n - self.errors[(self.index + 1) % self.samples]) \\\n / (time_step)\n\n # increment index for next irritation\n self.index = (self.index + 1) % self.samples\n\n # COMPUTE CORRECTION\n\n correction = self.KP * self.error_p \\\n + self.KI * self.error_i \\\n + self.KD * self.error_d\n\n # safety feature in case update() is not called frequently enough\n if time_step > 0.2:\n if callable(self.debug_callback):\n self.debug_callback(\"infrequent updates, returning 0\")\n return 0\n\n if callable(self.debug_callback):\n self.debug_callback(\"target = {:2.4f} current = {:2.4f}\".format( \\\n self.__target, self.current))\n self.debug_callback(\"errors = \" + str(self.errors))\n self.debug_callback(\"e = {:2.4f} e_i = {:2.4f} e_d = {:2.4f} corr = {:2.4f}\".format( \\\n self.error_p, self.error_i, self.error_d, correction))\n\n return correction" ]
[ "0.60318804", "0.54270923", "0.53947157", "0.53792375", "0.5303855", "0.5262295", "0.5260591", "0.52238935", "0.5190531", "0.5141736", "0.5131086", "0.50906426", "0.5085911", "0.5074954", "0.50716", "0.506681", "0.50448614", "0.50436366", "0.50436366", "0.50377357", "0.5031351", "0.50200623", "0.5010756", "0.5002735", "0.50016814", "0.500161", "0.5000479", "0.4999179", "0.4985841", "0.4985841", "0.49856302", "0.49830484", "0.4982839", "0.4979517", "0.49773633", "0.4971263", "0.49584177", "0.4935071", "0.4925788", "0.49207863", "0.49176532", "0.4915776", "0.49156523", "0.4913673", "0.49063012", "0.48929578", "0.48760146", "0.48671046", "0.48542184", "0.4841125", "0.48369187", "0.4832591", "0.48321497", "0.48291305", "0.48270628", "0.48238102", "0.48227936", "0.48227596", "0.4818937", "0.4813212", "0.48026145", "0.48011157", "0.47962642", "0.47925806", "0.47901633", "0.4789329", "0.47883862", "0.47853282", "0.47839934", "0.4783918", "0.4778102", "0.47777617", "0.47774047", "0.47733536", "0.47677723", "0.47645885", "0.47593936", "0.47585133", "0.47577548", "0.47572333", "0.4757021", "0.4756637", "0.47500563", "0.47500563", "0.4748517", "0.47434902", "0.47411945", "0.47408488", "0.47405225", "0.4735711", "0.4731181", "0.47308704", "0.47302568", "0.47292036", "0.4726586", "0.4725529", "0.4722268", "0.47208422", "0.47192377", "0.4715402" ]
0.6062027
0
Compute motion-correction transformation matrices, catenate with the transform from fieldmap to structural, then interpolate the data to the final grid.
def MotcorCatenate(self, info, base, anat_tgt): # First compute the transformation matrices due to epi-to-epi motion. fmt = '3dvolreg -prefix NULL -1Dmatrix_save %s -twopass ' + \ '-verbose -base %s+orig[%s] -dfile %s %s+orig' cmd = fmt % (info['matfile_m'], info['basefile'], base, \ info['mot_file'], info['imgfile_t']) self.CheckExec(cmd, [info['matfile_m']]) # Catenate with transformation from epi base image to the anatomical. cmd = 'cat_matvec -ONELINE %s -P %s -P > %s' % \ (self.info[anat_tgt]['matfile'], info['matfile_m'], \ info['matfile_mcat']) self.CheckExec(cmd, [info['matfile_mcat']]) # Interpolate the data to the new grid. fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s ' + \ '-warp shift_rotate -base %s+orig[%s] %s+orig' cmd = fmt % (info['imgfile_m'], info['matfile_mcat'], info['basefile'], \ base, info['imgfile_t']) self.CheckExec(cmd, ['%s+orig.BRIK'%info['imgfile_m'], \ '%s+orig.HEAD'%info['imgfile_m']])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)", "def _calculate_transforms(self):\n\n self._logger.info(\"Generating transformations.\")\n\n # Calculate partial transforms - get partial transformation chain;\n partial_transformation_pairs = \\\n map(lambda idx: self._get_slice_pair(idx),\n self.options.slice_range)\n\n # Flatten the slices pairs\n partial_transformation_pairs =\\\n list(flatten(partial_transformation_pairs))\n\n # If user decided to prealign the images by their centre of gravity\n # an additional series of transformations has to be carried out.\n if self.options.enableMomentsAlignment:\n commands = map(lambda x: self._get_cog_alignment(*x),\n partial_transformation_pairs)\n commands = filter(None, commands)\n\n self._logger.info(\"Executing the centre of gravity transforms.\")\n self.execute(commands)\n\n # Calculate affine transformation for each slices pair\n commands = map(lambda x: self._get_partial_transform(*x),\n partial_transformation_pairs)\n commands = filter(None, commands)\n self._logger.info(\"Executing the transformation commands.\")\n self.execute(commands)", "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = 
'3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def _apply_transformations(structure, rotations, translations):\n # Additional first dimension for 'structure.repeat()'\n assembly_coord = np.zeros((len(rotations),) + structure.coord.shape)\n\n # Apply corresponding transformation for each copy in the assembly\n for i, (rotation, translation) in enumerate(zip(rotations, translations)):\n coord = structure.coord\n # Rotate\n coord = matrix_rotate(coord, rotation)\n # Translate\n coord += translation\n assembly_coord[i] = coord\n\n return repeat(structure, assembly_coord)", "def assemble_matrices(self):\n \n #Pointer reassignment for convenience\n N = self.ngrids\n\n #Begin with a linked-list data structure for the transmissibilities,\n #and one-dimenstional arrays for the diagonal of B and the flux vector\n T = lil_matrix((N, N), dtype=np.double)\n B = np.zeros(N, dtype=np.double)\n Q = np.zeros(N, dtype=np.double)\n\n #Read in boundary condition types and values\n bcs = self.input_data['boundary conditions']\n bc_type_1 = bcs['left']['type'].lower()\n bc_type_2 = bcs['right']['type'].lower()\n bc_value_1 = bcs['left']['value']\n bc_value_2 = bcs['right']['value']\n \n #Loop over all grid cells\n for i in range(N):\n\n #Apply left BC\n if i == 0:\n T[i, i+1] = -self.compute_transmissibility(i, i + 1)\n\n if bc_type_1 == 'neumann':\n T[i, i] = T[i,i] - T[i, i+1]\n elif bc_type_1 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i,i] - T[i, i+1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_1\n else:\n pass #TODO: Add error checking here if no bc is specified\n\n #Apply right BC\n elif i == (N - 1):\n T[i, i-1] = -self.compute_transmissibility(i, i - 1)\n\n if bc_type_2 == 'neumann':\n T[i, i] = T[i,i] - T[i, i-1]\n elif bc_type_2 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i, i] - T[i, i-1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_2\n else:\n pass #TODO:Add error checking here if no bc is specified\n\n #If there is no boundary condition compute interblock transmissibilties\n else:\n T[i, i-1] = -self.compute_transmissibility(i, i-1)\n T[i, i+1] = -self.compute_transmissibility(i, i+1)\n T[i, i] = (self.compute_transmissibility(i, i-1) +\n self.compute_transmissibility(i, i+1))\n\n #Compute accumulations\n B[i] = self.compute_accumulation(i)\n\n #If constant-rate wells are present, add them to the flux vector\n if self.rate_well_grids 
is not None:\n Q[self.rate_well_grids] += self.rate_well_values\n\n \n #Return sparse data-structures\n return (T.tocsr(), \n csr_matrix((B, (np.arange(N), np.arange(N))), shape=(N,N)), \n Q)", "def transform(self, src):\n T, feature_dim = src.shape[0], self.Y_static_dim*3\n\n if feature_dim == self.Y_static_dim:\n return super(GMM_M, self).transform(src)\n\n # A suboptimum mixture sequence (eq.37)\n optimum_mix = self.px.predict(src)\n\n # Compute E eq.(40)\n E = np.empty((T, feature_dim))\n for t in range(T):\n m = optimum_mix[t] # estimated mixture index at time t\n xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])\n #print(xx.shape,self.tgt_means[m].shape,self.covarYX[m].shape)\n # Eq. (22)\n E[t] = self.tgt_means[m] +np.dot(self.covarYX[m], xx)\n\n # Compute D eq.(23)\n # Approximated variances with diagonals so that we can do MLPG\n # efficiently in dimention-wise manner\n #print(E.shape)\n D = np.empty((T, feature_dim))\n #print(D.shape)\n for t in range(T):\n m = optimum_mix[t]\n # Eq. (23), with approximating covariances as diagonals\n #D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \\\n # np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])\n\n # Exact Inference\n dd = self.covarYY[m] - np.linalg.multi_dot([self.covarYX[m], np.linalg.pinv(self.covarXX[m]), self.covarXY[m]])\n #print(dd.shape)\n D[t] = np.diag(dd)\n\n # Once we have mean and variance over frames, then we can do MLPG\n return E, D, self.windows#mlpg(E, D, self.windows)", "def __set_transform_matrices(self):\n self.tf_matrices_list = []\n\n transform_matrix = eye(4) # creates a unit matrix via passing argument.\n for i in range(self.joint_count):\n transform_matrix = transform_matrix * self.__create_tf_matrix(self.alpha[i], self.a[i], self.d[i], self.q[i]).subs(self.dh_params)\n self.tf_matrices_list.append(transform_matrix)", "def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()", "def recalculate_transform(self):\n bias_mat = wireframe.translationMatrix(*self.bias)\n rot_mat = wireframe.rotateMatrix(*self.view_angle)\n scale_mat = wireframe.scaleMatrix(*self.scale)\n\n self.tf_wireframe.nodes = (self.wireframe.nodes - self._center_half) @ \\\n rot_mat @ scale_mat @ bias_mat", "def mca_transformer(transform_data):\n M, dims, index, v0v1 = transform_data\n def transform(dfp):\n # dims, index, v0v1\n P = np.zeros((len(dfp), dims), dtype=float)\n print(\"transforming\")\n for i, (_, row) in (enumerate(dfp.iterrows())):\n ivec = np.zeros(M)\n for col, val in zip(row.index, row):\n if (col, val) in index:\n ivec[index[col, val]] = 1\n proj = ivec.dot(v0v1)\n assert(all(proj.imag == 0))\n P[i,:] = proj.real\n return P\n return transform", "def test_transform_update():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj = ncs.input(hierarchy=pdb_inp.construct_hierarchy())\n pdb_inp = iotbx.pdb.input(lines=pdb_answer_0,source_info=None)\n nrgl = ncs_obj.get_ncs_restraints_group_list()\n asu_site_cart = pdb_inp.atoms().extract_xyz()\n # reference matrices\n r1 = nrgl[0].copies[0].r\n t1 = nrgl[0].copies[0].t\n r2 = nrgl[0].copies[1].r\n t2 = nrgl[0].copies[1].t\n # modify matrices in the ncs group list\n nrgl[0].copies[0].r = r1 + r2\n nrgl[0].copies[0].t = t1 + t2\n nrgl[0].copies[1].r = r1 + r2\n nrgl[0].copies[1].t = t1 + t2\n nrgl.recalculate_ncs_transforms(asu_site_cart)\n # Get the updated values\n r1_n = nrgl[0].copies[0].r\n t1_n = 
nrgl[0].copies[0].t\n r2_n = nrgl[0].copies[1].r\n t2_n = nrgl[0].copies[1].t\n #\n assert approx_equal(r1, r1_n, eps=0.001)\n assert approx_equal(t1, t1_n, eps=0.1)\n assert approx_equal(r2, r2_n, eps=0.001)\n assert approx_equal(t2, t2_n, eps=0.1)", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def chain_corrections():\n \n #read the files\n sample_4m=read_sample(map_files('sample_4m'))\n empty_cell_4m=read_sample(map_files('empty_cell_4m'))\n empty_4m=read_sample(map_files('empty_4m'))\n transmission_sample_cell_4m=read_sample(map_files('trans_sample_4m'))\n transmission_empty_cell_4m=read_sample(map_files('trans_empty_cell_4m'))\n blocked_beam_4m=read_sample(map_files('blocked_4m'))\n sensitivity=read_div(map_files('div'))\n #mask=read_sample(map_files('mask'))\n \n #normalize the monitors\n \n sample_4m_norm=monitor_normalize(sample_4m)\n empty_cell_4m_norm=monitor_normalize(empty_cell_4m)\n transmission_sample_cell_4m_norm=monitor_normalize(transmission_sample_cell_4m)\n transmission_empty_cell_4m_norm=monitor_normalize(transmission_empty_cell_4m)\n empty_4m_norm=monitor_normalize(empty_4m)\n blocked_beam_4m_norm=monitor_normalize(blocked_beam_4m)\n \n #calculate q\n sample_4m_norm_q=convert_q(sample_4m_norm)\n empty_cell_4m_norm_q=convert_q(empty_cell_4m)\n blocked_beam_4m_norm_q=convert_q(blocked_beam_4m_norm)\n transmission_sample_cell_4m_norm_q=convert_q(transmission_sample_cell_4m_norm)\n transmission_empty_cell_4m_norm_q=convert_q(transmission_empty_cell_4m_norm)\n empty_4m_norm_q=convert_q(empty_4m_norm)\n \n \n print 'converted'\n #convert flatness\n sample_4m_solid=correct_solid_angle(sample_4m_norm_q)\n empty_cell_4m_solid=correct_solid_angle(empty_cell_4m_norm_q)\n blocked_beam_4m_solid=correct_solid_angle(blocked_beam_4m_norm_q)\n transmission_sample_cell_4m_solid=correct_solid_angle(transmission_sample_cell_4m_norm_q)\n 
transmission_empty_cell_4m_solid=correct_solid_angle(transmission_empty_cell_4m_norm_q)\n empty_4m_solid=correct_solid_angle(empty_4m_norm_q)\n \n \n #calculate transmission\n coord_left=(60,60)\n coord_right=(70,70)\n transmission_sample_cell_4m_rat=generate_transmission(transmission_sample_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n transmission_empty_cell_4m_rat=generate_transmission(transmission_empty_cell_4m_solid,empty_4m_solid,\n coord_left,coord_right)\n print 'Sample transmission= {} (IGOR Value = 0.724)'.format(transmission_sample_cell_4m_rat)\n print 'Empty Cell transmission= {} (IGOR Value = 0.929)'.format(transmission_empty_cell_4m_rat)\n print 'hi'\n \n #Initial Correction -- Not with the sub/mult tools,\n #SAM = sample_4m_solid.data\n #print SAM.x\n #EMP = empty_4m_solid.data\n #print \"EMP: \"\n #print EMP.x\n #BGD = blocked_beam_4m_solid.data\n #print \"BGD\"\n #print BGD.x\n #Tsam = transmission_sample_cell_4m_rat\n #Temp = transmission_empty_cell_4m_rat\n #COR1 = SAM.__sub__(BGD)\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n \n SAM = sample_4m_solid\n print SAM.data.x\n EMP = empty_4m_solid\n print \"EMP: \"\n print EMP.data.x\n BGD = blocked_beam_4m_solid\n print \"BGD:\"\n print BGD.data.x\n Tsam = transmission_sample_cell_4m_rat\n Temp = transmission_empty_cell_4m_rat\n print \"COR1:\"\n COR1 = SAM.__sub1__(BGD)\n print COR1.data.x #check=works\n #-----Problems Here-------\n print \"COR2:\"\n COR2 = (EMP.__sub1__(BGD)) #check=works\n print COR2.data.x\n print \"COR3:\"\n #AJJ - __mul__ not working because Tsam and Temp are Measurement instances and not simply floats. See above.\n COR3 = COR2.__mul__(Tsam/Temp) #mul not working\n print COR3.data.x\n #COR = COR1.__sub1__(COR2)\n #print \"after initial correction: \"\n #print COR.x\n #COR2 = (EMP.__sub__(BGD)).__mul__(Tsam/Temp)\n #COR = COR1.__sub__(COR2)\n #print \"after initial correction: \"\n #print COR.data.x", "def _calculate_composite_transforms(self):\n\n self._calculate_similarity()\n # Finally, calculate composite transforms\n commands = []\n for moving_slice_index in self.options.slice_range:\n commands.append(self._calculate_composite(moving_slice_index))\n self.execute(commands)\n\n self._logger.info(\"Done with calculating the transformations.\")", "def motionCorrection(baseName, outDir, rawDataFile, granularity ='plane',\n maxDisplacement=[40, 40],\n nCpu=(multiprocessing.cpu_count() - 1), \n exportFrames=True):\n try:\n # instead of os.makedirs use os.mkdir\n # the former creates also intermediate dirs\n os.mkdir(outDir + '/' + baseName)\n except OSError:\n print('Either directory exists or \\\n the intermedite directories do not exist!')\n raise\n print(\"--Motion correction started with %s...\" % baseName)\n # Checking if there are two cycles of images. Important to know otherwise \n # you get merged motion corrected images.\n allImages = (glob.glob(rawDataFile))\n tseries_names = [os.path.basename(imageName).split('_')[0] for imageName in allImages]\n uniqueTSeries = Counter(tseries_names).keys()\n tseries_lengths = Counter(tseries_names).values()\n \n if len(uniqueTSeries) > 1:\n print('---More than 1 T-Series found. Aligning them together...')\n\n match = [int(re.search(r'Cycle(.*?)_',imageName).group(1)) for imageName in allImages]\n uniqueCycles = numpy.unique(numpy.asarray(match))\n \n if len(uniqueCycles) > 1:\n warnstr='More than 1 image cycle detected. 
Aborting alignment.'\n warnings.warn(warnstr)\n return \n sequence = [sima.Sequence.create('TIFFs', [[rawDataFile]])]\n print(\"Creating sima dataset of non-aligned images\")\n nonAlignedDatasetDir = outDir + '/' + baseName + '/' + 'TIFFs.sima'\n sima.ImagingDataset(sequence, nonAlignedDatasetDir)\n\n print(\"Running motion correction.\")\n \n mc_approach = sima.motion.HiddenMarkov2D(granularity=granularity,\n max_displacement=maxDisplacement,\n verbose=True,\n n_processes=nCpu)\n print(\"Creating sima dataset of aligned images\")\n motCorrDir = outDir + '/' + baseName + '/' + 'motCorr.sima'\n dataset = mc_approach.correct(sequence, motCorrDir)\n\n if exportFrames:\n print(\"Exporting motion-corrected movies.\")\n \n for iTSeries, curr_T_series in enumerate(uniqueTSeries):\n \n start_frame = tseries_names.index(curr_T_series)\n end_frame = start_frame+tseries_lengths[iTSeries]\n dataset[0,start_frame:end_frame].export_frames([[[os.path.join(motCorrDir,\n '{t}_motCorr.tif'.format(t = curr_T_series))]]],\n fill_gaps=True)\n \n\n print(\"--Motion correction done with %s...\" % baseName)\n\n return uniqueTSeries", "def transform_calcualtion(height, width, t_mat, h_offset, w_offset, nh_flag, nw_flag):\n # default coordination/location of transformed matrix according to source data(data map)\n coord_map = default_coord(height, width, h_offset, w_offset)\n\n for i in range(height):\n for j in range(width):\n # base calculations\n result = [(t_mat[0][0])*(coord_map[i, j, 0])+int((t_mat[0][1])*(coord_map[i, j, 1])),\n (t_mat[1][0])*(coord_map[i, j, 0])+(t_mat[1][1])*(coord_map[i, j, 1])]\n # since all coordinations must not be negative\n # if happened also apply a translation by offset\n coord_map[i, j, :] = [(result[0], result[0]+h_offset)[nh_flag],\n (result[1], result[1]+w_offset)[nw_flag]]\n return coord_map", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def _preprocess(self):\n if self.gt_matrix_PCs is None:\n start = time.time()\n gt_matrix = self._process_vcf().get('gt_matrix')\n print('gt_matrix took {} secs'.format(time.time() - start))\n \n # Normalize gt_matrix by site\n gt_matrix_norm = gt_matrix - np.mean(gt_matrix, axis=1)[:, np.newaxis]\n \n # PCA\n start = time.time()\n u, s, vh = np.linalg.svd(gt_matrix_norm.T, full_matrices=False)\n print('SVD took {} secs'.format(time.time() - start))\n self.gt_matrix_PCs = -u @ np.diag(s)\n \n # 
Get relevant objects from result of ripser\n if self.ripser_result is None:\n start = time.time()\n print(\"Getting ripser object\")\n self.ripser_result = ripser(self.gt_matrix_PCs, coeff=2, maxdim=1, do_cocycles=True)\n print('Ripser took {} secs'.format(time.time() - start))", "def get_all_coding_matrices(self):\n class tran_matrix():\n def __init__(self):\n self.S = None\n self.T = None\n self.H1 = None\n self.H2 = None\n self.G1 = None\n self.G2 = None\n self.H = None\n self.H2G2 = None\n\n def __init__(self, S, T, H1, H2, G1, G2, H, H2G2):\n self.S = S\n self.T = T\n self.H1 = H1\n self.H2 = H2\n self.G1 = G1\n self.G2 = G2\n self.H = H\n self.H2G2 = H2G2\n\n s = self.sources\n s_edges = [self.out_edges(x) for x in s]\n S = [list(set(self.get_edges_indices(src))) for src in s_edges]\n\n matrices = []\n for dsts in self.dst_evolution_rec[::-1]:\n T = [sorted(x) for x in dsts]\n H2_raw = self.coding_matrix[T[0]].ix[S[1]].values\n G2_raw = self.coding_matrix[T[1]].ix[S[1]].values\n H1 = numpy_to_generic(self.coding_matrix[T[0]].ix[S[0]].values)\n H2 = numpy_to_generic(H2_raw)\n G1 = numpy_to_generic(self.coding_matrix[T[1]].ix[S[0]].values)\n G2 = numpy_to_generic(G2_raw)\n H = numpy_to_generic(self.coding_matrix[T[0]].values)\n H2G2 = numpy_to_generic(np.hstack((H2_raw, G2_raw)))\n temp = tran_matrix(S, T, H1, H2, G1, G2, H, H2G2)\n matrices.append(temp)\n\n return matrices", "def assembleMatrices(self):\n # All nodes informations\n self.aircraftNodesPoints = []\n self.aircraftMassPoints = []\n self.aircraftMassDistances = []\n self.aircraftSegmentsLengths = []\n self.aircraftNodesNames = []\n self.aircraftInitNodesAreas = []\n self.aircraftNodesA = []\n self.aircraftNodesIy = []\n self.aircraftNodesIz = []\n self.aircraftNodesJ = []\n # More general information\n self.aircraftConnectedNodes = []\n self.aircraftNonRotatingNodes = []\n\n self.computeProportionFuselage()\n self.computeProportionWings()\n\n # adds fulseage infos to matrices if there is one\n if self.nFuselage > 0:\n self.aircraftNodesPoints.append(self.fs_m_points[0])\n # Assumption is made that for a fuselage section, its cog is\n # aligned with the section center.\n self.aircraftMassPoints.append(self.fs_m_points[0])\n self.aircraftMassDistances.append(np.zeros((len(self.fs_m_points[0]),3)))\n self.aircraftSegmentsLengths.append(self.fs_m_distanceBetweenPoints[0])\n self.aircraftNodesNames.append(self.fs_m_pointsName[0])\n self.aircraftInitNodesAreas.append(self.fs_m_pointsInitArea[0])\n self.aircraftNodesA.append(self.fs_m_pointsA[0])\n self.aircraftNodesIy.append(self.fs_m_pointsIy[0])\n self.aircraftNodesIz.append(self.fs_m_pointsIz[0])\n self.aircraftNodesJ.append(self.fs_m_pointsJ[0])\n logger.debug('\\n')\n for i in range(self.nWings):\n self.aircraftNodesPoints.append(self.ws_me_points[i])\n self.aircraftMassPoints.append(self.ws_ma_points[i])\n self.aircraftMassDistances.append(self.ws_ma_distance[i])\n self.aircraftSegmentsLengths.append(self.ws_me_distances[i])\n # logger.debug(i)\n # logger.debug(self.ws_me_distances)\n # logger.debug(self.aircraftSegmentsLengths)\n self.aircraftNodesNames.append(self.ws_me_pointsName[i])\n self.aircraftInitNodesAreas.append(self.ws_me_pointsInitArea[i])\n self.aircraftNodesA.append(self.ws_me_pointsA[i])\n self.aircraftNodesIy.append(self.ws_me_pointsIy[i])\n self.aircraftNodesIz.append(self.ws_me_pointsIz[i])\n self.aircraftNodesJ.append(self.ws_me_pointsJ[i])\n self.computeMass()", "def matrix_map(self, bkg_reduction=True, data_correction=True):\r\n\r\n if bkg_reduction is 
True:\r\n if data_correction is True:\r\n data = self.df4\r\n \r\n else:\r\n data = self.df2\r\n\r\n else:\r\n if data_correction is True:\r\n data = self.df3\r\n \r\n else:\r\n data = self.df1\r\n\r\n return data", "def applyMat(my_map, linsys_setup):\n \n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n \n \n\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n\n #Always apply beam * precond\n beam_prec=[]\n\n for f in range(nFreq):\n beam_prec+=[beams[f][:,:ny/2+1]*precond_2d[:,:ny/2+1]]\n precond_2d=precond_2d[:,:ny/2+1]\n power_2d=power_2d[:,:ny/2+1]\n \n ksz = False\n if len(clumaps) == 2: ksz = True\n \n # Routines to perform block matrix multiplication defined in Eriksen Eq. 19\n \n def apply_cmb_cmb(d0):\n \"\"\"\n Apply (S^-1 + A N^-1 A) x\n \"\"\"\n d1 = d0.copy()\n d1 = numpy.reshape(d1,(nx,ny))\n a_l = fft.rfft(d1,axes=[-2,-1])\n \n c_l = 0\n for f in range(nFreq):\n\n b_l = a_l * beam_prec[f]\n d2 = fft.irfft(b_l,axes=[-2,-1],normalize=True)\n d2 *= ninvs[f]\n b_l = fft.rfft(d2,axes=[-2,-1])\n c_l += b_l * beam_prec[f]\n \n d2 = fft.irfft(c_l,axes=[-2,-1],normalize=True)\n d1 = fft.irfft(precond_2d**2 * a_l/power_2d,axes=[-2,-1],normalize=True)\n \n d2 += d1\n \n return d2.reshape((nx*ny,))\n \n \"\"\"\n def apply_tsz_tsz(d0): # DONE\n \\\"\"\"\n Apply (F^T A^T N^-1 A F) x\n \\\"\"\"\n d1 = numpy.zeros(nCluster)\n mat = numpy.zeros((nCluster, nCluster))\n # TODO: This could probably be more efficient (e.g. using np.outer)\n for freq in range(nFreq):\n for ic in range(nCluster):\n for jc in range(0, ic+1):\n mat[ic,jc] = numpy.sum( ninvs[freq] * g_nu[freq]**2. \\\n * clumaps[0][ic][freq] * clumaps[0][jc][freq] )\n if ic != jc: mat[jc,ic] = mat[ic,jc]\n d1 += numpy.dot(mat, d0)\n return d1\n \n def apply_ksz_ksz(d0): # DONE\n \\\"\"\"\n Apply (K^T A^T N^-1 A K) x\n \\\"\"\"\n # FIXME: Missing factor of ivcov\n d1 = numpy.zeros(nCluster)\n mat = numpy.zeros((nCluster, nCluster))\n # TODO: This could probably be more efficient (e.g. 
using np.outer)\n for freq in range(nFreq):\n for ic in range(nCluster):\n for jc in range(0, ic+1):\n mat[ic,jc] = numpy.sum( ninvs[freq] \\\n * clumaps[1][ic][freq] * clumaps[1][jc][freq] )\n if ic != jc: mat[jc,ic] = mat[ic,jc]\n d1 += numpy.dot(mat, d0)\n d1 += numpy.dot(ivcov, d0) # Add prior term\n return d1\n \"\"\"\n \n def apply_cmb_foreground_block(dc, dm, dt, dk=None):\n \"\"\"\n Apply the CMB x (Monopole + TSZ + KSZ) terms in one block:\n (A^T N^-1 A T) x_mono\n (A^T N^-1 A F) x_tsz\n (A^T N^-1 A K) x_ksz\n \n (T^T A^T N^-1 A) x_cmb\n (F^T A^T N^-1 A) x_cmb\n (K^T A^T N^-1 A) x_cmb\n \"\"\"\n ksz = False\n if dk is not None: ksz = True\n \n # (A^T N^-1 A T) x_mono; (A^T N^-1 A F) x_tsz; (A^T N^-1 A K) x_ksz\n b_lt = 0; b_lk = 0; b_lm = 0\n for f in range(nFreq):\n mct = 0; mck = 0\n for ic in range(nCluster):\n mct += dt[ic] * ninvs[f] * clumaps[0][ic][f] * g_nu[f]\n if ksz: mck += dk[ic] * ninvs[f] * clumaps[1][ic][f]\n \n b_lm += fft.rfft(dm * ninvs[f],axes=[-2,-1]) * beam_prec[f]\n b_lt += fft.rfft(mct,axes=[-2,-1]) * beam_prec[f]\n if ksz: b_lk += fft.rfft(mck,axes=[-2,-1]) * beam_prec[f]\n\n mcm = fft.irfft(b_lm,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n mct = fft.irfft(b_lt,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n if ksz: mck = fft.irfft(b_lk,axes=[-2,-1],normalize=True).reshape((nx*ny,))\n \n # (T^T A^T N^-1 A) x_cmb; (F^T A^T N^-1 A) x_cmb; (K^T A^T N^-1 A) x_cmb\n mc = dc.copy().reshape((nx,ny))\n a_l = fft.rfft(mc,axes=[-2,-1])\n mtc = numpy.zeros(nCluster)\n mkc = numpy.zeros(nCluster)\n mmc = 0\n for f in range(nFreq):\n b_l = a_l * beam_prec[f]\n mc = fft.irfft(b_l,axes=[-2,-1],normalize=True)\n mmc += numpy.sum(mc * ninvs[f])\n for ic in range(nCluster):\n mtc[ic] += numpy.sum(mc * ninvs[f] * clumaps[0][ic][f] * g_nu[f])\n if ksz: mkc[ic] += numpy.sum(mc * ninvs[f] * clumaps[1][ic][f])\n \n if ksz: return mct, mcm, mck, mtc, mmc, mkc\n return mct, mcm, mtc, mmc\n \n \n def apply_foreground_block(m0, t0, k0=None):\n \"\"\"\n Apply the TSZ + KSZ + Monopole terms in one block:\n [ (T^T A^T N^-1 A F) (T^T A^T N^-1 A K) (T^T A^T N^-1 A T) ] (x_mono)\n [ (F^T A^T N^-1 A F) (F^T A^T N^-1 A K) (F^T A^T N^-1 A T) ] (x_tsz)\n [ (K^T A^T N^-1 A F) (K^T A^T N^-1 A K) (K^T A^T N^-1 A T) ] (x_ksz)\n \"\"\"\n ksz = True if k0 is not None else False\n \n dtt, dkk, dtk, dkt = [numpy.zeros(nCluster) for i in range(4)]\n mtt, mkk, mtk, mkt = [numpy.zeros((nCluster, nCluster)) for i in range(4)]\n dmm, dmk, dmt = [0 for i in range(3)]\n dkm, dtm = [numpy.zeros(nCluster) for i in range(2)]\n \n # TODO: This could probably be more efficient (e.g. using np.outer)\n for f in range(nFreq):\n dmm += numpy.sum(ninvs[f]) * m0\n \n # Loop through clusters\n for ic in range(nCluster):\n dmt += numpy.sum( ninvs[f] * g_nu[f] * clumaps[0][ic][f] * t0[ic] )\n dtm[ic] += numpy.sum( ninvs[f] * g_nu[f] * clumaps[0][ic][f] * m0 )\n if ksz:\n dmk += numpy.sum( ninvs[f] * clumaps[1][ic][f] * k0[ic] )\n dkm[ic] += numpy.sum( ninvs[f] * clumaps[1][ic][f] * m0 )\n \n for jc in range(0, ic+1):\n mtt[ic,jc] = numpy.sum( ninvs[f] * g_nu[f]**2. 
\\\n * clumaps[0][ic][f] * clumaps[0][jc][f] )\n if ksz:\n mkk[ic,jc] = numpy.sum( ninvs[f] \\\n * clumaps[1][ic][f] * clumaps[1][jc][f] )\n mtk[ic,jc] = numpy.sum( ninvs[f] * g_nu[f] \\\n * clumaps[0][ic][f] * clumaps[1][jc][f] )\n mkt[ic,jc] = numpy.sum( ninvs[f] * g_nu[f] \\\n * clumaps[1][ic][f] * clumaps[0][jc][f] )\n # Mirror indices\n mtt[jc,ic] = mtt[ic,jc]\n if ksz:\n mkk[jc,ic] = mkk[ic,jc]\n mtk[jc,ic] = mtk[ic,jc]\n mkt[jc,ic] = mkt[ic,jc]\n \n # Add total contribs. for this band\n dtt += numpy.dot(mtt, t0)\n if ksz:\n dkk += numpy.dot(mkk, k0)\n dtk += numpy.dot(mtk, k0)\n dkt += numpy.dot(mkt, t0)\n \n if ksz: return dtt, dkk, dmm, dtk, dkt, dmk, dkm, dtm, dmt\n return dtt, dmm, dtm, dmt\n \n # Apply block matrix multiplications and return\n # FIXME: What if KSZ not used?\n x0 = my_map[:nx*ny]\n x1 = my_map[nx*ny:nx*ny+1]\n x2 = my_map[nx*ny+1:nx*ny+nCluster+1]\n if ksz: x3 = my_map[nx*ny+nCluster+1:nx*ny+2*nCluster+1]\n \n # Multiply input vector in blocks\n #t=time.time()\n dcc = apply_cmb_cmb(x0)\n #print 'CMB', time.time()-t\n if ksz:\n dct, dcm, dck, dtc, dmc, dkc = apply_cmb_foreground_block(x0, x1, x2, x3)\n dtt, dkk, dmm, dtk, dkt, dmk, dkm, dtm, dmt = apply_foreground_block(x1, x2, x3)\n x_new_0 = dcc + dct + dck + dcm\n x_new_1= dmc + dmt + dmk + dmm\n x_new_2 = dtc + dtt + dtk + dtm\n x_new_3 = dkc + dkt + dkk + dkm\n x_new = numpy.concatenate((x_new_0, x_new_1, x_new_2, x_new_3))\n else:\n #t=time.time()\n dct, dcm, dtc, dmc = apply_cmb_foreground_block(x0, x1, x2)\n #print 'CMB-F', time.time()-t\n \n #t=time.time()\n dtt, dmm, dtm, dmt = apply_foreground_block(x1, x2)\n #print 'F', time.time()-t\n \n x_new_0 = dcc + dct + dcm\n x_new_1 = dmc + dmt + dmm\n x_new_2 = dtc + dtt + dtm\n x_new = numpy.concatenate((x_new_0, x_new_1, x_new_2))\n\n\n#sys.exit()\n return x_new", "def _compose_transforms(basis_transforms, source_basis, source_dag):\n example_gates = _get_example_gates(source_dag)\n mapped_instrs = {}\n\n for gate_name, gate_num_qubits in source_basis:\n # Need to grab a gate instance to find num_qubits and num_params.\n # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .\n example_gate = example_gates[gate_name, gate_num_qubits]\n num_params = len(example_gate.params)\n\n placeholder_params = ParameterVector(gate_name, num_params)\n placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))\n placeholder_gate.params = list(placeholder_params)\n\n dag = DAGCircuit()\n qr = QuantumRegister(gate_num_qubits)\n dag.add_qreg(qr)\n dag.apply_operation_back(placeholder_gate, qr[:], [])\n mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag\n\n for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:\n logger.debug(\n \"Composing transform step: %s/%s %s =>\\n%s\",\n gate_name,\n gate_num_qubits,\n equiv_params,\n equiv,\n )\n\n for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():\n doomed_nodes = [\n node\n for node in dag.op_nodes()\n if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)\n ]\n\n if doomed_nodes and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updating transform for mapped instr %s %s from \\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n for node in doomed_nodes:\n\n replacement = equiv.assign_parameters(\n dict(zip_longest(equiv_params, node.op.params))\n )\n\n replacement_dag = circuit_to_dag(replacement)\n\n dag.substitute_node_with_dag(node, replacement_dag)\n\n if doomed_nodes 
and logger.isEnabledFor(logging.DEBUG):\n\n logger.debug(\n \"Updated transform for mapped instr %s %s to\\n%s\",\n mapped_instr_name,\n dag_params,\n dag_to_circuit(dag, copy_operations=False),\n )\n\n return mapped_instrs", "def do_transform(self):\n # TODO: after unit tests are added switch to astropy fftconvolve here.\n from scipy.signal import fftconvolve\n total_background = self.model + self.background + self.approx\n excess = self.image - total_background\n for key, kern in self.kernbase.items():\n self.transform[key] = fftconvolve(excess, kern, mode='same')\n self.error[key] = np.sqrt(fftconvolve(total_background, kern ** 2, mode='same'))\n\n self.approx = fftconvolve(self.image - self.model - self.bkg,\n self.kern_approx, mode='same')\n self.approx_bkg = fftconvolve(self.bkg, self.kern_approx, mode='same')", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def init_transformations(self):\n shifts = (self.img_shape[0] // 2, self.img_shape[1] // 2)\n unshifts = 
(-self.img_shape[0] // 2, -self.img_shape[1] // 2)\n self.preshift = skimage.transform.SimilarityTransform(\n translation = shifts)\n self.postshift = skimage.transform.SimilarityTransform(\n translation = unshifts)", "def apply_dof_transformation(tdim, edge_count, face_count, entity_transformations, entity_dofs,\n data, cell_info, face_types):\n if tdim >= 2:\n if tdim == 3:\n face_start = 3 * face_count\n else:\n face_start = 0\n\n dofstart = 0\n for i in entity_dofs[0]:\n dofstart += i\n # NOTE: Copy array to make numba compilation faster (contiguous array assumption)\n edge_reflection = entity_transformations[\"interval\"][0].copy()\n for e in range(edge_count):\n edofs = entity_dofs[1][e]\n if edofs == 0:\n continue\n if cell_info >> (face_start + e) & 1:\n data[dofstart:dofstart+edofs] = numpy.dot(edge_reflection, data[dofstart:dofstart+edofs])\n dofstart += edofs\n\n if tdim == 3:\n for f in range(face_count):\n face_rotation = entity_transformations[face_types[f]][0].copy()\n face_reflection = entity_transformations[face_types[f]][1].copy()\n fdofs = entity_dofs[2][f]\n if fdofs == 0:\n continue\n if cell_info >> (3 * f) & 1:\n data[dofstart:dofstart+fdofs] = numpy.dot(face_reflection, data[dofstart:dofstart+fdofs])\n for _ in range(cell_info >> (3 * f + 1) & 3):\n data[dofstart:dofstart+fdofs] = numpy.dot(face_rotation, data[dofstart:dofstart+fdofs])\n dofstart += fdofs", "def var_transform(self, do_data=False):\n\n empty_vars = ['leadJetEn', 'leadJetPt', 'leadJetPhi', 'leadJetEta', 'leadJetQGL',\n 'subleadJetEn', 'subleadJetPt', 'subleadJetPhi', 'subleadJetEta', 'subleadJetQGL',\n 'subsubleadJetEn', 'subsubleadJetPt', 'subsubleadJetPhi', 'subsubleadJetEta', 'subsubleadJetQGL',\n 'dijetMinDRJetEle', 'dijetDieleAbsDEta','dijetDieleAbsDPhiTrunc', 'dijetCentrality', 'dijetMass', \n 'dijetAbsDEta', 'dijetDPhi'] \n\n replacement_value = -10\n\n for empty_var in empty_vars:\n self.data_obj.mc_df_sig[empty_var] = self.data_obj.mc_df_sig[empty_var].replace(-999., replacement_value)\n self.data_obj.mc_df_bkg[empty_var] = self.data_obj.mc_df_bkg[empty_var].replace(-999., replacement_value)\n if do_data: self.data_obj.data_df[empty_var] = self.data_obj.data_df[empty_var].replace(-999., replacement_value)\n\n #print self.data_obj.mc_df_sig[empty_vars]\n #print np.isnan(self.data_obj.mc_df_sig[empty_vars]).any()\n\n for var in gev_vars:\n if var in (self.low_level_vars_flat+self.high_level_vars):\n self.data_obj.mc_df_sig[var] = self.data_obj.mc_df_sig.apply(self.var_transform_helper, axis=1, args=[var, replacement_value])\n self.data_obj.mc_df_bkg[var] = self.data_obj.mc_df_bkg.apply(self.var_transform_helper, axis=1, args=[var, replacement_value])\n if do_data: self.data_obj.data_df[var] = self.data_obj.data_df.apply(self.var_transform_helper, axis=1, args=[var, replacement_value])\n\n #print np.isnan(self.data_obj.mc_df_sig[empty_vars]).any()", "def transform(self, image_matrix):\n\n # Centering the data\n mean = np.mean(image_matrix, axis=0)\n image_matrix = image_matrix - mean\n\n # Dimension reduction is done by multiplying the original matrix with the components\n transformed_matrix = np.dot(image_matrix, self.components.T)\n return transformed_matrix", "def prepare_CvD16_for_M2L_calc(templates_lam_range, verbose=False):\n import glob\n import os\n template_glob=os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*.s100')\n\n vcj_models=sorted(glob.glob(template_glob))\n temp_lamdas, x35, x3, x23, kroupa, flat=np.genfromtxt(vcj_models[-1], unpack=True)\n\n n_ages=7\n n_zs=5\n 
n_imfs=5\n\n \n\n\n Zs=['m1.5', 'm1.0', 'm0.5', 'p0.0', 'p0.2']\n ages=[1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.5]\n model_imfs_order=['x35', 'x3', 'x23', 'kroupa', 'flat']\n\n t_mask = ((temp_lamdas > templates_lam_range[0]) & (temp_lamdas <templates_lam_range[1]))\n\n\n\n y=x35[t_mask]\n x=temp_lamdas[t_mask]\n\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x)\n\n templates=np.empty((len(out), n_ages, n_zs, n_imfs))\n\n\n\n for a, Z in enumerate(Zs): \n for b, age in enumerate(ages):\n model=glob.glob(os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*{}*{}.ssp.s100'.format(Z, age)))[0]\n if verbose:\n print 'Loading {}'.format(model)\n data=np.genfromtxt(model)\n\n for c, counter in enumerate(reversed(range(1, data.shape[-1]))):\n \n #Interpolate templates onto a uniform wavelength grid and then log-rebin\n y=data[:, counter][t_mask] \n x=temp_lamdas[t_mask]\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x) \n\n templates[:, b, a, c]=out\n\n return templates, new_x", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, 
m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def postTransform(self,T: RigidTransform) -> None:\n for i,m in enumerate(self.milestones):\n assert len(m) == 24\n mq = self.to_se3(m[:12])\n mv = self.to_se3(m[12:])\n self.milestones[i] = self.from_se3(se3.mul(mq,T)) + self.from_se3((so3.mul(mv[0],T[0]),so3.apply(so3.inv(T[0]),mv[1])))", "def _project_loops(self):\n\n self._create_projection_datasets()\n self._get_sho_chunk_sizes(10)\n\n '''\n Loop over the FORCs\n '''\n for forc_chunk_index in range(self._num_forcs):\n pos_chunk_index = 0\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n dc_vec = self._get_dc_offset()\n '''\n Loop over positions\n '''\n while self._current_pos_slice.stop < self._end_pos:\n loops_2d, nd_mat_shape_dc_first, order_dc_offset_reverse = self._get_projection_data(pos_chunk_index)\n\n # step 8: perform loop unfolding\n projected_loops_2d, loop_metrics_1d = self._project_loop_batch(dc_vec, np.transpose(loops_2d))\n\n # test the reshapes back\n projected_loops_2d = self._reshape_projected_loops_for_h5(projected_loops_2d,\n order_dc_offset_reverse,\n nd_mat_shape_dc_first)\n self.h5_projected_loops[self._current_pos_slice, self._current_sho_spec_slice] = projected_loops_2d\n\n metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)\n\n self.h5_loop_metrics[self._current_pos_slice, self._current_met_spec_slice] = metrics_2d\n\n # Reset the position slice\n self._current_pos_slice = slice(None)\n\n pass", "def cal_dist_and_mtx(self, calibration_patterns, pattern_size, retain_calibration_patterns):\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0), ....,(8,5,0)\n # use numpy mgrid function to generate the coordinates values for a given grid size.\n grid_x, grid_y = pattern_size[0], pattern_size[1]\n objp = np.zeros((grid_y * grid_x, 3), np.float32)\n objp[:,:2] = np.mgrid[:grid_x, :grid_y].T.reshape(-1,2)\n\n # Arrays to store object points and image points from all the sample images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n \n # Step through the list and search for chessboard corners in distorted calibration images.\n for path in calibration_patterns:\n img = mpimg.imread(path)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # Find the chessboard's inner corners\n found, corners = cv2.findChessboardCorners(gray, pattern_size, None)\n\n # If found, add object points, image points\n if found:\n objpoints.append(objp)\n imgpoints.append(corners)\n \n # Draw and display the corners to see what was detected.\n if retain_calibration_patterns:\n cv2.drawChessboardCorners(img, pattern_size, corners, found)\n self.calibration_patterns_success.append(img)\n else:\n if retain_calibration_patterns:\n self.calibration_patterns_error.append(img)\n\n img_size = 
(img.shape[1], img.shape[0])\n\n if objpoints and imgpoints:\n _, self.camera_matrix, self.distortion_coef, _, _ = cv2.calibrateCamera(\n objpoints, imgpoints, img_size, None, None)", "def compute_controller(self):\n # region Input Info\n\n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n # print(messages)\n # endregion\n if messages:\n # similar to laplacian but for each robot\n # for m in messages:\n # dx += m[1][0] - pos[0]\n # dy += m[1][1] - pos[1]\n\n # position of All robots\n Apos = np.zeros([6,2])\n Apos[self.id,:]=pos[0:2]\n for m in messages:\n Apos[m[0],:]=m[1][0:2]\n\n TarM = np.zeros([6,2])\n TarM[self.id, :] = self.TargetP[self.Tid,:]-pos[0:2]\n Cdiff = Apos-pos[0:2]\n Cdiff = np.sqrt(np.square(Cdiff[:,0])+np.square(Cdiff[:,1]))\n Cdiff = np.sum(Cdiff)\n Ddiff = self.P_Des-self.P_Des[self.id]\n Ddiff = np.sqrt(np.square(Ddiff[:, 0]) + np.square(Ddiff[:, 1]))\n Ddiff = np.sum(Ddiff)\n Tdiff = np.abs(Ddiff - Cdiff)\n\n\n\n\n # region Obstacles\n Obc = Apos\n # Obc = self.Obstacles\n # Obc = np.vstack([Obs,pos[0:2]])\n Diff = pos[0:2] - Obc\n for m in range(0, Diff.shape[0]):\n if (np.sqrt(np.square(Diff[m, 0]) + np.square(Diff[m, 1]))) > 0.35:\n Diff[m, :] = np.array([0, 0])\n\n DiffY = Diff[:, 1].reshape([1, -1])\n DiffX = Diff[:, 0].reshape([1, -1])\n x_odot = np.sum(np.exp(-np.square(DiffX) / self.Var) * DiffX)\n y_odot = np.sum(np.exp(-np.square(DiffY) / self.Var) * DiffY)\n\n ObsAv = np.array([x_odot, y_odot])\n # endregion\n\n\n NewGd = np.square(np.transpose(self.E) @ Apos)\n NewGd = (NewGd[:, 0] + NewGd[:, 1]).reshape([-1, 1])\n G = self.Gdsq - NewGd\n Rg = self.DistJacobian(Apos, self.Edges)\n p_ddot = np.zeros(([6, 2]))\n\n if (Tdiff < self.Thr):\n self.StartTimer = True\n\n\n if(self.StartTimer):\n self.Timer += 1\n if (self.Timer > 500+self.OffTimer):\n self.FormStable = True\n self.StartTimer = False\n self.Timer = 0\n\n if(self.Tid > 3 and np.sum(TarM[self.id, 0])<5):\n TarM[self.id, 0] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) < 5):\n TarM[self.id, 1] = 5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n if (self.Tid > 3 and np.sum(TarM[self.id, 1]) > -5):\n TarM[self.id, 1] = -5\n\n if (self.Tid > 3 and np.sum(TarM[self.id, :]) < 0.01):\n self.Tid +=1\n\n if (self.FormStable):\n # Formation Done\n if self.Tid == 0 and self.Formation == \"square\":\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n # self.Tid += 1\n # self.FormStable = False\n print(self.P_Des, self.Formation, \" \", self.Tid)\n # self.K1 = 5\n # self.K2 = 50\n if (self.Tid < self.TargetP.shape[0]-1 and self.FormStable):\n self.Tid += 1\n if(self.Tid == 1):\n self.K1 = 2\n self.K3 = 10\n self.Thr = 0.001\n if (self.Tid == 2):\n self.K1 = 20\n self.K3 = 1\n self.P_Des = self.Form_HRec(0.5)\n self.Reset_Form()\n self.FormStable = False\n # Linear Control Law\n p_dot = np.zeros([6,2])\n p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # Non - linear Control Law\n # p_ddot = self.K2 * (np.transpose(Rg) @ 
G).reshape([-1, 2])\n # p_dot += p_ddot*self.dt\n if(self.id == 0):\n # print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n p_dot = self.K3 * TarM\n if (self.id == 0):\n print(Tdiff,self.TargetP[self.Tid,:],np.sum(G),self.Tid,self.Timer)\n # if(self.Tid == 1):\n # p_dot += -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n\n dx = p_dot[self.id, 0]\n dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n #else:\n # TarM[self.id, :] = Tdiff\n # # Linear Control\n # p_dot = -self.K1 * np.matmul(self.L, Apos) + self.K1 * np.matmul(self.E, self.Z_Des)\n # p_dot += self.dt * (self.OK / self.Var) * ObsAv\n # p_dot += self.K3 * TarM\n # dx = p_dot[self.id, 0]\n # dy = p_dot[self.id, 1]\n\n # Non - linear Control\n # p_ddot = self.K2 * (np.transpose(Rg) @ G).reshape([-1, 2])\n # p_ddot += self.K3 * TarM\n # p_ddot += (self.OK / self.Var) * ObsAv\n # dx = self.dt * p_ddot[self.id, 0]\n # dy = self.dt * p_ddot[self.id, 1]\n\n # region Robot Wheel Control\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n\n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])\n # endregion", "def run(prefix=\"tst_map_model_cc\"):\n # original (zero-origin) map and model\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)\n pdb_inp.write_pdb_file(file_name=\"%s_%s\"%(prefix,\"orig.pdb\"))\n ph = pdb_inp.construct_hierarchy()\n xrs = pdb_inp.xray_structure_simple()\n fc = xrs.structure_factors(d_min=1.5).f_calc()\n fft_map = fc.fft_map(resolution_factor = 0.25)\n fft_map.apply_sigma_scaling()\n map_data = fft_map.real_map_unpadded()\n assert map_data.all() == (40, 45, 54)\n assert map_data.origin() == (0, 0, 0)\n assert map_data.focus() == (40, 45, 54)\n write_ccp4_map(map_data=map_data, cs=xrs.crystal_symmetry(),\n file_name=\"%s_%s\"%(prefix,\"orig.ccp4\"))\n # shift origin of the map\n g = flex.grid((-20,-25,-27), (20,20,27))\n map_data.reshape(g)\n assert map_data.all() == (40, 45, 54)\n assert map_data.origin() == (-20, -25, -27)\n assert map_data.focus() == (20, 20, 27)\n write_ccp4_map(map_data=map_data, cs=xrs.crystal_symmetry(),\n file_name=\"%s_%s\"%(prefix,\"shifted.ccp4\"))\n # apply same shift to the model\n a,b,c = xrs.crystal_symmetry().unit_cell().parameters()[:3]\n N = map_data.all()\n O=map_data.origin()\n sites_cart = ph.atoms().extract_xyz()\n sx,sy,sz = a/N[0]*O[0], b/N[1]*O[1], c/N[2]*O[2]\n sites_cart_shifted = sites_cart+\\\n flex.vec3_double(sites_cart.size(), [sx,sy,sz])\n ph.atoms().set_xyz(sites_cart_shifted)\n ph.write_pdb_file(file_name=\"%s_%s\"%(prefix,\"shifted.pdb\"))\n # run phenix.real_space_refine\n checked = 0\n cmd = \" \".join([\n \"phenix.map_model_cc\",\n \"%s_shifted.pdb\"%prefix,\n \"%s_shifted.ccp4\"%prefix,\n \"resolution=1.5\",\n \"> %s.zlog\"%prefix\n ])\n print cmd\n easy_run.call(cmd)\n # check results\n fo = open(\"%s.zlog\"%prefix,\"r\")\n for l in fo.readlines():\n if(l.startswith(\" CC_mask :\")):\n cc = float(l.split()[2])\n assert cc>0.989\n 
checked+=1\n fo.close()\n assert checked==1\n # Exercise corresponding library function\n params = mmtbx.maps.map_model_cc.master_params().extract()\n params.map_model_cc.resolution=1.5\n task_obj = mmtbx.maps.map_model_cc.map_model_cc(\n map_data = map_data,\n pdb_hierarchy = ph,\n crystal_symmetry = xrs.crystal_symmetry(),\n params = params.map_model_cc)\n task_obj.validate()\n task_obj.run()\n result = task_obj.get_results()\n assert approx_equal(result.cc_mask , 1.0, 1.e-3)\n assert approx_equal(result.cc_peaks , 1.0, 1.e-3)\n assert approx_equal(result.cc_volume, 1.0, 1.e-3)", "def compute_transform(self, image_pair, figure):\t\n\t\timage_pair = [int(image_pair[1]), int(image_pair[3])]\n\t\tindices = np.where(np.all(np.abs(self.Matches[\"image_pairs\"][0] - image_pair) == 0, axis=1)) \n\t\tfeature_pairs = self.Matches[\"image_match_pairs\"][0][indices[0][0]]\n\t\tfeature_pair_scores = self.Matches[\"image_feature_scores\"][0][indices[0][0]] \n\n\t\ti1_loc = self.Database[\"image_locs\"][np.where(np.array(self.Database[\"image_idx\"]) == image_pair[0])[0][0]]\n\t\ti2_loc = self.Database[\"image_locs\"][np.where(np.array(self.Database[\"image_idx\"]) == image_pair[1])[0][0]]\n\n\t\tself.image1 = cv2.cvtColor(extract_image(figure, i1_loc), cv2.COLOR_BGR2GRAY)\n\t\tself.image2 = cv2.cvtColor(extract_image(figure, i2_loc), cv2.COLOR_BGR2GRAY)\n\t\t# self.image1 = extract_image(figure, i1_loc)\n\t\t# self.image2 = extract_image(figure, i2_loc)\n\n\t\tself.match_alignment = {}; self.match_alignment_scores = {}\n\t\tfor pair, score in zip(feature_pairs, feature_pair_scores):\n\t\t\ti1 = self.Database[\"image_idx\"][pair[0]]\n\t\t\ti2 = self.Database[\"image_idx\"][pair[1]]\n\n\t\t\tif i1 == image_pair[0]:\n\t\t\t\ttarget = pair[0] \n\t\t\t\tstart = pair[1]\n\t\t\t\tif target not in self.match_alignment.keys():\n\t\t\t\t\tself.init_match_alignment(target)\n\t\t\telse:\n\t\t\t\ttarget = pair[1]\n\t\t\t\tstart = pair[0]\t\t\t\t\n\t\t\t\tif target not in self.match_alignment.keys():\n\t\t\t\t\tself.init_match_alignment(target)\n\n\t\t\t# get ellipse and convert to three cardinal points\n\t\t\ttarget_ellipse = self.Database[\"orientation\"][target]\n\t\t\tstart_ellipse = self.Database[\"orientation\"][start]\n\t\t\ttarget_points, target_params = utils.ellipse2points(target_ellipse)\n\t\t\tstart_points, start_params = utils.ellipse2points(start_ellipse)\n\n\t\t\t# compute angle between Major axes\n\t\t\trotation_angle = int(np.abs(target_params[0] - start_params[0]))\n\t\t\treflection_truth = target_params[1] * start_params[1]\n\t\t\tif reflection_truth > 0:\n\t\t\t\treflection_truth = \"No\"\n\t\t\telse:\n\t\t\t\treflection_truth = \"Yes\"\n\t\t\tresolution_perc = round(((target_params[2] / start_params[2]) - 1), 1)\n\n\t\t\t# extract features for histogram matching\n\t\t\ttarget_bloc = self.Database[\"blot_locs\"][target]\n\t\t\tstart_bloc = self.Database[\"blot_locs\"][start]\n\t\t\ttarget_feature = extract_image(self.image1, target_bloc, ex=5)\n\t\t\tfinal_feature = extract_image(self.image2, start_bloc, ex=5) \n\n\t\t\t#find affine transform to match\n\t\t\tif self.AFFINE:\n\t\t\t\tT = utils.compute_affine(start_points, target_points)\n\t\t\t\timage2_warped = cv2.warpAffine(self.image2.copy(), T, (self.image1.shape[1], self.image1.shape[0]))\n\t\t\t\tfinal_feature = extract_image(image2_warped, target_bloc, ex=5)\n\t\t\t\n\t\t\t# match hist\n\t\t\tif self.CONTRAST:\n\t\t\t\tfinal_feature = utils.histogram_match(final_feature, 
target_feature)\n\n\t\t\tself.match_alignment[target].append((final_feature, start, rotation_angle, reflection_truth, \\\n\t\t\t\t\t\t\t\t\t\t\t\t resolution_perc, score))\n\t\t\tself.match_alignment_scores[target].append(score)", "def design_matrix_motor_df(meta):\n\n mouse = utils.meta_mouse(meta)\n\n # go\n new_meta = {}\n new_meta['go'] = np.zeros(len(meta))\n new_meta['go'][meta['trialerror'].isin([0, 3, 5, 7]).values] = 1\n assert np.sum(new_meta['go']) > 0\n meta_df_out = pd.DataFrame(data=new_meta, index=meta.index)\n\n # nogo\n new_meta = {}\n new_meta['nogo'] = np.zeros(len(meta))\n new_meta['nogo'][meta['trialerror'].isin([1, 2, 4, 6]).values] = 1\n assert np.sum(new_meta['nogo']) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get cues defined by new terms\n new_meta = {}\n mm_type = ['becomes_unrewarded', 'remains_unrewarded', 'becomes_rewarded']\n inverted_lookup = {v:k for k, v in lookups.lookup_mm[mouse].items()}\n for mm in mm_type:\n new_meta[mm] = np.zeros(len(meta))\n new_meta[mm][meta.initial_condition.isin([inverted_lookup[mm]]).values] = 1\n assert np.sum(new_meta[mm]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get previous cue same\n new_meta = {}\n mm_type = ['becomes_unrewarded', 'remains_unrewarded', 'becomes_rewarded']\n inverted_lookup = {v:k for k, v in lookups.lookup_mm[mouse].items()}\n for mm in mm_type:\n new_meta[f'prev_same_{mm}'] = np.zeros(len(meta))\n new_meta[f'prev_same_{mm}'][meta[f'prev_same_{inverted_lookup[mm]}'].values] = 1\n assert np.sum(new_meta[f'prev_same_{mm}']) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get previous cue diff\n new_meta = {}\n mm_type = ['becomes_unrewarded', 'remains_unrewarded', 'becomes_rewarded']\n inverted_lookup = {v:k for k, v in lookups.lookup_mm[mouse].items()}\n for mm in mm_type:\n new_meta[f'prev_diff_{mm}'] = np.zeros(len(meta))\n not_same = ~meta[f'prev_same_{inverted_lookup[mm]}'].values & meta.initial_condition.isin([inverted_lookup[mm]]).values\n new_meta[f'prev_diff_{mm}'][not_same] = 1\n assert np.sum(new_meta[f'prev_diff_{mm}']) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get salient events\n new_meta = {}\n for col in ['prev_reward', 'prev_punish', 'prev_blank', 'hmm_engaged']:\n new_meta[col] = np.zeros(len(meta))\n new_meta[col][meta[col].values] = 1\n assert np.sum(new_meta[col]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get normalized running and baseline licking\n new_meta = {}\n for col in ['speed', 'pre_speed', 'post_speed', 'pre_licks', 'post_licks', 'anticipatory_licks']:\n new_meta[col] = meta[col].values / np.nanmax(meta[col].values)\n new_meta[col][np.isnan(new_meta[col])] = 0\n assert np.sum(new_meta[col]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get normalized dprime\n # new_meta = {}\n # for col in ['dprime_run']:\n # new_meta[col] = meta[col].values - np.nanmin(meta[col].values)\n # new_meta[col] = new_meta[col] / np.nanmax(new_meta[col])\n # new_meta[col][np.isnan(new_meta[col])] = 0\n # assert np.sum(new_meta[col]) > 0\n # new_meta_df = 
pd.DataFrame(data=new_meta, index=meta.index)\n # meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n return meta_df_out", "def __updateMatC(self):\n\t\tif self.regScheme == 2:\n\t\t\tfor id1 in range(self.nTemplates):\n\t\t\t\tself.C[id1,id1] = 1.0 / self.w0[id1]**2", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def get_linear_discrete_matrices(self):\n\n vf_op, vb_op = get_operating_point_inputs(self.operating_point[1], self.model_type)\n if self.model_type == ModelType.EASY:\n At, Bt, Vf_op, Vd_op = getLinearizedMatrices(self.model_type, self.operating_point[0:6], vf_op, vb_op)\n elif self.model_type == ModelType.GYROMOMENT:\n At, Bt = get_gyro_matrices(self.operating_point, self.operating_point[6] / mc.K_f, vf_op, vb_op)\n if self.model_type == ModelType.EASY:\n Ct = np.array([[1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 
0, 0],\n [0, 0, 1, 0, 0, 0]])\n Dt = np.array([[0, 0],\n [0, 0],\n [0, 0]])\n elif self.model_type == ModelType.GYROMOMENT:\n if self.nOutputs == 3:\n Ct = np.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0]])\n Dt = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [0, 0],\n [0, 0]])\n elif self.nOutputs == 5:\n Ct = np.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1]])\n Dt = np.array([[0, 0],\n [0, 0],\n [0, 0],\n [0, 0],\n [0, 0]])\n\n Ak, Bk, Ck, Dk = discretize_linear_state_space(At, Bt, Ct, Dt, self.timeStep)\n return Ak, Bk, Ck, Dk", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def align_images(self, translation_only=False):\n # Extract feature point locations and descriptors.\n # print('here')\n points_and_descriptors = []\n for file in self.files:\n # print('kaka')\n image = sol4_utils.read_image(file, 1)\n # print('bla',image)\n self.h, self.w = image.shape\n pyramid, _ = sol4_utils.build_gaussian_pyramid(image, 3, 7)\n points_and_descriptors.append(find_features(pyramid))\n\n # Compute homographies between successive pairs of images.\n Hs = []\n\n for i in range(len(points_and_descriptors) - 1):\n points1, points2 = points_and_descriptors[i][0], points_and_descriptors[i + 1][0]\n desc1, desc2 = points_and_descriptors[i][1], points_and_descriptors[i + 1][1]\n\n # Find matching feature points.\n ind1, ind2 = match_features(desc1, desc2, .7)\n points1, points2 = points1[ind1, :], points2[ind2, :]\n\n # Compute homography using RANSAC.\n # print(points1)\n # print(points2)\n H12, inliers = ransac_homography(points1, points2, 100, 6, translation_only)\n\n # Uncomment for debugging: display inliers and outliers among matching points.\n # In the submitted code this function should be commented out!\n # display_matches(self.images[i], self.images[i+1], points1 , points2, inliers)\n\n Hs.append(H12)\n\n # Compute composite homographies from the central coordinate system.\n # print(Hs)\n accumulated_homographies = accumulate_homographies(Hs, (len(Hs) - 1) // 2)\n self.homographies = np.stack(accumulated_homographies)\n self.frames_for_panoramas = filter_homographies_with_translation(self.homographies, minimum_right_translation=5)\n self.homographies = self.homographies[self.frames_for_panoramas]", "def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()", "def _fp32_1_0_2_mc_on_1(loop_cnt, left_data):\n\n def _fp32_mte_process_1(axis_0_index, w_lp_index, sub_w_size):\n \"\"\"\n do transpose by mte for not last dim under multiple core on axis 1\n \"\"\"\n\n def _fp32_inner_mte_1(h_lp_index, sub_h_size):\n \"\"\"\n inner mte\n \"\"\"\n # move data in\n in_offset = ((block_idx * per_core_col_size + axis_0_index * axis_1 +\n h_lp_index * max_core_axis_size) * axis_2 +\n w_lp_index * max_no_core_axis_size)\n data_in_inf = (sub_h_size, sub_w_size, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_be_one_block(tik_inst, ub_input, data_in, data_in_inf)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size * axis_0 + axis_0_index +\n h_lp_index * max_core_axis_size * axis_0) * axis_2 +\n w_lp_index * 
max_no_core_axis_size)\n data_out_inf = (sub_h_size, sub_w_size, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_be_one_block(tik_inst, data_out, ub_input, data_out_inf)\n\n with tik_inst.for_range(0, loop_cnt) as h_lp_idx:\n _fp32_inner_mte_1(h_lp_idx, max_core_axis_size)\n with tik_inst.if_scope(left_data > 0):\n _fp32_inner_mte_1(loop_cnt, left_data)\n\n with tik_inst.for_range(0, axis_0) as axis_0_idx:\n with tik_inst.for_range(0, no_core_loop_cnt) as w_lp_idx:\n _fp32_mte_process_1(axis_0_idx, w_lp_idx, max_no_core_axis_size)\n with tik_inst.if_scope(no_core_left > 0):\n _fp32_mte_process_1(axis_0_idx, no_core_loop_cnt, no_core_left)", "def preTransform(self, T: RigidTransform) -> None:\n for i,m in enumerate(self.milestones):\n assert len(m) == 24\n mq = self.to_se3(m[:12])\n mv = self.to_se3(m[12:])\n self.milestones[i] = self.from_se3(se3.mul(T,mq)) + self.from_se3((so3.mul(T[0],mv[0]),so3.apply(T[0],mv[1])))", "def matrix_of_changes():\n\tdrivers = ['PC','WCD']\n\ttasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']\n\tproject='hcp'\n\tatlas = 'power'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tfor driver in drivers:\n\t\tall_matrices = []\n\t\tviolin_df = pd.DataFrame()\n\t\tfor task in tasks:\n\t\t\t# subjects = np.array(hcp_subjects).copy()\n\t\t\t# subjects = list(subjects)\n\t\t\t# subjects = remove_missing_subjects(subjects,task,atlas)\n\t\t\tsubjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))\n\t\t\tassert (subjects == np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_fz.npy'%(task,atlas))).all()\n\t\t\tstatic_results = graph_metrics(subjects,task,atlas)\n\t\t\tsubject_pcs = static_results['subject_pcs']\n\t\t\tsubject_mods = static_results['subject_mods']\n\t\t\tsubject_wmds = static_results['subject_wmds']\n\t\t\tmatrices = static_results['matrices']\n\t\t\ttask_perf = task_performance(subjects,task)\n\t\t\tassert subject_pcs.shape[0] == len(subjects)\n\t\t\tmean_pc = np.nanmean(subject_pcs,axis=0)\n\t\t\tmean_wmd = np.nanmean(subject_wmds,axis=0)\n\t\t\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\t\t\tfor i in range(subject_pcs.shape[1]):\n\t\t\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\t\t\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\t\t\tfor i in range(subject_wmds.shape[1]):\n\t\t\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\t\t\tif driver == 'PC':\n\t\t\t\tpredict_nodes = np.where(mod_pc_corr>0.0)[0]\n\t\t\t\tlocal_predict_nodes = np.where(mod_pc_corr<0.0)[0]\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\telse:\t\t\n\t\t\t\tpredict_nodes = np.where(mod_wmd_corr>0.0)[0]\n\t\t\t\tlocal_predict_nodes = np.where(mod_wmd_corr<0.0)[0]\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t# Plot matrix of changes\n\t\t\tedge_thresh = 75\n\t\t\tedge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh)\n\t\t\tpc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh,] = np.nan\n\t\t\thigh_pc_edge_matrix = np.nanmean(pc_edge_corr[predict_nodes],axis=0)\n\t\t\tlow_pc_edge_matrix = np.nanmean(pc_edge_corr[local_predict_nodes],axis=0)\n\t\t\tmatrix = (np.tril(low_pc_edge_matrix) + 
np.triu(high_pc_edge_matrix)).reshape((264,264))\n\t\t\tplot_matrix = matrix.copy()\n\t\t\tplot_matrix_mask = np.isnan(plot_matrix)\n\t\t\tzscores = scipy.stats.zscore(plot_matrix[plot_matrix_mask==False].reshape(-1))\n\t\t\tplot_matrix[plot_matrix_mask==False] = zscores\n\t\t\tif task != 'REST':\n\t\t\t\tall_matrices.append(plot_matrix)\n\t\t\tplot_corr_matrix(plot_matrix,network_names.copy(),out_file='/home/despoB/mb3152/dynamic_mod/figures/%s_corr_matrix_%s.pdf'%(driver,task),plot_corr=False,return_array=False)\n\n\t\t\tpc_edge_corr[np.isnan(pc_edge_corr)] = 0.0\n\t\t\tconnector_within_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tlocal_within_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tconnector_between_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tlocal_between_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tconnector_within_network_mask[:,:,:] = False\n\t\t\tlocal_within_network_mask[:,:,:] = False\n\t\t\tconnector_between_network_mask[:,:,:] = False\n\t\t\tlocal_between_network_mask[:,:,:] = False\n\t\t\t\n\t\t\tfor n in predict_nodes:\n\t\t\t\tfor node1,node2 in combinations(range(264),2):\n\t\t\t\t\tif n == node1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif n == node2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == known_membership[node2]:\n\t\t\t\t\t\tconnector_within_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tconnector_within_network_mask[n][node2,node1] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tconnector_between_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tconnector_between_network_mask[n][node2,node1] = True\n\n\t\t\tfor n in local_predict_nodes:\n\t\t\t\tfor node1,node2 in combinations(range(264),2):\n\t\t\t\t\tif n == node1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif n == node2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == known_membership[node2]:\n\t\t\t\t\t\tlocal_within_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tlocal_within_network_mask[n][node2,node1] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tlocal_between_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tlocal_between_network_mask[n][node2,node1] = True\n\n\t\t\tdef make_strs_for_df(array_to_add,str_to_add):\n\t\t\t\tarray_len = len(array_to_add)\n\t\t\t\tstr_array_ = np.chararray(array_len,itemsize=40)\n\t\t\t\tstr_array_[:] = str_to_add\n\t\t\t\treturn str_array_\n\t\t\t\n\t\t\tdef make_array_for_df(arrays_to_add):\n\t\t\t\tappend_array = np.zeros((len(arrays_to_add[0]),len(arrays_to_add))).astype(str)\n\t\t\t\tappend_array[:,0] = arrays_to_add[0]\n\t\t\t\tappend_array[:,1] = arrays_to_add[1]\n\t\t\t\tappend_array[:,2] = arrays_to_add[2]\n\t\t\t\treturn append_array\n\n\t\t\tviolin_columns = [\"r value, node i's PCs and j's edge weights\",\"Node Type\",\"Edge Type\"]\n\t\t\ttask_violin_df = pd.DataFrame(columns=violin_columns)\n\t\t\tresult_array_to_add = pc_edge_corr[connector_within_network_mask].reshape(-1)[pc_edge_corr[connector_within_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Within Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q+')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = 
task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\n\t\t\tresult_array_to_add = pc_edge_corr[local_within_network_mask].reshape(-1)[pc_edge_corr[local_within_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Within Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q-')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\n\t\t\tresult_array_to_add = pc_edge_corr[connector_between_network_mask].reshape(-1)[pc_edge_corr[connector_between_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Between Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q+')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\n\t\t\tresult_array_to_add = pc_edge_corr[local_between_network_mask].reshape(-1)[pc_edge_corr[local_between_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Between Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q-')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"] = task_violin_df[\"r value, node i's PCs and j's edge weights\"].astype(float)\n\t\t\tif driver == 'PC':\n\t\t\t\tprint task + ', Connector Hubs(Q+): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\t\tprint task + ', Non-Connector Hubs(Q-): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\telse:\n\t\t\t\tprint task + ', Local Hubs(Q+): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\t\tprint task + ', Non Local Hubs (Q-): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\t#append for average of all\n\t\t\tviolin_df = violin_df.append(pd.DataFrame(data=task_violin_df,columns=violin_columns),ignore_index=True)\n\t\t\t#Figure for single 
Task\n\t\t\tsns.set_style(\"white\")\n\t\t\tsns.set_style(\"ticks\")\n\t\t\tcolors = sns.color_palette(['#fdfd96','#C4D8E2'])\n\t\t\twith sns.plotting_context(\"paper\",font_scale=2):\n\t\t\t\tplt.figure(figsize=(24,16))\n\t\t\t\tsns.boxplot(x=\"Node Type\", y=\"r value, node i's PCs and j's edge weights\", hue=\"Edge Type\", order=['Q+','Q-'], data=task_violin_df)\n\t\t\t\tplt.savefig('/home/despoB/mb3152/dynamic_mod/figures/%s_edge_mod_%s.pdf'%(driver,task),dpi=4600)\n\t\t\t\tplt.close()\n\t\t# Average of All\n\t\tplot_corr_matrix(np.nanmean(all_matrices,axis=0),network_names.copy(),out_file='/home/despoB/mb3152/dynamic_mod/figures/%s_corr_matrix_avg.pdf'%(driver),plot_corr=False,return_array=False)\n\t\tif driver == 'PC':\n\t\t\tprint task + ',Connector Hubs(Q+): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Between Community']))\n\t\t\tprint task + ', Non-Connector Hubs(Q-): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Between Community']))\n\t\telse:\n\t\t\tprint task + ', Local Hubs(Q+): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Between Community']))\n\t\t\tprint task + ', Non-Local Hubs(Q-): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Between Community']))\n\t\tsns.set_style(\"white\")\n\t\tsns.set_style(\"ticks\")\n\t\tcolors = sns.color_palette(['#fdfd96','#C4D8E2'])\n\t\twith sns.plotting_context(\"paper\",font_scale=3):\n\t\t\tplt.figure(figsize=(24,16))\n\t\t\tsns.boxplot(x=\"Node Type\", y=\"r value, node i's PCs and j's edge weights\",hue=\"Edge Type\", palette=colors,order=['Q+','Q-'], data=violin_df)\n\t\t\tplt.savefig('/home/despoB/mb3152/dynamic_mod/figures/%s_edge_mod_avg.pdf'%(driver),dpi=4600)\n\t\t\tplt.close()", "def tcs2(self):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n \n U,T = self.save(U, T, S)\n\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n H = self.M.propagate(S, 1, fin, observe=range(fin))\n \n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n\n for i in range(fin-1):\n U,T = self.save(U, T, H[i][1])\n\n return U,T", "def _transform(self, matrix):\n for x in list(self.keys()):\n ar = self[x]\n if len(ar.shape) == 2 and ar.shape[1] == 3:\n self[x] = np.dot(matrix, ar.transpose()).transpose()", "def compute(self):\n if self.loaded():\n 
debug(\"Skipping {} computation due to transform data already loaded.\" .format(self.name))\n return\n\n # sanity checks\n error(\"Got transform dimension of {} but input dimension is {}.\".format(self.dimension, self.input_dimension), self.input_dimension < self.dimension)\n\n # output containers\n self.term_components = []\n\n # compute\n info(\"Applying {} {}-dimensional transform on the raw representation.\".\n format(self.base_name, self.dimension))\n\n # train\n train_data = self.input_vectors[self.train_index, :]\n\n info(\"Transforming training input data shape: {}\".format(train_data.shape))\n if self.is_supervised:\n ground_truth = np.reshape(match_labels_to_instances(self.train_epi, self.train_labels), (len(train_data), ))\n self.vectors = self.process_func_train(train_data, ground_truth)\n else:\n self.vectors = self.process_func_train(train_data)\n self.output_roles = (roles.train,)\n\n if self.test_index.size > 0:\n # make zero output matrix\n output_data = np.zeros((len(self.input_vectors), self.dimension), np.float32)\n output_data[self.train_index, :] = self.vectors\n\n test_data = self.input_vectors[self.test_index, :]\n info(\"Transforming test input data shape: {}\".format(test_data.shape))\n vecs = self.process_func_test(test_data)\n output_data[self.test_index, :] = vecs\n self.vectors = output_data\n\n self.output_roles = (roles.train, roles.test)\n else:\n info(f\"Skipping empty test indexes.\")\n\n self.term_components = self.get_term_representations()\n self.verify_transformed(self.vectors)\n info(f\"Output shape: {self.vectors.shape}\")\n # write the output data\n write_pickled(self.serialization_path_preprocessed, self.get_all_preprocessed())\n # write the trained transformer model\n self.save_model()", "def mirrorTransformations(self):\n\n currentSelection = cmds.ls(sl=True)\n\n # get the mirror module\n networkNode = self.returnNetworkNode\n mirrorModule = cmds.getAttr(networkNode + \".mirrorModule\")\n moduleName = cmds.getAttr(networkNode + \".moduleName\")\n parent = cmds.getAttr(networkNode + \".parentModuleBone\")\n\n # get mirror module instance and information\n mirrorInst = self.returnMirrorModuleInst\n\n # turn off aim mode\n mirrorInst.aimMode_Setup(False)\n\n # turn off coplanar mode IF it exists on the module\n try:\n state = mirrorInst.coplanarBtn.isChecked()\n if state:\n mirrorInst.coplanarBtn.setChecked(False)\n mirrorInst.coplanarMode()\n except:\n pass\n\n moverTypes = self.returnJointMovers\n for moverType in moverTypes:\n for jointMover in moverType:\n attrs = cmds.listAttr(jointMover, keyable=True)\n\n for attr in attrs:\n value = cmds.getAttr(jointMover + \".\" + attr)\n\n mirrorMover = jointMover.partition(moduleName)[2]\n mirrorMover = mirrorModule + mirrorMover\n mirrorAttrs = [\"translateX\", \"translateY\", \"translateZ\"]\n\n if attr in mirrorAttrs:\n cmds.setAttr(mirrorMover + \".\" + attr, value * -1)\n else:\n cmds.setAttr(mirrorMover + \".\" + attr, value)\n\n cmds.select(clear=True)\n if len(currentSelection) > 0:\n cmds.select(currentSelection)\n\n # turn aim mode on\n mirrorInst.aimMode_Setup(True)\n\n # extend functionality\n self.mirrorTransformations_Custom()", "def apply_dof_transformation_to_transpose_prism(\n entity_transformations, entity_dofs, data, cell_info\n):\n apply_dof_transformation_to_transpose(3, 9, 5, entity_transformations, entity_dofs,\n data, cell_info, List([\"triangle\"] + [\"quadrilateral\"] * 4 + [\"triangle\"]))", "def cbindMatrices(hm, args):\n hm2 = heatmapper.heatmapper()\n\n # Make a dict of 
region name:row associations\n hm.read_matrix_file(args.matrixFile[0])\n d = dict({x: dict() for x in hm.parameters[\"group_labels\"]})\n for idx, group in enumerate(hm.parameters[\"group_labels\"]):\n s = hm.parameters[\"group_boundaries\"][idx]\n e = hm.parameters[\"group_boundaries\"][idx + 1]\n for idx2, reg in enumerate(hm.matrix.regions[s:e]):\n d[group][reg[2]] = idx2 + s\n\n # Iterate through the other matrices\n for idx in range(1, len(args.matrixFile)):\n hm2.read_matrix_file(args.matrixFile[idx])\n # Add the sample labels\n hm.parameters['sample_labels'].extend(hm2.parameters['sample_labels'])\n # Add the sample boundaries\n lens = [x + hm.parameters['sample_boundaries'][-1] for x in hm2.parameters['sample_boundaries']][1:]\n hm.parameters['sample_boundaries'].extend(lens)\n\n # Add on additional NA initialized columns\n ncol = hm.matrix.matrix.shape[1]\n hm.matrix.matrix = np.hstack((hm.matrix.matrix, np.empty(hm2.matrix.matrix.shape)))\n hm.matrix.matrix[:, ncol:] = np.NAN\n\n # Update the values\n for idx2, group in enumerate(hm2.parameters[\"group_labels\"]):\n if group not in d:\n continue\n s = hm2.parameters[\"group_boundaries\"][idx2]\n e = hm2.parameters[\"group_boundaries\"][idx2 + 1]\n for idx3, reg in enumerate(hm2.matrix.regions[s:e]):\n if reg[2] not in d[group]:\n continue\n hm.matrix.matrix[d[group][reg[2]], ncol:] = hm2.matrix.matrix[s + idx3, :]\n\n # Append the special params\n for s in hm.special_params:\n hm.parameters[s].extend(hm2.parameters[s])\n\n # Update the sample parameters\n hm.matrix.sample_labels = hm.parameters['sample_labels']\n hm.matrix.sample_boundaries = hm.parameters['sample_boundaries']", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. 
If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def _arrange_xfms(transforms, num_files, tmp_folder):\n base_xform = [\"#Insight Transform File V1.0\", \"#Transform 0\"]\n # Initialize the transforms matrix\n xfms_T = []\n for i, tf_file in enumerate(transforms):\n # If it is a deformation field, copy to the tfs_matrix directly\n if guess_type(tf_file)[0] != \"text/plain\":\n xfms_T.append([tf_file] * num_files)\n continue\n\n with open(tf_file) as tf_fh:\n tfdata = tf_fh.read().strip()\n\n # If it is not an ITK transform file, copy to the tfs_matrix directly\n if not tfdata.startswith(\"#Insight Transform File\"):\n xfms_T.append([tf_file] * num_files)\n continue\n\n # Count number of transforms in ITK transform file\n nxforms = tfdata.count(\"#Transform\")\n\n # Remove first line\n tfdata = tfdata.split(\"\\n\")[1:]\n\n # If it is a ITK transform file with only 1 xform, copy to the tfs_matrix directly\n if nxforms == 1:\n xfms_T.append([tf_file] * num_files)\n continue\n\n if nxforms != num_files:\n raise RuntimeError(\n \"Number of transforms (%d) found in the ITK file does not match\"\n \" the number of input image files (%d).\" % (nxforms, num_files)\n )\n\n # At this point splitting transforms will be necessary, generate a base name\n out_base = fname_presuffix(\n tf_file, suffix=\"_pos-%03d_xfm-{:05d}\" % i, newpath=tmp_folder.name\n ).format\n # Split combined ITK transforms file\n split_xfms = []\n for xform_i in range(nxforms):\n # Find start token to extract\n startidx = tfdata.index(\"#Transform %d\" % xform_i)\n next_xform = base_xform + tfdata[startidx + 1:startidx + 4] + [\"\"]\n xfm_file = out_base(xform_i)\n with open(xfm_file, \"w\") as out_xfm:\n out_xfm.write(\"\\n\".join(next_xform))\n split_xfms.append(xfm_file)\n xfms_T.append(split_xfms)\n\n # Transpose back (only Python 3)\n return list(map(list, zip(*xfms_T)))", "def _transform_frame(self, src):\n D = len(src)\n\n # Eq.(11)\n E = np.zeros((self.num_mixtures, D))\n for m in range(self.num_mixtures):\n xx = np.linalg.solve(self.covarXX[m], src - self.src_means[m])\n E[m] = self.tgt_means[m] + self.covarYX[m].dot(xx)\n\n # Eq.(9) p(m|x)\n posterior = self.px.predict_proba(np.atleast_2d(src))\n\n # Eq.(13) conditinal mean E[p(y|x)]\n return posterior.dot(E).flatten()", "def connector_mediation(task):\n\tatlas = 'power'\n\tproject='hcp'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tsubjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))\n\tstatic_results = graph_metrics(subjects,task,atlas,run_version='fz')\n\tmatrices = static_results['matrices']\n\tsubject_pcs = static_results['subject_pcs']\n\tsubject_mods = 
static_results['subject_mods']\n\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\tfor i in range(subject_pcs.shape[1]):\n\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\tmean_conn = np.nanmean(matrices,axis=0)\n\te_tresh = np.percentile(mean_conn,85)\n\tsubject_pcs[np.isnan(subject_pcs)] = 0.0\n\tm = np.zeros((264,264,264))\n\tpool = Pool(40)\n\tfor n in range(264):\n\t\tprint n\n\t\tsys.stdout.flush()\n\t\tvariables = []\n\t\tfor i,j in combinations(range(264),2):\n\t\t\tvariables.append(pd.DataFrame(data={'pc':subject_pcs[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))\n\t\tresults = pool.map(multi_med,variables)\n\t\tfor r,i in zip(results,combinations(range(264),2)):\n\t\t\tm[n,i[0],i[1]] = r\n\t\t\tm[n,i[1],i[0]] = r\n\t\tnp.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s.npy'%(task),m)", "def apply_alignment_matrix(job, matrix):\n # Get the SVG-style 6-element vector from the 3x3 matrix\n mat = [matrix[0][0], matrix[1][0], matrix[0][1], matrix[1][1], matrix[0][2], matrix[1][2]]\n\n # Only the 'defs' list contains coordinates that need to be transformed\n defs = job['defs']\n\n for one_def in defs:\n if one_def['kind'] == \"image\":\n # TODO: raster images are not supported!\n # The rest of the code assumes the rows of pixel data are aligned\n # with the X axis, so handling rotation will require transforming\n # the entire image and generating new data.\n pass\n elif one_def['kind'] == \"path\":\n for one_path in one_def['data']:\n for one_point in one_path:\n matrixApply(mat, one_point)", "def __update_transformation_view(self):\n text = \"Computed transformation.\\n\\n\"\n text += \"Rotation:\\n\"\n for i in range(3):\n text += str(self.rotation[i, 0]) + \" \" + str(self.rotation[i, 1]) + \" \" + str(self.rotation[i, 2]) + \"\\n\"\n text += \"\\nTranslation:\\n\"\n text += str(self.translation[0]) + \" \" + str(self.translation[1]) + \" \" + str(self.translation[2])\n text += \"\\n\\nScale:\\n\"\n text += str(self.scale)\n text += \"\\n\\n4x4 Matrix:\\n\"\n mat = self.__compose_transformation()\n for i in range(4):\n text += f\"{str(mat[i, 0])}, {str(mat[i, 1])}, {str(mat[i, 2])}, {str(mat[i, 3])}\\n\"\n self.qt_transformation_textbox.setText(text)", "def finalize_fields(self):\n \n # Commented out until the MPI implementation is ready\n self.comm.allreduce(self.delta_P_dc, op=mpi.SUM)\n self.comm.allreduce(self.delta_P_omega, op=mpi.SUM)\n\n self.dc_coords[:,:,0] += self.delta_P_dc[:,:]\n self.omega_coords[:,:,0] += self.delta_P_omega[:,:]\n\n self.delta_P_dc = np.zeros((self.n_modes_z,self.n_modes_r))\n self.delta_P_omega = np.zeros((self.n_modes_z,self.n_modes_r))", "def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], 
displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b", "def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b", "def apply_dof_transformation_to_transpose_interval(entity_transformations, entity_dofs,\n data, cell_info):\n return", "def generateMatrix():\n num_probes = 16\n\n # print(position_vectors)\n # Create the (48x4) calibration matrix:\n calibration_lookup= [[0] * 3 for i in range(num_probes)]\n calibration_matrix = [[0] * 9 for i in range(num_probes)]\n counter = 0\n\n # first populate with x-direction:\n shot_range = [17, 20] #x-direction\n dir = 0 #the direction of the orentation of the probe array\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_Bx\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n # Then populate with y-direction:\n shot_range = [21, 25] #y-direction\n dir = 1 #the direction of the orentation of the probe array\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_By\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n # Then populate with z-direction:\n shot_range = [11, 15] #z-direction\n dir = 2\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_Bz\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n pth = os.getcwd()\n date = '050119'\n print(\"Finished! 
File saved as calib-%s-4x4_lookup.txt and _3x3 in current working directory\" %(date))\n savetxt(os.path.join(pth, 'calib-%s-4x4_lookup_no_switch.txt' % (date)) , calibration_lookup)\n savetxt(os.path.join(pth, 'calib-%s-4x4_3x3_no_switch.txt' % (date)) , calibration_matrix)", "def position_modules_interpolate(self, data):\n assert data.shape == (16, 512, 128)\n size_yx, centre = self._get_dimensions()\n tmp = np.empty((16 * 8,) + size_yx, dtype=data.dtype)\n\n for i, (module, mod_data) in enumerate(zip(self.modules, data)):\n tiles_data = np.split(mod_data, 8)\n for j, (tile, tile_data) in enumerate(zip(module, tiles_data)):\n # We store (x, y, z), but numpy indexing, and hence affine_transform,\n # work like [y, x]. Rearrange the numbers:\n fs_vec_yx = tile.fs_vec[:2][::-1]\n ss_vec_yx = tile.ss_vec[:2][::-1]\n\n # Offset by centre to make all coordinates positive\n corner_pos_yx = tile.corner_pos[:2][::-1] + centre\n\n # Make the rotation matrix\n rotn = np.stack((ss_vec_yx, fs_vec_yx), axis=-1)\n\n # affine_transform takes a mapping from *output* to *input*.\n # So we reverse the forward transformation.\n transform = np.linalg.inv(rotn)\n offset = np.dot(rotn, corner_pos_yx) # this seems to work, but is it right?\n\n affine_transform(\n tile_data,\n transform,\n offset=offset,\n cval=np.nan,\n output_shape=size_yx,\n output=tmp[i * 8 + j],\n )\n\n # Silence warnings about nans - we expect gaps in the result\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n out = np.nanmax(tmp, axis=0)\n\n return out, centre", "def combine_par(output_dir): \n #start time\n start_time = time.time()\n \n # set input/output file paths\n infile0 = output_dir + 'TransformParameters.0.txt'\n infile1 = output_dir + 'TransformParameters.1.txt'\n outfile0 = output_dir +'TransformParameters.fwd.txt'\n outfile1 = output_dir +'TransformParameters.inv.txt'\n \n # define reference frame for registration\n ref = 0\n spacing = 1\n \n # Open parameter file 0 and search for GridSpacing and GridOrigin line\n text_filein0 = open( infile0, \"r\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n origin_str = line\n elif line.find( \"(GridSpacing \" ) == 0:\n spacing_str = line\n text_filein0.close()\n \n # Extract time point origin from line\n origin_split = origin_str.strip().split(' ')\n origin_split = origin_split[ len( origin_split ) - 1 ].split(')')\n old_origin = float( origin_split[ 0 ] )\n \n # Extract time point spacing from line\n spacing_split = spacing_str.strip().split(' ')\n spacing_split = spacing_split[ len( spacing_split ) - 1 ].split(')')\n old_spacing = float( spacing_split[ 0 ] )\n \n \n print(\"Original grid origin in time dimension: \" + str( old_origin ))\n print(\"Original grid spacing in time dimension: \" + str( old_spacing ))\n print(\"\")\n \n # Determine new grid origin\n new_origin = ref - ( spacing / old_spacing ) * ( ref - old_origin )\n print( \"New grid origin in time dimension: \" + str( new_origin ))\n \n # Recompose origin and spacing lines\n new_origin_string = origin_str.strip().split(' ')\n new_origin_string.pop()\n new_origin_string = \" \".join( new_origin_string ) + \" \" + str( new_origin ) + \")\\n\"\n new_spacing_string = spacing_str.strip().split(' ')\n new_spacing_string.pop()\n new_spacing_string = \" \".join( new_spacing_string ) + \" \" + str( spacing ) + \")\\n\"\n \n # Reopen text file, replace origin and spacing and write to output file 0\n text_filein0 = open( infile0, \"r\" )\n text_fileout0 = open( 
outfile0, \"w\" )\n for line in text_filein0:\n if line.find( \"(GridOrigin \" ) == 0:\n # Write new origin line\n text_fileout0.write( new_origin_string )\n elif line.find( \"(GridSpacing \" ) == 0:\n # Write new spacing line\n text_fileout0.write( new_spacing_string )\n elif line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Remove initial transform\n text_fileout0.write( \"(InitialTransformParametersFileName \\\"NoInitialTransform\\\")\\n\" )\n else:\n # Write line read from input file (no change)\n text_fileout0.write( line )\n text_filein0.close()\n text_fileout0.close()\n \n # Open parameter file 1 and search for GridSize\n text_filein1 = open( infile1, \"r\" )\n for line in text_filein1:\n if line.find(\"(GridSize\") == 0:\n grid_str = line\n grid_split = grid_str.strip().split(' ')\n grid_split[-1] = grid_split[-1].replace(')','')\n grid_split = grid_split[1:]\n grid_float = [float(s) for s in grid_split]\n grid_all = int(grid_float[0] * grid_float[1] * grid_float[2] * grid_float[3])\n num_phase = int(grid_float[3])\n text_filein1.close()\n \n # Replace initial transform parameter filename\n text_filein1 = open( infile1, \"r\" )\n text_fileout1 = open( outfile1, \"w\" )\n for line in text_filein1:\n if line.find( \"(InitialTransformParametersFileName \" ) == 0:\n # Set initial transform filename\n text_fileout1.write( \"(InitialTransformParametersFileName \\\"\" + outfile0 + \"\\\")\\n\" )\n elif line.find(\"(TransformParameters \") == 0:\n # extract b-spline parameters, arrangment : x (Px*Py*Pz), y(Px*Py*Pz), z(Px*Py*Pz), t(Px*Py*Pz)\n transPar_str = line\n transPar_split = transPar_str.strip().split(' ')\n transPar_split[-1] = transPar_split[-1].replace(')','')\n transPar_split = transPar_split[1:]\n num_grid3d = int(grid_all / num_phase) \n str_seg = transPar_split[(ref*num_grid3d):((ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all+(ref*num_grid3d)): (grid_all + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*2+(ref*num_grid3d)): (grid_all*2 + (ref+1)*num_grid3d)] * num_phase + transPar_split[(grid_all*3+(ref*num_grid3d)): (grid_all*3 + (ref+1)*num_grid3d)] * num_phase\n #str_seg = \"\"\n #str_seg = [str_seg + transPar_split[((ref*num_grid3d)+grid*i):((ref+1)*num_grid3d+grid*i)] * num_phase for i in range(4)]\n str_joined = ' '.join(str_seg)\n text_fileout1.write(\"(TransformParameters \" + str_joined + \")\\n\")\n else:\n # Write line read from input file (no change)\n text_fileout1.write( line )\n text_filein1.close()\n text_fileout1.close()\n \n # caclulate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n print('combine_par done. 
elapsed time:', elapsed_time, 's')", "def run(self):\n #self.source_fixed = center_of_mass(self.source_mesh.vs)\n #self.destination_fixed = center_of_mass(self.destination_mesh.vs)\n\n source_high_curvature = get_mesh_of_high_curvature(self.source_mesh)\n destination_high_curvature = get_mesh_of_high_curvature(self.destination_mesh)\n\n self.matrix, error = icp(source_high_curvature.vs, \n destination_high_curvature.vs, \n self.source_fixed, \n self.destination_fixed,\n 300,\n verbose = self.verbose)\n\n self.inverse = np.linalg.inv(self.matrix)\n self.global_confidence = 1 - error\n print self.global_confidence", "def apply_transformation_np(source, transformation):\n source_homog = np.ones((source.shape[0], 4))\n source_homog[:, :-1] = source\n # source_homog = np.hstack(\n # (source, np.ones(source.shape[0], 1))\n # )\n\n source_transformed = np.matmul(transformation, source_homog.T).T[:, :-1]\n return source_transformed", "def align_images(self, translation_only=False):\n # Extract feature point locations and descriptors.\n points_and_descriptors = []\n for file in self.files:\n image = sol4_utils.read_image(file, 1)\n self.images.append(image)\n self.h, self.w = image.shape\n pyramid, _ = sol4_utils.build_gaussian_pyramid(image, 3, PYR_FLT_LEVEL)\n points_and_descriptors.append(find_features(pyramid))\n\n # Compute homographies between successive pairs of images.\n Hs = []\n for i in range(len(points_and_descriptors) - 1):\n points1, points2 = points_and_descriptors[i][0], points_and_descriptors[i + 1][0]\n desc1, desc2 = points_and_descriptors[i][1], points_and_descriptors[i + 1][1]\n\n # Find matching feature points.\n ind1, ind2 = match_features(desc1, desc2, MATCH_MIN_SCORE)\n points1, points2 = points1[ind1, :], points2[ind2, :]\n\n # Compute homography using RANSAC.\n H12, inliers = ransac_homography(points1, points2,\n RANSAC_ITER_NUM, RANAC_ERR_TOL,\n translation_only)\n\n # Uncomment for debugging: display inliers and outliers among matching points.\n # In the submitted code this function should be commented out!\n if(self.display_match):\n display_matches(self.images[i], self.images[i+1], points1 , points2, inliers)\n\n Hs.append(H12)\n\n # Compute composite homographies from the central coordinate system.\n accumulated_homographies = accumulate_homographies(Hs, (len(Hs) - 1) // 2)\n self.homographies = np.stack(accumulated_homographies)\n self.frames_for_panoramas = filter_homographies_with_translation(self.homographies, minimum_right_translation=5)\n print((self.frames_for_panoramas.shape))\n self.homographies = self.homographies[self.frames_for_panoramas]", "def append_direct_component_matrices(technique_data: TransitiveTechniqueData):\n for technique in technique_data.technique.get_component_techniques():\n direct_calculator = DirectTechniqueCalculator(technique.definition)\n similarity_matrix = direct_calculator.calculate_technique_data(\n technique_data.dataset\n ).similarity_matrix\n technique_data.transitive_matrices.append(similarity_matrix)", "def reconstruct(self):\n self.print_tee(\"starting reconstruction\", self.log_file)\n\n # #############################################\n # estimate SIM parameters\n # #############################################\n # estimate frequencies\n tstart = time.time() # since done using joblib, process_time() is not useful...\n\n if self.use_fixed_frq:\n self.frqs = self.frqs_guess\n self.print_tee(\"using fixed frequencies\", self.log_file)\n else:\n if self.find_frq_first:\n self.frqs = self.estimate_sim_frqs(self.imgs_ft, 
self.imgs_ft, self.frqs_guess)\n else:\n self.print_tee(\"doing phase demixing prior to frequency finding\", self.log_file)\n self.separated_components_ft = separate_components(self.imgs_ft, self.phases_guess, np.ones((self.nangles, self.nphases)))\n imgs1 = np.expand_dims(self.separated_components_ft[:, 0], axis=1)\n imgs2 = np.expand_dims(self.separated_components_ft[:, 1], axis=1)\n self.frqs = self.estimate_sim_frqs(imgs1, imgs2, self.frqs_guess)\n\n tend = time.time()\n self.print_tee(\"fitting frequencies took %0.2fs\" % (tend - tstart), self.log_file)\n\n # estimate phases\n tstart = time.process_time()\n if self.use_fixed_phase:\n self.phases = self.phases_guess\n self.amps = np.ones((self.nangles, self.nphases))\n self.print_tee(\"Using fixed phases\", self.log_file)\n else:\n self.phases, self.amps = self.estimate_sim_phases(self.frqs, self.phases_guess)\n\n tend = time.process_time()\n self.print_tee(\"estimated %d phases in %0.2fs\" % (self.nangles * self.nphases, tend - tstart), self.log_file)\n\n # separate components\n self.separated_components_ft = separate_components(self.imgs_ft, self.phases, self.amps)\n\n # estimate modulation depths and power spectrum fit parameters\n tstart = time.process_time()\n # for the moment, need to do this feet even if have fixed mod depth, because still need the\n # power spectrum parameters\n self.mod_depths, self.power_spectrum_params, self.pspec_masks = self.estimate_mod_depths()\n\n if self.use_fixed_mod_depths:\n self.mod_depths = np.zeros((self.nangles, self.nphases))\n self.mod_depths[:, 0] = 1\n for jj in range(1, self.nphases):\n self.mod_depths[:, jj] = self.mod_depths_guess\n\n # also correct power spectrum params\n self.power_spectrum_params[:, jj, 2] = self.mod_depths[:, jj]\n\n tend = time.process_time()\n self.print_tee('estimated %d modulation depths in %0.2fs' % (self.nangles, tend - tstart), self.log_file)\n\n # #############################################\n # estimate modulation contrast to noise ratio for raw images\n # #############################################\n mcnr = np.zeros((self.nangles, self.nphases))\n for ii in range(self.nangles):\n for jj in range(self.nphases):\n mcnr[ii, jj] = get_mcnr(self.imgs_ft[ii, jj], self.frqs[ii], self.fx, self.fy, self.fmax)\n\n # if mcnr is too low (typically < 1), use guess values instead\n if self.default_to_guess_on_low_mcnr and np.min(mcnr[ii]) < self.min_mcnr and self.frqs_guess is not None:\n self.frqs[ii] = self.frqs_guess[ii]\n self.print_tee(\"Angle %d/%d, minimum mcnr = %0.2f is less than the minimum value, %0.2f,\"\n \" so fit frequency will be replaced with guess\"\n % (ii + 1, self.nangles, np.min(mcnr[ii]), self.min_mcnr), self.log_file)\n\n for jj in range(self.nphases):\n mcnr[ii, jj] = get_mcnr(self.imgs_ft[ii, jj], self.frqs[ii], self.fx, self.fy, self.fmax)\n\n self.mcnr = mcnr\n # for convenience, also save periods and angles\n self.periods = 1 / np.sqrt(self.frqs[:, 0] ** 2 + self.frqs[:, 1] ** 2)\n self.angles = np.angle(self.frqs[:, 0] + 1j * self.frqs[:, 1])\n\n # #############################################\n # estimate noise in component images\n # #############################################\n self.noise_power = self.power_spectrum_params[:, :, -1]\n\n # #############################################\n # SIM reconstruction\n # #############################################\n tstart = time.process_time()\n self.img_sr, self.img_sr_ft, self.components_deconvolved_ft, self.components_shifted_ft, \\\n self.weights, self.weight_norm, self.snr, 
self.snr_shifted = self.combine_components()\n # self.img_sr, self.img_sr_ft, self.components_deconvolved_ft, self.components_shifted_ft, \\\n # self.weights, self.weight_norm, self.snr, self.snr_shifted = self.combine_components_fairSIM()\n tend = time.process_time()\n\n self.print_tee(\"combining components took %0.2fs\" % (tend - tstart), self.log_file)\n\n # #############################################\n # widefield deconvolution\n # #############################################\n # get signal to noise ratio\n wf_noise = get_noise_power(self.widefield_ft, self.fx, self.fy, self.fmax)\n fit_result, self.mask_wf = fit_power_spectrum(self.widefield_ft, self.otf, self.fx, self.fy, self.fmax, self.fbounds,\n init_params=[None, self.power_spectrum_params[0, 0, 1], 1, wf_noise],\n fixed_params=[False, True, True, True])\n\n self.pspec_params_wf = fit_result['fit_params']\n ff = np.sqrt(self.fx[None, :]**2 + self.fy[:, None]**2)\n sig = power_spectrum_fn([self.pspec_params_wf[0], self.pspec_params_wf[1], self.pspec_params_wf[2], 0], ff, 1)\n wf_snr = sig / wf_noise\n # deconvolution\n wf_decon_ft, wfilter = wiener_deconvolution(self.widefield_ft, self.otf, wf_snr, snr_includes_otf=False)\n\n # upsample to make fully comparable to reconstructed image\n self.widefield_deconvolution_ft = tools.expand_fourier_sp(wf_decon_ft, 2, 2, centered=True)\n self.widefield_deconvolution = fft.fftshift(fft.ifft2(fft.ifftshift(self.widefield_deconvolution_ft))).real\n\n # #############################################\n # print parameters\n # #############################################\n self.print_parameters()\n\n try:\n self.log_file.close()\n except AttributeError:\n pass", "def apply_transform_matrix(self, img: np.ndarray, transform_matrix):\n h, w = img.shape[0], img.shape[1]\n transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n img = np.rollaxis(img, 2, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n\n channel_images = [scipy.ndimage.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=1,\n mode=self.fill_mode,\n cval=self.cval) for x_channel in img]\n img = np.stack(channel_images, axis=0)\n img = np.rollaxis(img, 0, 2 + 1)\n # img = apply_affine_transform(img, transform_matrix, channel_axis=2, fill_mode=self.fill_mode, cval=self.cval) # apply_transform\n return img", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def _compute_correction(self, initial_state, final_state, a, b, c, s):\r\n pertub = self.pertub\r\n pertub_s = pertub *10\r\n \r\n pred_no_pertub = 
self._motion_update_one_shot(initial_state, a, b, c, s)\r\n pred_pertub_a = self._motion_update_one_shot(initial_state, a +pertub, b, c, s)\r\n pred_pertub_b = self._motion_update_one_shot(initial_state, a, b +pertub, c, s)\r\n # no need to correct C, C is constrained by kappa_final\r\n # # pred_pertub_c = self._motion_update_one_shot(initial_state, a, b, c +pertub, s)\r\n pred_pertub_s = self._motion_update_one_shot(initial_state, a, b, c, s +pertub_s)\r\n\r\n d_state = np.zeros((3,1))\r\n d_pertub_state = np.zeros((3,3))\r\n Jacobian = np.zeros((3,3))\r\n for i in range(0, 3):\r\n d_pertub_state[i][0] = (final_state[i] - pred_pertub_a[i]) # a\r\n d_pertub_state[i][1] = (final_state[i] - pred_pertub_b[i]) # b\r\n # d_pertub_state[i][2] = (final_state[i] - pred_pertub_c[i]) # c (no update)\r\n d_pertub_state[i][2] = (final_state[i] - pred_pertub_s[i]) # s\r\n \r\n d_state[i] = final_state[i] - pred_no_pertub[i]\r\n \r\n Jacobian[i][0] = (d_pertub_state[i][0] - d_state[i])/pertub # a\r\n Jacobian[i][1] = (d_pertub_state[i][1] - d_state[i])/pertub # b\r\n # Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub # c (no update)\r\n Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub_s # s\r\n\r\n # inv_Jacobian = np.linalg.inv(Jacobian)\r\n inv_Jacobian = np.linalg.pinv(Jacobian)\r\n correction = np.dot(inv_Jacobian, d_state)\r\n # pdb.set_trace()\r\n return correction", "def shifts_projection(sc, clean):\n def shifts_projected(clean, axis):\n projected = clean.map(lambda x: x.mean(axis=axis)[:, :, np.newaxis])\n target = getTarget(projected, 30, 1)\n shifts = registerByPlane(sc, projected, target[:, :, np.newaxis], 10, False)\n return shifts[:, :, 0]\n\n # shifts_xy = shifts_projected(clean, 2)\n shifts_xz = shifts_projected(clean, 1)\n shifts_yz = shifts_projected(clean, 0)\n\n # x_shifts = np.mean(np.stack((shifts_xz[:, 0], shifts_xy[:, 0])), axis=0)\n z_shifts = np.mean(np.stack((shifts_xz[:, 1], shifts_yz[:, 1])), axis=0)\n # y_shifts = np.mean(np.stack((shifts_yz[:, 0], shifts_xy[:, 1])), axis=0)\n plt.figure()\n plt.plot(shifts_xz[:, 1])\n plt.plot(shifts_yz[:, 1])\n plt.plot(z_shifts)\n plt.title('Z')\n # plt.figure()\n # plt.plot(shifts_xz[:, 0])\n # plt.plot(shifts_xy[:, 0])\n # plt.plot(x_shifts)\n # plt.title('X')\n # plt.figure()\n # plt.plot(shifts_yz[:, 0])\n # plt.plot(shifts_xy[:, 1])\n # plt.plot(y_shifts)\n # plt.title('Y')\n # shifts_all = np.stack((x_shifts, y_shifts, z_shifts))\n\n def initReg(kv):\n from scipy.ndimage.interpolation import shift\n index, volume = kv\n current_shift = (0, 0, -1 * z_shifts[int(index[0])])\n shifted = shift(volume, current_shift)\n return shifted.astype(np.int16)\n\n reg = clean.map(initReg, with_keys=True, value_shape=clean.shape[1:], dtype=np.int16)\n reg.cache()\n reg.count()\n return reg", "def _calc_matrix(self):\n\t\tz = self.zoom\n\t\talloc = self.allocation\n\t\tif self.image:\n\t\t\tiw, ih = self.image.get_width(), self.image.get_height()\n\t\telse:\n\t\t\tiw, ih = 0, 0\n#\t\tif __debug__: print self._vadj.lower, self._vadj.value, self._vadj.upper\n\t\t\n\t\ti2w = cairo.Matrix(\n\t\t\tz,0,\n\t\t\t0,z,\n\t\t\t-self._hadj.value if alloc.width < iw*z else (alloc.width - iw*z)/2, \n\t\t\t-self._vadj.value if alloc.height < ih*z else (alloc.height - ih*z)/2,\n\t\t\t)\n\t\t\n\t\tself._i2w_matrix = i2w\n\t\t\n\t\tw2i = cairo.Matrix(*i2w) #copy\n\t\tw2i.invert()\n\t\tself._w2i_matrix = w2i", "def design_matrix_df(meta):\n\n mouse = utils.meta_mouse(meta)\n\n # go\n new_meta = {}\n new_meta['go'] = np.zeros(len(meta))\n 
new_meta['go'][meta['trialerror'].isin([0, 3, 5, 7]).values] = 1\n assert np.sum(new_meta['go']) > 0\n meta_df_out = pd.DataFrame(data=new_meta, index=meta.index)\n\n # nogo\n new_meta = {}\n new_meta['nogo'] = np.zeros(len(meta))\n new_meta['nogo'][meta['trialerror'].isin([1, 2, 4, 6]).values] = 1\n assert np.sum(new_meta['nogo']) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get cues defined by new terms\n new_meta = {}\n mm_type = ['becomes_unrewarded', 'remains_unrewarded', 'becomes_rewarded']\n inverted_lookup = {v:k for k, v in lookups.lookup_mm[mouse].items()}\n for mm in mm_type:\n new_meta[mm] = np.zeros(len(meta))\n new_meta[mm][meta.initial_condition.isin([inverted_lookup[mm]]).values] = 1\n assert np.sum(new_meta[mm]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get previous cue same\n new_meta = {}\n mm_type = ['becomes_unrewarded', 'remains_unrewarded', 'becomes_rewarded']\n inverted_lookup = {v:k for k, v in lookups.lookup_mm[mouse].items()}\n for mm in mm_type:\n new_meta[f'prev_same_{mm}'] = np.zeros(len(meta))\n new_meta[f'prev_same_{mm}'][meta[f'prev_same_{inverted_lookup[mm]}'].values] = 1\n assert np.sum(new_meta[f'prev_same_{mm}']) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get previous cue diff\n new_meta = {}\n mm_type = ['becomes_unrewarded', 'remains_unrewarded', 'becomes_rewarded']\n inverted_lookup = {v:k for k, v in lookups.lookup_mm[mouse].items()}\n for mm in mm_type:\n new_meta[f'prev_diff_{mm}'] = np.zeros(len(meta))\n not_same = ~meta[f'prev_same_{inverted_lookup[mm]}'].values & meta.initial_condition.isin([inverted_lookup[mm]]).values\n new_meta[f'prev_diff_{mm}'][not_same] = 1\n assert np.sum(new_meta[f'prev_diff_{mm}']) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get salient events\n new_meta = {}\n for col in ['prev_reward', 'prev_punish', 'prev_blank', 'hmm_engaged']:\n new_meta[col] = np.zeros(len(meta))\n new_meta[col][meta[col].values] = 1\n assert np.sum(new_meta[col]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get normalized running and baseline licking\n new_meta = {}\n for col in ['speed', 'pre_speed', 'post_speed', 'pre_licks']:\n new_meta[col] = meta[col].values / np.nanmax(meta[col].values)\n new_meta[col][np.isnan(new_meta[col])] = 0\n assert np.sum(new_meta[col]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n # get normalized dprime\n new_meta = {}\n for col in ['dprime_run']:\n new_meta[col] = meta[col].values - np.nanmin(meta[col].values)\n new_meta[col] = new_meta[col] / np.nanmax(new_meta[col])\n new_meta[col][np.isnan(new_meta[col])] = 0\n assert np.sum(new_meta[col]) > 0\n new_meta_df = pd.DataFrame(data=new_meta, index=meta.index)\n meta_df_out = pd.concat([meta_df_out, new_meta_df], axis=1)\n\n return meta_df_out", "def build_mm_df(sralist):\n\n def convert_to_codon(nts_array):\n \"\"\"\n pysam output is in nucleotides resolution, but scikit_curated_df uses codon resolution.\n This function converts nucleotide arrays to codon length (nts to codon resolution):\n \"\"\"\n \n nts_array = np.array(nts_array)\n 
codon_array = np.sum( np.reshape(A, (int(np.floor(nts_array[1]/3)),3) ), 1)/3.\n\n return codon_array\n\n\n def compute_mm(mmdata):\n \"\"\"\n get per gene average multi-mapping score\n \"\"\"\n\n mm_df = pd.DataFrame(columns=['ORF', 'MM'])\n counter = 0\n\n for gene in mmdata.keys():\n current_matrix = mmdata[gene]\n current_avrg = np.mean( np.sum(current_matrix, 1) / current_matrix.shape[1] )\n mm_df.loc[counter] = [gene, current_avrg]\n counter += 1\n\n return mm_df\n\n\n mm_mat = {}\n mm_pct = {}\n\n N = len(sralist)\n\n for ix, dataset in enumerate(sralist):\n samfile = pysam.AlignmentFile(TMP_DIR+'/ambiguous_reads/'+dataset+'_STAR_transcriptome_multi_mapped_sorted.bam', 'rb')\n genes_list = list(samfile.references)\n print(ix, dataset)\n\n for geneID in genes_list:\n # count the coverage of genomic positions by reads in region.\n # Returns: four array.arrays of the same length in order A C G T\n # The coverage is computed per-base [ACGT]\n cov = samfile.count_coverage(geneID, read_callback='nofilter')\n # Summ all 4 arrays\n cov_sum = np.sum(cov, axis=0)\n #print(geneID, cov_sum)\n codon_cov = convert_to_codon(cov_sum)\n codon_bool = np.asarray([1 if i > 0 else 0 for i in codon_cov])\n \n M = len(codon_bool)\n\n if ix == 0:\n \tmm_mat[geneID] = np.zeros((N,M)) * np.nan\n \n current_matrix = mm_mat[geneID]\n current_matrix[ix,:] = np.copy(codon_bool)\n mm_mat[geneID] = current_matrix\n\n\n mm_avrg = compute_mm(mm_mat)\n #mm_avrg.to_json('yeast_mm.json')\n #mm_avrg.to_csv('yeast_mm.txt', header=True, index=False, sep='\\t')\n\n \n mm_profile = {}\n theta_mm = 5\n for orf in mm_mat.keys():\n current_mat = mm_mat[orf]\n current_bool = np.sum(current_mat, 0) <= theta_mm\n mm_profile[orf] = current_bool\n\n with open('../data/processed/mm_consensus.pkl', 'wb') as f_mm:\n pickle.dump(mm_profile, f_mm)\n\n\n return mm_mat, mm_avrg, mm_profile", "def run_phot_normalization(setup, **params):\n\n log = logs.start_stage_log( setup.red_dir, 'postproc_phot_norm', version=VERSION )\n\n xmatch = crossmatch.CrossMatchTable()\n xmatch.load(params['crossmatch_file'],log=log)\n\n # Identify the datasets to be used as the primary reference in each\n # filter:\n xmatch.id_primary_datasets_per_filter()\n log.info('Identified datasets to be used as the primary references in each filter: '\\\n +repr(xmatch.reference_datasets))\n\n # Add columns to the dataset Table to hold the photometric calibration\n # parameters\n ndset = len(xmatch.datasets)\n ncol = len(xmatch.datasets.colnames)\n if 'norm_a0' not in xmatch.datasets.colnames:\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_a0', index=ncol+1)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_a1', index=ncol+2)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_0', index=ncol+3)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_1', index=ncol+4)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_2', index=ncol+5)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_3', index=ncol+6)\n log.info('Expanded xmatch.datasets table for normalization parameters')\n\n # Extract list of filters from xmatch.images['filter'] column\n filter_list = np.unique(xmatch.images['filter'].data)\n log.info('Identified list of filters to process: '+repr(filter_list))\n\n # Read data from quadrant 1\n # Reading in the timeseries data for all four quadrants is at the very\n # edge of the memory limits on the machines available, so it is preferable\n # to calibrate the quadrant's data separately. 
However, there are sufficient\n # stars in each quantant to be able to determine the photometric calibration\n # from a single quadrant, and apply it to the rest of the image.\n log.info('Loading the timeseries photometry from quadrant 1')\n file_path = path.join(setup.red_dir, params['field_name']+'_quad1_photometry.hdf5')\n phot_data = hd5_utils.read_phot_from_hd5_file(file_path, return_type='array')\n log.info('-> Completed photometry load')\n\n # Identify constant stars in the dataset\n constant_stars = find_constant_stars(xmatch, phot_data, log)\n star = 1\n\n # Normalize the photometry of each dataset to that of the reference\n # image in the primary reference dataset in that filter\n #for filter in filter_list:\n for filter in filter_list:\n\n # Plot an RMS diagram of the lightcurves for all stars in this filter,\n # prior to normalization, for comparison\n image_index = np.where(xmatch.images['filter'] == filter)[0]\n phot_data_filter = phot_data[:,image_index,:]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n qc_col = 16\n\n plot_multisite_rms(params, phot_data_filter, mag_col, mag_err_col, qc_col,\n 'rms_prenorm_'+str(filter)+'.png', log)\n\n # Extract the reference image photometry for the primary-ref dataset\n # for this filter\n ref_datacode = xmatch.reference_datasets[filter]\n sitecode = get_site_code(ref_datacode)\n log.info('Reference dataset in '+filter+' is '+ref_datacode+', sitecode='+sitecode)\n\n ref_phot = np.zeros((len(xmatch.stars),2))\n ref_phot[:,0] = xmatch.stars['cal_'+filter.replace('p','')+'_mag_'+sitecode]\n ref_phot[:,1] = xmatch.stars['cal_'+filter.replace('p','')+'_magerr_'+sitecode]\n\n # Extract the lightcurves for all other datasets in this filter in turn\n dataset_index = np.where(xmatch.datasets['dataset_filter'] == filter)[0]\n\n for idset in dataset_index:\n dset_datacode = xmatch.datasets['dataset_code'][idset]\n dset_sitecode = get_site_code(dset_datacode)\n\n # If the dataset is the reference dataset, replicate the photometric\n # measurements from the corrected columns to the normalized columns,\n # since no normalization is required - this ensures the full\n # lightcurve can be accessed from the normalization columns.\n if dset_datacode == ref_datacode:\n log.info('Replicating primary reference photometry from dataset '\\\n +dset_datacode+' to the normalized photometry columns')\n image_index = np.where(xmatch.images['dataset_code'] == dset_datacode)[0]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n for i in image_index:\n phot_data[:,i,norm_mag_col] = phot_data[:,i,mag_col]\n phot_data[:,i,norm_mag_err_col] = phot_data[:,i,mag_err_col]\n\n # Normalize any dataset that isn't the same as the reference dataset\n else:\n log.info('Normalizing dataset '+dset_datacode+', sitecode='+dset_sitecode)\n image_index = np.where(xmatch.images['dataset_code'] == dset_datacode)[0]\n\n ## Dset created to hold all stars in field, not quadrant - \n # normalization is calculated from whole field.\n dset_phot = np.zeros((len(xmatch.stars),2))\n dset_phot[:,0] = xmatch.stars['cal_'+filter.replace('p','')+'_mag_'+dset_sitecode]\n dset_phot[:,1] = xmatch.stars['cal_'+filter.replace('p','')+'_magerr_'+dset_sitecode]\n\n # Calculate their weighted offset relative to the primary-ref\n # dataset for the filter\n (fit, covar_fit) = calc_phot_normalization(ref_phot, dset_phot,\n constant_stars, 
log,\n diagnostics=True, ref=sitecode,\n dset=dset_sitecode, f=filter)\n\n # Store the fit results for this dataset\n xmatch = store_dataset_phot_normalization(idset, xmatch, fit, covar_fit, log)\n\n # Apply the normalization calibration to the dataset's reference\n # image photometry, and store the results in the xmatch.stars table\n log.info('Applying normalization to the datasets reference image photometry')\n cal_phot = apply_phot_normalization_single_frame(fit, covar_fit, dset_phot,\n 0, 1, log,\n diagnostics=True, ref=sitecode,\n dset=dset_sitecode, f=filter)\n xmatch.stars['norm_'+filter.replace('p','')+'_mag_'+dset_sitecode] = cal_phot[:,0]\n xmatch.stars['norm_'+filter.replace('p','')+'_magerr_'+dset_sitecode] = cal_phot[:,1]\n\n # Apply the photometry calibration to the timeseries data\n # for this dataset\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n phot_data = normalize_timeseries_photometry(phot_data, image_index,\n fit, covar_fit,\n mag_col, mag_err_col,\n norm_mag_col, norm_mag_err_col,\n log)\n\n # Plot a second RMS diagram of the lightcurves for all stars in this\n # filter, post normalization, for comparison\n image_index = np.where(xmatch.images['filter'] == filter)[0]\n phot_data_filter = phot_data[:,image_index,:]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n plot_multisite_rms(params, phot_data_filter, mag_col, mag_err_col, qc_col,\n 'rms_postnorm_'+str(filter)+'.png', log)\n\n\n fig = plt.figure(3,(10,10))\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n idx = np.where(phot_data[star,:,norm_mag_col] > 0.0)[0]\n plt.errorbar(phot_data[star,idx,0], phot_data[star,idx,norm_mag_col],\n yerr=phot_data[star,idx,norm_mag_err_col], fmt='none', color='k')\n (xmin,xmax,ymin,ymax) = plt.axis()\n ymin = max(ymin,14.0)\n ymax = min(ymax,22.0)\n plt.axis([xmin,xmax,ymax,ymin])\n plt.xlabel('HJD')\n plt.ylabel('Mag')\n plt.savefig('Star_'+str(star)+'_lc_norm.png')\n plt.close(3)\n\n # Output updated crossmatch table\n xmatch.save(params['crossmatch_file'])\n\n # Output the photometry for quadrant 1:\n output_quadrant_photometry(params, setup, 1, phot_data, log)\n\n logs.close_log(log)\n\n status = 'OK'\n report = 'Completed successfully'\n return status, report", "def apply(self, data):\n print(\"this is morphism '{}'\".format(self.name))\n data = np.array(data)\n transformed_data = self.transf(data)\n return pd.DataFrame.from_dict({\"transf\": transformed_data.flatten()})", "def motions2hik(motions, device=0, cuda=True):\n\n nreps, njoints, nfeats, nframes = motions.shape\n j2s = joints2smpl(num_frames=nframes, device_id=device, cuda=cuda)\n\n thetas = []\n root_translation = []\n for rep_idx in range(nreps):\n rep_motions = motions[rep_idx].transpose(2, 0, 1) # [nframes, njoints, 3]\n\n if nfeats == 3:\n print(f'Running SMPLify for repetition [{rep_idx + 1}] of {nreps}, it may take a few minutes.')\n motion_tensor, opt_dict = j2s.joint2smpl(rep_motions) # [nframes, njoints, 3]\n motion = motion_tensor.cpu().numpy()\n\n elif nfeats == 6:\n motion = rep_motions\n thetas.append(rep_motions)\n\n # Convert 6D rotation representation to Euler angles\n thetas_6d = motion[0, :-1, :, :nframes].transpose(2, 0, 1) # [nframes, njoints, 6]\n thetas_deg = []\n for frame, d6 in enumerate(thetas_6d):\n thetas_deg.append([_rotation_6d_to_euler(d6)])\n\n 
thetas.append([np.concatenate(thetas_deg, axis=0)])\n root_translation.append([motion[0, -1, :3, :nframes].transpose(1, 0)]) # [nframes, 3]\n\n thetas = np.concatenate(thetas, axis=0)[:nframes]\n root_translation = np.concatenate(root_translation, axis=0)[:nframes]\n\n data_dict = {\n 'joint_map': JOINT_MAP,\n 'thetas': thetas.tolist(), # [nreps, nframes, njoints, 3 (deg)]\n 'root_translation': root_translation.tolist(), # [nreps, nframes, 3 (xyz)]\n }\n\n return data_dict", "def computeCoarseAlignment(self, TiltSeries_, mute=True, outfile='', optimizeShift=True, logfile_residual=''):\n #print('ref index: ', numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0], TiltSeries_._TiltAlignmentParas.ireftilt )\n (psiindeg, shiftX, shiftY, x, y, z, distLine, diffX, diffY,\n shiftVarX, shiftVarY) = alignmentFixMagRot(\n Markers_=self._Markers, cTilt=self._cTilt, sTilt=self._sTilt,\n ireftilt=numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0],\n irefmark=TiltSeries_._TiltAlignmentParas.irefmark,\n r=TiltSeries_._TiltAlignmentParas.r, imdim=TiltSeries_._imdim,imdimX=TiltSeries_._imdimX, imdimY=TiltSeries_._imdimY,\n handflip=TiltSeries_._TiltAlignmentParas.handflip, mute=mute, writeResults=outfile,\n optimizeShift=optimizeShift, logfile_residual=logfile_residual)\n if not mute:\n print((\"Tilt Axis: %.2f\" % psiindeg))\n # copy parameters to TiltSeries\n ireftilt = numpy.argwhere( self._projIndices.astype(int) == TiltSeries_._TiltAlignmentParas.ireftilt)[0][0]\n self._alignmentRotations = numpy.array(self._ntilt * [psiindeg])\n self.setRotationsInTiltSeries(TiltSeries_)\n self._alignmentTransX = shiftX\n self._alignmentTransY = shiftY\n self.set_TranslationsInTiltSeries(TiltSeries_)\n self.Psi = psiindeg\n\n for (imark, Marker) in enumerate(self._Markers):\n Marker.set_r(numpy.array([x[imark], y[imark], z[imark]]))\n # if not optimizeShift:\n # Marker.set_r(numpy.array([x[imark] + 6.326546124766944 , y[imark] + 5.187672225662868, z[imark]]))", "def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n # compute the matrix for the scale and rotation correction\n shift = (np.asarray(shift) - np.dot(self._wcslin.wcs.crpix, matrix) +\n self._wcslin.wcs.crpix)\n\n matrix = inv(matrix).T\n\n cwcs = self._wcs.deepcopy()\n\n # estimate step for numerical differentiation. 
We need a step\n # large enough to avoid rounding errors and small enough to get a\n # better precision for numerical differentiation.\n # TODO: The logic below should be revised at a later time so that it\n # better takes into account the two competing requirements.\n crpix1, crpix2 = self._wcs.wcs.crpix\n hx = max(1.0, min(20.0, (crpix1 - 1.0) / 100.0,\n (self._wcs.pixel_shape[0] - crpix1) / 100.0))\n hy = max(1.0, min(20.0, (crpix2 - 1.0) / 100.0,\n (self._wcs.pixel_shape[1] - crpix2) / 100.0))\n\n # compute new CRVAL for the image WCS:\n crpixinref = self._wcslin.wcs_world2pix(\n self._wcs.wcs_pix2world([self._wcs.wcs.crpix], 1), 1)\n crpixinref = np.dot(crpixinref - shift, matrix.T).astype(np.float64)\n self._wcs.wcs.crval = self._wcslin.wcs_pix2world(crpixinref, 1)[0]\n self._wcs.wcs.set()\n\n # approximation for CD matrix of the image WCS:\n (U, u) = _linearize(cwcs, self._wcs, self._wcslin, self._wcs.wcs.crpix,\n matrix, shift, hx=hx, hy=hy)\n self._wcs.wcs.cd = np.dot(self._wcs.wcs.cd.astype(np.longdouble),\n U).astype(np.float64)\n self._wcs.wcs.set()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)", "def mirrorTransformations_Custom(self):\n\n pass", "def preprocess_gene(gene_id,data_dict,t2g_mapping,out_paths,locks):\n \n # features = ['read_id','transcript_id','transcriptomic_position','reference_kmer','norm_mean','start_idx','end_idx'] # columns in the eventalign file per read.\n\n events = []\n condition_labels = []\n run_labels = []\n read_ids = []\n genomic_coordinates = []\n \n # Concatenate\n# if len(data_dict) == 0:\n# return\n\n\n for read_index,events_per_read in data_dict.items():\n# if len(events_per_read) > 0:\n # ===== transcript to gene coordinates ===== # TODO: to use gtf.\n# tx_ids = [tx_id.decode('UTF-8').split('.')[0] for tx_id in events_per_read['transcript_id']]\n tx_ids = [tx_id for tx_id in events_per_read['transcript_id']] \n tx_positions = events_per_read['transcriptomic_position']\n genomic_coordinate = list(itemgetter(*zip(tx_ids,tx_positions))(t2g_mapping)) # genomic_coordinates -- np structured array of 'chr','gene_id','genomic_position','kmer'\n genomic_coordinate = np.array(genomic_coordinate,dtype=np.dtype([('chr','<U2'),('gene_id','<U15'),('genomic_position','<i4'),('g_kmer','<U5')]))\n # ===== \n\n # Based on Ensembl, remove transcript version.\n\n events_per_read['transcript_id'] = tx_ids\n events_per_read = np.array(events_per_read,dtype=np.dtype([('transcript_id', 'S15'), ('transcriptomic_position', '<i8'), ('reference_kmer', 'S5'), ('norm_mean', '<f8')]))\n\n #\n\n events += [events_per_read]\n genomic_coordinates += [genomic_coordinate]\n n_events_per_read = len(events_per_read)\n# else:\n# print(read_index,len(events_per_read))\n\n events = np.concatenate(events)\n genomic_coordinates = np.concatenate(genomic_coordinates)\n \n # Sort and split # \n# idx_sorted = np.lexsort((events['reference_kmer'],genomic_coordinates['genomic_position'],genomic_coordinates['gene_id']))\n# key_tuples, index = np.unique(list(zip(genomic_coordinates['gene_id'][idx_sorted],genomic_coordinates['genomic_position'][idx_sorted],events['reference_kmer'][idx_sorted])),return_index = True,axis=0) #'chr',\n# y_arrays = np.split(events['norm_mean'][idx_sorted], index[1:])\n# # read_id_arrays = np.split(events['read_id'][idx_sorted], index[1:])\n# g_kmer_arrays = np.split(genomic_coordinates['g_kmer'][idx_sorted], index[1:])\n\n idx_sorted = 
np.argsort(genomic_coordinates['genomic_position'])\n unique_positions, index = np.unique(genomic_coordinates['genomic_position'][idx_sorted],return_index = True)\n y_arrays = np.split(events['norm_mean'][idx_sorted], index[1:])\n # read_id_arrays = np.split(events['read_id'][idx_sorted], index[1:])\n g_kmer_arrays = np.split(genomic_coordinates['g_kmer'][idx_sorted], index[1:])\n g_positions_arrays = np.split(genomic_coordinates['genomic_position'][idx_sorted], index[1:])\n\n # Prepare\n # print('Reformating the data for each genomic position ...')\n data = defaultdict(dict)\n # for each position, make it ready for json dump\n# data = dict(zip(key_tuples, y_arrays))\n\n asserted = True\n# for key_tuple,y_array,g_kmer_array in zip(key_tuples,y_arrays,g_kmer_arrays):\n for position,y_array,g_kmer_array,g_positions_array in zip(unique_positions,y_arrays,g_kmer_arrays,g_positions_arrays):\n# gene_id,position,kmer = key_tuple \n if (len(set(g_kmer_array)) == 1) and ('XXXXX' in set(g_kmer_array)) or (len(y_array) == 0):\n continue\n \n if 'XXXXX' in set(g_kmer_array):\n y_array = y_array[g_kmer_array != 'XXXXX'] \n assert len(y_array) == len(g_kmer_array) - (g_kmer_array=='XXXXX').sum()\n g_kmer_array = g_kmer_array[g_kmer_array != 'XXXXX'] \n \n try:\n assert len(set(g_kmer_array)) == 1\n assert {position} == set(g_positions_array)\n except:\n asserted = False\n break\n kmer = set(g_kmer_array).pop()\n\n data[position] = {kmer: list(y_array)} #,'read_ids': [read_id.decode('UTF-8') for read_id in read_id_array]}\n \n # write to file.\n log_str = '%s: %s' %(gene_id,asserted)\n\n with locks['json'], open(out_paths['json'],'a') as f:\n\n pos_start = f.tell()\n f.write('{')\n f.write('\"%s\":' %gene_id)\n ujson.dump(data, f)\n f.write('}\\n')\n pos_end = f.tell()\n\n with locks['index'], open(out_paths['index'],'a') as f:\n f.write('%s,%d,%d\\n' %(gene_id,pos_start,pos_end))\n \n with locks['readcount'], open(out_paths['readcount'],'a') as f: #todo: repeats no. 
of tx >> don't want it.\n n_reads = len(data_dict)\n f.write('%s,%d\\n' %(gene_id,n_reads))\n \n with locks['log'], open(out_paths['log'],'a') as f:\n f.write(log_str + '\\n')", "def set_correction(self, matrix=[[1, 0], [0, 1]], shift=[0, 0], meta=None,\n **kwargs):\n frms = self._wcs.available_frames\n\n # if original WCS did not have tangent-plane corrections, create\n # new correction and add it to the WCs pipeline:\n if self._tpcorr is None:\n self._tpcorr = TPCorr(\n v2ref=self._wcsinfo['v2_ref'] / 3600.0,\n v3ref=self._wcsinfo['v3_ref'] / 3600.0,\n roll=self._wcsinfo['roll_ref'],\n matrix=matrix,\n shift=shift,\n name='tangent-plane linear correction'\n )\n idx_v2v3 = frms.index(self._v23name)\n pipeline = deepcopy(self._wcs.pipeline)\n pf, pt = pipeline[idx_v2v3]\n pipeline[idx_v2v3] = (pf, deepcopy(self._tpcorr))\n frm_v2v3corr = deepcopy(pf)\n frm_v2v3corr.name = 'v2v3corr'\n pipeline.insert(idx_v2v3 + 1, (frm_v2v3corr, pt))\n self._wcs = gwcs.WCS(pipeline, name=self._owcs.name)\n self._v23name = 'v2v3corr'\n\n else:\n # combine old and new corrections into a single one and replace\n # old transformation with the combined correction transformation:\n tpcorr2 = self._tpcorr.__class__(\n v2ref=self._tpcorr.v2ref, v3ref=self._tpcorr.v3ref,\n roll=self._tpcorr.roll, matrix=matrix, shift=shift,\n name='tangent-plane linear correction'\n )\n\n self._tpcorr = tpcorr2.combine(tpcorr2, self._tpcorr)\n\n idx_v2v3 = frms.index(self._v23name)\n pipeline = deepcopy(self._wcs.pipeline)\n pipeline[idx_v2v3 - 1] = (pipeline[idx_v2v3 - 1][0],\n deepcopy(self._tpcorr))\n self._wcs = gwcs.WCS(pipeline, name=self._owcs.name)\n\n # reset definitions of the transformations from detector/world\n # coordinates to the tangent plane:\n self._update_transformations()\n\n # save linear transformation info to the meta attribute:\n super().set_correction(matrix=matrix, shift=shift, meta=meta, **kwargs)", "def align_reconstruction_no_numpy(reconstruction, anchor_points_dict):\n modified_shots_dict = {}\n all_anchor_shot_ids = sorted(anchor_points_dict.keys())\n for i in range(len(all_anchor_shot_ids) - 1):\n anchor_coords = []\n recon_coords = []\n\n for j in range(2):\n shot_id = all_anchor_shot_ids[i+j]\n anchor_coords.append(anchor_points_dict[shot_id])\n o = get_origin_no_numpy_opencv(reconstruction.shots[shot_id].pose.rotation,\n reconstruction.shots[shot_id].pose.translation)\n\n recon_coords.append(o)\n\n s, A, b = get_affine_transform_2d_no_numpy(anchor_coords, recon_coords)\n\n start_shot_id = all_anchor_shot_ids[i]\n end_shot_id = all_anchor_shot_ids[i+1]\n\n # in first iteration, we transform from first shot of recon\n # in last iteration, we transform until last shot of recon\n shot_ids = sorted(reconstruction.shots.keys())\n if i == 0:\n start_shot_id = shot_ids[0]\n\n if i == len(anchor_points_dict)-2:\n end_shot_id = shot_ids[-1]\n\n new_dict = {}\n\n start_index = _shot_id_to_int(start_shot_id)\n end_index = _shot_id_to_int(end_shot_id)\n\n # transform pdr shots\n for i in range(start_index, end_index + 1):\n shot_id = _int_to_shot_id(i)\n\n if shot_id in reconstruction.shots:\n X = get_origin_no_numpy_opencv(reconstruction.shots[shot_id].pose.rotation,\n reconstruction.shots[shot_id].pose.translation)\n A_dot_X = [A[0][0] * X[0] + A[0][1] * X[1] + A[0][2] * X[2],\n A[1][0] * X[0] + A[1][1] * X[1] + A[1][2] * X[2],\n A[2][0] * X[0] + A[2][1] * X[1] + A[2][2] * X[2]]\n Xp = [i * s + j for i, j in zip(A_dot_X, b)]\n new_dict[shot_id] = [Xp[0], Xp[1], Xp[2]]\n\n 
modified_shots_dict.update(new_dict)\n\n return modified_shots_dict", "def pan_corr(file):\n\n # # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n #\n # # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_1D_py.tif'\n # in_ref = in_path + in_pan_ref_file\n #\n # inreffil = gdal.Open(in_ref)\n # image_ref = inreffil.ReadAsArray()\n # # size_ref = image_ref.shape\n # # pix_count = size_ref[0]*size_ref[1]\n #\n # image_ref = image_ref[800:930, 1420:1640]\n # size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n #\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # raz_g1 = 1\n # raz_g2 = g1_avg/g2_avg\n # raz_r1 = g1_avg/r1_avg\n # raz_b1 = g1_avg/b1_avg\n #\n # avg = (g1+g2+r1+b1)/pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n raz_g1 = 1\n raz_g2 = 1.0245196396115988\n raz_r1 = 1.0131841989689434\n raz_b1 = 1.0517113199247086\n\n print('razmerje:', raz_g1, raz_g2, raz_r1, raz_b1)\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_4D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Slo_PAN\\_26_30\\\\'\n # in_pan_ref_file = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"tif\")]\n\n \n\n \n\n # print('image', i)\n in_ref=file\n inreffil = gdal.Open(in_ref)\n image_ref = inreffil.ReadAsArray()\n size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n # pix_count = np.count_nonzero(image_ref)\n # pix_count = 3664*650\n\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # avg = (g1 + g2 + r1 + b1) / pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n # popravek\n im_p_pop = np.zeros((size_ref[0], size_ref[1]), np.uint16)\n\n\n for i in range(size_ref[0]):\n for j in range(size_ref[1]):\n if (i % 2) == 0 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g1\n if (i % 2) == 1 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g2\n if (i % 2) == 0 and (j % 2) == 1 and 
image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_r1\n if (i % 2) == 1 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_b1\n \n _,_,_,_,P=return_flatfield_set_path(2)\n P_flat=gdal_array.LoadFile(P)\n \n # im_p_pop=simple_flatfield_corr(P_flat, im_p_pop, 2, 1) \n \n # outout\n \n im_p_pop=BLUE_simple_flatfield_corr(P_flat, im_p_pop)\n \n out=os.path.abspath(file)+\"/corr/\"+os.path.basename(file)[:-4] + \"_pop_flat_corr.tif\"\n\n \n # out = in_ref[:-4] + \"_pop_flat_corr.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n # outRaster = driver.Create(out, size[1], size[0], 3, gdal.GDT_UInt16)\n outRaster = driver.Create(out, size_ref[1], size_ref[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(im_p_pop)\n outband.FlushCache()", "def generate_transformations(self):\n if self.perform_aug:\n print(\"\\nAugmentation will be applied to the training images\")\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)),\n transforms.RandomRotation(degrees=45),\n transforms.ColorJitter(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n \"val\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)), # 256 used to be\n transforms.CenterCrop(self.input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n }\n else:\n print(\"\\nNo augmentation will be applied to the training images\")\n data_transforms = {\n \"train\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\n \"val\": transforms.Compose([\n transforms.Resize((self.input_size, self.input_size)), # 256 used to be\n transforms.CenterCrop(self.input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n }\n\n return data_transforms", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def align_images(self, translation_only=False):\n # Extract feature point locations and descriptors.\n points_and_descriptors = []\n for file in self.files:\n image = sol4_utils.read_image(file, 1)\n self.h, self.w = image.shape\n pyramid, _ = sol4_utils.build_gaussian_pyramid(image, 3, 7)\n points_and_descriptors.append(find_features(pyramid))\n\n # Compute homographies between successive pairs of images.\n Hs = []\n for i in range(len(points_and_descriptors) - 1):\n points1, points2 = points_and_descriptors[i][0], points_and_descriptors[i + 1][0]\n desc1, desc2 = points_and_descriptors[i][1], points_and_descriptors[i + 1][1]\n\n # Find matching feature points.\n ind1, ind2 = match_features(desc1, desc2, .7)\n points1, points2 = points1[ind1, :], points2[ind2, :]\n\n # Compute homography using RANSAC.\n H12, inliers = ransac_homography(points1, points2, 100, 6, translation_only)\n\n # Uncomment for debugging: display inliers and 
outliers among matching points.\n # In the submitted code this function should be commented out!\n # display_matches(self.images[i], self.images[i+1], points1 , points2, inliers)\n\n Hs.append(H12)\n\n # Compute composite homographies from the central coordinate system.\n accumulated_homographies = accumulate_homographies(Hs, (len(Hs) - 1) // 2)\n self.homographies = np.stack(accumulated_homographies)\n self.frames_for_panoramas = filter_homographies_with_translation(self.homographies, minimum_right_translation=5)\n self.homographies = self.homographies[self.frames_for_panoramas]", "def reiniciarMatrix(self):\n self.matrixMAPA = []\n self.rellenarMatrix()", "def createMatrices(file, word2Idx, maxSentenceLen, extendMapping, labelsMapping, aspectMapping, typeMapping, tenseMapping, eventClassMapping, distanceMapping, sentenceLengthMapping):\n labels = []\n eventMatrix = []\n timeMatrix = []\n\n sentenceLengths = []\n sentenceMatrix = []\n positionMatrix_e = []\n positionMatrix_t = []\n\n aspectMatrix = []\n typeMatrix = []\n tenseMatrix = []\n eventClassMatrix = []\n featuresList = []\n \n \n minDistance = 0\n for distanceKey in distanceMapping.iterkeys():\n if isinstance(distanceKey, (int, long)):\n minDistance = min(minDistance, int(distanceKey))\n \n for line in open(file):\n features = eval(line.strip())\n featuresList.append(features)\n label = features['label']\n event = features[\"Token[0]\"]\n eventPosition = int(features[\"eventPosition\"])\n tokens = features[\"textInBetween\"] if \"textInBetween\" in features else features[\"sentence\"]\n \n aspect = features[\"aspect\"]\n tense = features[\"tense\"]\n eventClass = features[\"eventClass\"]\n \n labels.append(labelsMapping[label.lower()] if label.lower() in labelsMapping else -1)\n eventMatrix.append(getWordIdx(event, word2Idx))\n \n aspectMatrix.append(getMappingIdx(aspect, aspectMapping, extendMapping))\n tenseMatrix.append(getMappingIdx(tense, tenseMapping, extendMapping))\n eventClassMatrix.append(getMappingIdx(eventClass, eventClassMapping, extendMapping))\n \n if 'TimeTokenFirst' in features:\n time = features['TimeTokenFirst']\n timeMatrix.append(getWordIdx(time, word2Idx))\n \n if 'type' in features:\n type = features[\"type\"]\n typeMatrix.append(getMappingIdx(type, typeMapping, extendMapping))\n \n timePosition = int(features[\"timeFirstPosition\"]) if 'timeFirstPosition' in features else 0\n \n\n if len(tokens) in sentenceLengthMapping:\n sentenceLengths.append(sentenceLengthMapping[len(tokens)])\n else:\n sentenceLengths.append(sentenceLengthMapping['GreaterMax'])\n\n \n tokenIds = np.zeros(maxSentenceLen)\n positionValues_e = np.zeros(maxSentenceLen)\n positionValues_t = np.zeros(maxSentenceLen)\n for idx in xrange(0, min(maxSentenceLen, len(tokens))):\n tokenIds[idx] = getWordIdx(tokens[idx], word2Idx)\n \n distance_e = idx - eventPosition\n distance_t = idx - timePosition\n\n if distance_e in distanceMapping:\n positionValues_e[idx] = distanceMapping[distance_e]\n elif distance_e <= minDistance:\n positionValues_e[idx] = distanceMapping['LowerMin']\n else:\n positionValues_e[idx] = distanceMapping['GreaterMax']\n \n if distance_t in distanceMapping:\n positionValues_t[idx] = distanceMapping[distance_t]\n elif distance_t <= minDistance:\n positionValues_t[idx] = distanceMapping['LowerMin']\n else:\n positionValues_t[idx] = distanceMapping['GreaterMax']\n\n sentenceMatrix.append(tokenIds)\n positionMatrix_e.append(positionValues_e)\n positionMatrix_t.append(positionValues_t)\n\n \n labels = np.array(labels, 
dtype='int32')\n eventMatrix = np.expand_dims(np.array(eventMatrix, dtype='int32'), axis=1)\n timeMatrix = np.expand_dims(np.array(timeMatrix, dtype='int32'), axis=1)\n aspectMatrix = np.expand_dims(np.array(aspectMatrix, dtype='int32'), axis=1)\n typeMatrix = np.expand_dims(np.array(typeMatrix, dtype='int32'), axis=1)\n tenseMatrix = np.expand_dims(np.array(tenseMatrix, dtype='int32'), axis=1)\n eventClassMatrix = np.expand_dims(np.array(eventClassMatrix, dtype='int32'), axis=1)\n sentenceLengths = np.expand_dims(np.array(sentenceLengths, dtype='int32'), axis=1)\n \n \n sentenceMatrix = np.array(sentenceMatrix, dtype='int32')\n positionMatrix_e = np.array(positionMatrix_e, dtype='int32')\n positionMatrix_t = np.array(positionMatrix_t, dtype='int32')\n \n\n\n return {'labels': labels,\n 'event':eventMatrix, \n 'time':timeMatrix, \n 'sentence':sentenceMatrix, \n 'positions_e':positionMatrix_e, \n 'positions_t':positionMatrix_t, \n 'aspect':aspectMatrix, \n 'tense':tenseMatrix, \n 'eventClass':eventClassMatrix, \n 'type': typeMatrix,\n 'sentence_len': sentenceLengths,\n 'features': featuresList}", "def transform(self, X):\n\n t0 = time.perf_counter()\n check_is_fitted(self)\n self.check_external_components_modified()#[WARN] in d3m, primitives can \"restore\" private class variables...\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"], reset=False)\n t1 = time.perf_counter()\n\n if X.shape[1] != self.components_af_.shape[1]:\n raise ValueError(\n \"Impossible to perform projection:\"\n \"X at fit stage had a different number of features. \"\n \"(%s != %s)\" % (X.shape[1], self.components_af_.shape[1])\n )\n\n #X_new = safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)\n #import pdb; pdb.set_trace()\n X_af = af.interop.from_ndarray(X).as_type(self.components_af_.dtype())\n X_new = af.matmulNT(X_af, self.components_af_)\n X_new = X_new.to_ndarray()\n t2 = time.perf_counter()\n return X_new", "def preprocess(self):\n inputMatrix = pd.read_csv(self.input, index_col = 0)\n\n original_filename = self.input.split(\"/\")[-1]\n mapped_filename = \"mapped_\" + self.desiredFormat + \"_\" + original_filename\n output = self.input\n output_filepath = \"/\".join(self.input.split(\"/\")[0:-1])\n #as the DataFormatter always transposes the data before any further processing, we can expect all genes to be in the columns\n genesInColumn = \"true\"\n #only map genes if the current format is not the desired format\n if (self.currentFormat != self.desiredFormat):\n output = output_filepath + \"/\" + mapped_filename\n benchutils.mapDataMatrix(inputMatrix, genesInColumn, self.currentFormat, self.desiredFormat, output, self.labeled)\n\n return output" ]
[ "0.62273926", "0.6207363", "0.6104549", "0.5850489", "0.58403313", "0.58327806", "0.5827466", "0.5725734", "0.5681582", "0.56439", "0.5603615", "0.5602365", "0.5596903", "0.5577693", "0.555407", "0.5526825", "0.5518143", "0.5506637", "0.5487023", "0.5475069", "0.54521763", "0.54350317", "0.54161745", "0.53889203", "0.5372838", "0.536524", "0.5362892", "0.53603494", "0.5355388", "0.53502476", "0.5342728", "0.53392434", "0.5332614", "0.53318256", "0.5330306", "0.5314524", "0.5300495", "0.52994806", "0.5297486", "0.5285395", "0.52770185", "0.5269956", "0.52256763", "0.52124995", "0.5207145", "0.5197813", "0.5195588", "0.5192883", "0.5188637", "0.5187531", "0.5186836", "0.51834", "0.5165309", "0.5156512", "0.51475817", "0.51468784", "0.51353276", "0.5119405", "0.5116644", "0.51156557", "0.5109496", "0.51088977", "0.51020604", "0.50936353", "0.5090827", "0.5090827", "0.5085803", "0.5075744", "0.5061394", "0.50505054", "0.50451446", "0.5026819", "0.50219977", "0.50108826", "0.5005841", "0.5003727", "0.4991218", "0.49900836", "0.49880436", "0.49835762", "0.49827647", "0.49754396", "0.49713415", "0.49713227", "0.49624228", "0.49619067", "0.49594814", "0.49590448", "0.4957838", "0.49543044", "0.4953803", "0.4951752", "0.49468637", "0.494229", "0.4941374", "0.4940706", "0.49354962", "0.4935158", "0.49322695", "0.4932221" ]
0.5767431
7
Motion correct using 3dvolreg. No slicetime correction.
def Motcor(self, info, base):
    fmt = '3dvolreg -prefix %s -twopass %s -verbose -base %s+orig[%s] ' + \
          '-dfile %s %s+orig'
    cmd = fmt % (info['imgfile_m'], info['motion_interp'], \
                 info['basefile'], base, info['mot_file'], info['imgfile_t'])
    self.CheckExec(cmd, ['%s+orig.BRIK' % info['imgfile_m'], \
                         '%s+orig.HEAD' % info['imgfile_m']])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CorrectMotion(self):\n if self.verbose:\n print \"Correct for motion\"\n for entry in self.entry_map['epi']:\n info = self.info[entry]\n\n if os.path.exists(info['imgfile_m'] + info['suffix']):\n return\n# Always use brik for 3dDeconvolve.\n suffix = '+orig'\n epifile = '%s%s' % (info['imgfile'], suffix)\n prefix = info['imgfile_m']\n base_entry = info['base_entry']\n if info['base'] == 'start':\n# Use the first frame specified in template file. Defaults\n# to zero.\n base = info['motion_ref_frame']\n else:\n# Use the last frame.\n base = self.info[base_entry]['tdim'] - info['skip']-1\n base = ('%d' % base).replace(' ','')\n\n# Correct for slice-timing.\n self.SliceTimeCorrect(info, epifile)\n\n plane = info['plane']\n anat_tgt = info['anat_tgt']\n# anat_entry = self.anat_entry[plane]\n\n if info['catmats']:\n# Include additonal transformation in motion correction such\n# that final image is in register with the fieldmap, which has\n# been registered to the structural image that will be used for\n# spatial normalization.\n self.MotcorCatenate(info, base, anat_tgt)\n else:\n# Assume fieldmap is in register with the structural.\n self.Motcor(info, base)\n\n if info.get('fmapname', None) is None:\n# No fieldmap correction.\n if self.fsl_flip:\n# Flip the way fslview likes it.\n self.FSLFlip(info['imgfile_m'], info['imgfile_final'])\n elif info['suffix'] == '.nii':\n# Copy motion-corrected images from /tmp to output directory\n outfile = info['imgfile_final'] + info['suffix']\n cmd = '3dcopy %s+orig %s' % (info['imgfile_m'], outfile)\n self.CheckExec(cmd, [outfile], force=True)\n cmd = '/bin/rm %s+orig*' % info['imgfile_m']\n self.CheckExec(cmd, [], force=True)", "def test_revolute(self):\n # Rotate around the z axis\n r = Joint.revolute(np.array([0, 0, 1]))\n t_mat = r(np.array([np.pi / 2]))\n rot_vec = np.dot(t_mat, np.array([1, 0, 0, 1]))[:3]\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))", "def on_mouse_move(self, event):\n\n # self.view = 1 * np.eye(4, dtype=np.float32)\n # self.model = 1 * np.eye(4, dtype=np.float32)\n\n # self.translate -= event.delta[1]\n # self.translate = max(-1, self.translate)\n # print(event.delta[1])\n # print(self.translate)\n # self.view = translate((0, 0, -self.translate))\n # self.game_program['u_view'] = self.view\n # self.game_program['u_size'] = 5 / self.translate\n # self.view = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.view\n # self.model = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.model\n # print(self.view)\n\n # self.game_program['u_model'] = self.model\n # self.game_program['u_view'] = self.view\n\n x, y = event.pos\n #print(x, y)\n self.x_offset, self.y_offset = x - self.last_x, - (y - self.last_y)\n self.last_x, self.last_y = x, y\n self.x_offset *= self.sensitivity\n self.y_offset *= self.sensitivity\n\n self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()", "def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)", "def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def testNonVarying(self):\n photoCalib = 
lsst.afw.image.PhotoCalib(self.calibration)\n self._testPhotoCalibCenter(photoCalib, 0)\n\n self.assertEqual(1, photoCalib.instFluxToMaggies(self.instFlux, self.pointXShift))\n self.assertEqual(0, photoCalib.instFluxToMagnitude(self.instFlux, self.pointXShift))\n result = photoCalib.instFluxToMaggies(self.instFlux, self.instFluxErr)\n self.assertEqual(1, result.value)\n\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration, self.calibrationErr)\n self._testPhotoCalibCenter(photoCalib, self.calibrationErr)\n\n # constant, with a bbox\n photoCalib = lsst.afw.image.PhotoCalib(self.calibration, bbox=self.bbox)\n self._testPhotoCalibCenter(photoCalib, 0)", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def update_rk3(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n try:\n\n kap1 = (self.vel, self.force(self.pos,\n self.vel,\n self.time))\n\n pos = self.pos+0.5*delta_t*kap1[0]\n vel = self.vel+0.5*delta_t*kap1[1]\n\n self.check_collision_full(pos, self.pos,\n vel, self.vel,\n 0.5*delta_t, drag=False)\n\n kap2 = (vel, self.force(pos, vel, self.time+0.5*delta_t))\n\n pos = self.pos+delta_t*(2.0*kap2[0]-kap1[0])\n vel = self.vel+delta_t*(2.0*kap2[1]-kap1[1])\n\n self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=False)\n\n kap3 = (vel, self.force(pos, vel, self.time+delta_t))\n\n pos = self.pos+delta_t*(kap1[0]+4.0*kap2[0]+kap3[0])/6.0\n vel = self.vel+delta_t*(kap1[1]+4.0*kap2[1]+kap3[1])/6.0\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=False)\n\n except Collision.CollisionException as col:\n col.vel = self.vel+col.delta_t*kap1[0]\n raise col\n\n self.time += self.delta_t", "def radio3mmPointingConstants( az, el, sag, ant, subarray=DEFAULT):\n \n mproot = \"Control.Antenna%d.Aperture3mm.PointingConstants.\" %ant\n azValid = False\n elValid = False\n sagValid = False\n numInvalid = 0\n try :\n azDelta = az - queryDouble(mproot+\"azOffset\") \n azValid = True\n except:\n numInvalid += 1\n try :\n elDelta = el - queryDouble(mproot+\"elOffset\") \n elValid = True\n except:\n numInvalid += 1\n try :\n sagDelta = sag - queryDouble(mproot+\"sag\") \n sagValid = True\n except:\n numInvalid += 1\n allValid = azValid and elValid and sagValid\n #print \"Deltas:\", azDelta, elDelta, sagDelta\n\n R3MM = carma.control.APERTURE_RADIO3MM\n multiSubarray('aperturePointingConstants',subarray, R3MM, az, el, sag, ant)\n\n if ant > 15: return\n \n # Apply deltas to the 1mm aperture\n prefix = \"\"\n if not allValid:\n m = \"Could not get the initial value\"\n if numInvalid > 1: m += \"s\"\n m += \" for the 3mm\"\n if not azValid:\n m += prefix + \" az\"\n prefix = \",\"\n if not elValid:\n m += prefix + \" el\"\n prefix = \",\"\n if not sagValid:\n m += prefix + \" sag\"\n m += \" term\" \n if numInvalid > 1: m += \"s\"\n m += \".\"\n print m\n m = \"The 3mm offsets have been changed but the relative 1mm offsets\\n\"\n m += \"will not be changed because of the missing initial 3mm values.\\n\"\n m += \"This is a serious error \"\n m += \"that should be reported to the operations staff!!\"\n print m\n return\n \n mproot = \"Control.Antenna%d.Aperture1mm.PointingConstants.\" %ant \n az1mm = azDelta + queryDouble(mproot+\"azOffset\") \n el1mm = elDelta + queryDouble(mproot+\"elOffset\") \n 
sag1mm = sagDelta + queryDouble(mproot+\"sag\") \n R1MM = carma.control.APERTURE_RADIO1MM\n #print \"1mm constants:\", az1mm, el1mm, sag1mm\n multiSubarray('aperturePointingConstants', subarray, R1MM, \n az1mm, el1mm,sag1mm, ant)", "def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)", "def test_backward_injection_in_3D(self):\n self.fom = ModeMatch(monitor_name = 'figure_of_merit',\n mode_number = 1,\n direction = 'Backward',\n multi_freq_src = True,\n target_T_fwd = lambda wl: np.ones(wl.size),\n norm_p = 1)\n Optimization.set_source_wavelength(self.sim, 'source', self.fom.multi_freq_src, len(self.wavelengths))\n self.sim.fdtd.setnamed('FDTD','dimension','3D')\n self.sim.fdtd.setnamed('source', 'x', -self.sim.fdtd.getnamed('source','x'))\n self.sim.fdtd.setnamed('source','direction','Backward')\n self.sim.fdtd.setnamed('figure_of_merit','x', -self.sim.fdtd.getnamed('figure_of_merit','x'))\n self.fom.initialize(self.sim)\n self.fom.make_forward_sim(self.sim)\n self.sim.run(name = 'modematch_backward_injection_in_3D', iter = 1)\n FOM = self.fom.get_fom(self.sim)\n self.assertAlmostEqual(FOM, self.ref_fom, 4)", "def update_ab3(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 2:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = -(1.0/6.0)*delta_t*(delta_t*(5.0*delta_t+3.0*(self.time-self.get_old(0, 2)))\n /((self.time-self.get_old(0, 2))*(self.get_old(0, 2)-self.get_old(1, 2))))\n gamma = (1.0/6.0)*delta_t*(delta_t*(2.0*delta_t+3.0*(self.time-self.get_old(0, 2)))\n /((self.time-self.get_old(1, 2))*(self.get_old(0, 2)-self.get_old(1, 2))))\n\n pos = self.pos+(delta_t-beta-gamma)*self.vel+beta*self.get_old(0, 0)+gamma*self.get_old(1, 0)\n vel = self.vel+(delta_t-beta-gamma)*kap[1]+beta*self.get_old(0, 1)+gamma*self.get_old(1, 1)\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = -(1.0/6.0)*col.delta_t*(col.delta_t*(5.0*col.delta_t+3.0*(self.time-self.get_old(0, 2)))\n /((self.time-self.get_old(0, 2))*(self.get_old(0, 2)-self.get_old(1, 2))))\n gamma = (1.0/6.0)*col.delta_t*(col.delta_t*(2.0*col.delta_t+3.0*(self.time-self.get_old(0, 2)))\n /((self.time-self.get_old(1, 2))*(self.get_old(0, 2)-self.get_old(1, 2))))\n vel = self.vel+(col.delta_t-beta-gamma)*kap[1]+beta*self.get_old(0, 1)+gamma*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.set_old(kap, 2)\n\n self.time += delta_t\n\n else:\n ## reduced to using Adams Bashforth 2nd order method for the second timestep:\n\n try:\n tmp = [self.get_old(0)]\n except IndexError:\n tmp = []\n kap = update_ab2(self)\n if tmp:\n self._old = self._old + tmp\n\n return kap", "def test_revolute_from_dh(self):\n x_offset = 1\n z_offset = 2\n # Rotate around the z axis\n r = 
Joint.revolute_from_dh(0, 0, x_offset, z_offset)\n t_mat = r(np.array([np.pi / 2]))\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], z_offset))\n # x was rotated 90 degrees, and is now y\n self.assertTrue(np.allclose(t_mat[1, 3], x_offset))", "def update_apc22(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n pos = self.pos+delta_t/2.0*(self.vel+vel)\n vel = self.vel+delta_t/2.0*(self.force(pos, vel, self.time+delta_t,\n drag=False)+kap[1])\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap", "def tick(self):\n \n if self.state == ForceFeedbackState.NORMAL:\n # send desired position to IK directly\n # Actually publish if we have some publishers\n rospy.logerr('Im in NORMAL state')\n if self.pubs is not None:\n p0 = self.desired_pos\n # Record the current sent pos as element 0 of self.prev_pos\n # shuffling the other pos to the right. Then, truncate the\n # list to be a maximum of 2 pos long.\n self.prev_pos = [p0] + self.prev_pos\n self.prev_pos = self.prev_pos[:2]\n \n #Publish the desired position to /pos_for_IK\n p0_list = p0.tolist()\n msg = Float64MultiArray()\n msg.data = deepcopy(p0_list)\n self.pubs.publish(msg)\n \n elif self.state == ForceFeedbackState.FORCEFB:\n rospy.logerr('Im in FORCEFB state')\n # Calculate correction vector\n \n # Need to check whether desired_pos is nan. 
Can use one only because the function only allows scalar\n if math.isnan(self.desired_pos[0]) == False:\n self.stored_desired_pos = self.desired_pos\n \n correction = self.stored_desired_pos - self.camera_pos\n correction_mag = np.sqrt(np.sum(correction * correction))\n \n #separate out two terms for testing and simplicity (and also because error in camera measurements in 2 dimensions) \n correction_x_mag = np.sqrt(correction[0]*correction[0])\n correction_z_mag = np.sqrt(correction[2]*correction[2])\n rospy.logerr('correction_z_mag = ')\n rospy.logerr(correction_z_mag) \n \n #if the correction_vector bigger and smaller than some threshold, then do something\n #if math.isnan(correction_mag)==True or correction_mag <= epsilon or correction_mag >= 0.05:\n #need correction_z_mag in case we are using force feedback on the wrong pose\n if math.isnan(correction_x_mag)==False and math.isnan(correction_z_mag)==False and correction_z_mag <= 0.15:\n if correction_x_mag >= 0.04 or correction_z_mag >= 0.03:\n rospy.logerr('FORCE FEEDBACK with correction mag: ' + str(correction_mag))\n \n # Calculate new commanded pose\n p0 = self.stored_desired_pos\n p0_new = p0 + 1.0*correction #the constant can be changed depending on needs\n \n self.prev_pos = [p0_new] + self.prev_pos\n self.prev_pos = self.prev_pos[:2]\n \n #Publish the new p0 to /pos_for_IK\n p0_list = p0_new.tolist()\n msg = Float64MultiArray()\n msg.data = deepcopy(p0_list)\n \n rospy.logerr('Publishing msg: ' + str(msg))\n self.pubs.publish(msg) \n \n else:\n rospy.logerr('TRIED FORCE FB BUT NOT REQUIRED')\n p0 = self.desired_pos\n # Record the current sent pos as element 0 of self.prev_pos\n # shuffling the other pos to the right. Then, truncate the\n # list to be a maximum of 2 pos long.\n self.prev_pos = [p0] + self.prev_pos\n self.prev_pos = self.prev_pos[:2]\n \n #Publish the desired position to /pos_for_IK\n p0_list = p0.tolist()\n msg = Float64MultiArray()\n msg.data = deepcopy(p0_list)\n self.pubs.publish(msg)\n else:\n rospy.logerr('MATH IS NAN or wrong pose, equivalent to NORMAL state')\n p0 = self.desired_pos\n # Record the current sent pos as element 0 of self.prev_pos\n # shuffling the other pos to the right. 
Then, truncate the\n # list to be a maximum of 2 pos long.\n self.prev_pos = [p0] + self.prev_pos\n self.prev_pos = self.prev_pos[:2]\n \n #Publish the desired position to /pos_for_IK\n p0_list = p0.tolist()\n msg = Float64MultiArray()\n msg.data = deepcopy(p0_list)\n self.pubs.publish(msg)\n \n self.desired_pos = np.ones(3) * np.nan\n self.state = ForceFeedbackState.NORMAL", "def define_ufl_convec_accel_diff(self):\n\n if hasattr(self, 'ufl_convec_accel_dv'):\n return None\n\n # Exit if problem is formulated with respect to Eulerian\n # coordinates and is not an elastic material.\n eulerian = self.config['formulation']['domain'] == 'eulerian'\n lin_elastic = self.config['material']['const_eqn'] == 'lin_elastic'\n stokes = self.config['material']['const_eqn'] == 'stokes'\n if (not eulerian) or lin_elastic or stokes:\n self.ufl_convec_accel_dv = 0\n return None\n\n self.ufl_convec_accel_dv = dlf.derivative(self.ufl_convec_accel,\n self.velocity,\n self.trial_vector)\n\n return None", "def update_apc11(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n pos = self.pos+delta_t*self.vel\n vel = self.vel+delta_t*kap[1]\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n force = self.force(pos,\n vel,\n self.time+delta_t, drag=False)\n\n pos = self.pos+delta_t*vel\n vel = self.vel+delta_t*force\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n vel = self.vel+col.delta_t*kap[1]\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest = True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n \n self.time += delta_t\n\n return (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)", "def update_rk4(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n try:\n\n kap1 = (self.vel, self.force(self.pos,\n self.vel,\n self.time))\n\n pos = self.pos+0.5*delta_t*kap1[0]\n vel = self.vel+0.5*delta_t*kap1[1]\n self.check_collision_full(pos, self.pos,\n vel, self.vel,\n 0.5*delta_t, drag=False)\n\n kap2 = (self.vel + 0.5*delta_t*kap1[1],\n self.force(pos, vel, self.time + 0.5*delta_t))\n\n pos = self.pos+0.5*delta_t*kap2[0]\n vel = self.vel+0.5*delta_t*kap2[1]\n self.check_collision_full(pos, self.pos,\n vel, self.vel,\n 0.5*delta_t, drag=False)\n\n\n kap3 = (self.vel+0.5*delta_t*kap2[1],\n self.force(pos, vel, self.time+0.5*delta_t))\n\n pos = self.pos+0.5*delta_t*kap3[0]\n vel = self.vel+0.5*delta_t*kap3[1]\n self.check_collision_full(pos, self.pos,\n vel, self.vel,\n 0.5*delta_t, drag=False)\n\n\n kap4 = (self.vel + delta_t * kap3[1],\n self.force(pos, vel, self.time + delta_t))\n\n pos = self.pos+delta_t*(kap1[0]+2.0*kap2[0]+2.0*kap3[0]+kap4[0])/6.0\n vel = self.vel+delta_t*(kap1[1]+2.0*kap2[1]+2.0*kap3[1]+kap4[1])/6.0\n self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=False)\n\n self.pos = pos\n self.vel = vel\n\n except Collision.CollisionException as col:\n col.vel = self.vel+col.delta_t*kap1[0]\n raise col\n\n self.time += delta_t", "def 
test_forward_injection_in_3D(self):\n self.fom = ModeMatch(monitor_name = 'figure_of_merit',\n mode_number = 1,\n direction = 'Forward',\n multi_freq_src = True,\n target_T_fwd = lambda wl: np.ones(wl.size),\n norm_p = 1)\n Optimization.set_source_wavelength(self.sim, 'source', self.fom.multi_freq_src, len(self.wavelengths))\n self.sim.fdtd.setnamed('FDTD','dimension','3D')\n self.fom.initialize(self.sim)\n self.fom.make_forward_sim(self.sim)\n self.sim.run(name = 'modematch_forward_injection_in_3D', iter = 0)\n FOM = self.fom.get_fom(self.sim)\n self.assertAlmostEqual(FOM, self.ref_fom, 4)", "def test_3D_m6_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def test_3D_m6_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def single_volume_inference(self, volume):\n self.model.eval()\n\n # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis\n slices = []\n\n # Write code that will create mask for each slice across the X (0th) dimension. After \n # that, put all slices into a 3D Numpy array. 
You can verify if your method is \n # correct by running it on one of the volumes in your training set and comparing \n # with the label in 3D Slicer.\n \n # normalize\n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n \n new_image = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n mask3d = np.zeros(new_image.shape)\n \n for slc_ix in range(new_image.shape[2]):\n tsr_test = torch.from_numpy(new_image[:,:,slc_ix].astype(np.single)).unsqueeze(0).unsqueeze(0)\n #image = torch.from_numpy(self.data[slc[0]][\"image\"][:,:,slc[1]]).unsqueeze(0)\n #tsr_test = torch.from_numpy(slc.astype(np.single)).unsqueeze(0).unsqueeze(0)\n pred = self.model(tsr_test.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n mask3d[:,:,slc_ix] = torch.argmax(pred, dim=0)\n\n return mask3d", "def test_3D_m4_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el", "def setCameraRotation3D(ang):\n dislin.vup3d(ang)", "def match_det2cube(self, input_model,\n x, y, file_slice_no,\n this_par1, this_par2,\n spaxel,\n c1_offset, c2_offset):\n\n#________________________________________________________________________________\n if self.instrument == 'MIRI':\n\n det2ab_transform = input_model.meta.wcs.get_transform('detector', 'alpha_beta')\n detector2v23 = input_model.meta.wcs.get_transform('detector', 'v2v3')\n v23toworld = input_model.meta.wcs.get_transform(\"v2v3\", \"world\")\n worldtov23 = input_model.meta.wcs.get_transform(\"world\", \"v2v3\")\n v2ab_transform = input_model.meta.wcs.get_transform('v2v3',\n 'alpha_beta')\n\n alpha, beta, wave = det2ab_transform(x, y)\n v2, v3, lam23 = detector2v23(x, y)\n ra, dec, lam = v23toworld(v2, v3, lam23)\n\n valid1 = np.isfinite(v2)\n valid2 = np.isfinite(v3)\n\n if self.weighting == 'miripsf':\n 
wave_resol = self.instrument_info.Get_RP_ave_Wave(this_par1, this_par2)\n alpha_resol = self.instrument_info.Get_psf_alpha_parameters()\n beta_resol = self.instrument_info.Get_psf_beta_parameters()\n\n # transform Cube Spaxel centers to alpha,beta of exposure\n # for MIRI weighting parameters are based on distance in\n # alpha-beta coord system\n # transform the cube coordinate values to alpha and beta values\n # xi,eta -> ra,dec\n # world -> v2,v3\n # v2,v3 -> local alpha,beta\n\n elif self.instrument == 'NIRSPEC':\n islice = file_slice_no\n slice_wcs = nirspec.nrs_wcs_set_input(input_model, islice)\n\n x, y = wcstools.grid_from_bounding_box(slice_wcs.bounding_box, \n step=(1, 1), center=True)\n ra, dec, lam = slice_wcs(x, y) # return v2,v3 are in degrees\n valid1 = np.isfinite(ra)\n valid2 = np.isfinite(dec)\n#________________________________________________________________________________\n#________________________________________________________________________________\n# Slices are curved on detector. A slice region is grabbed by corner regions so\n# the region returned may include pixels not value for slice. There are gaps\n# between the slices. Pixels not belonging to a slice are assigned NaN values.\n\n x = x.astype(np.int)\n y = y.astype(np.int)\n\n flux_all = input_model.data[y, x]\n# error_all = input_model.err[y, x]\n dq_all = input_model.dq[y, x]\n\n valid3 = np.isfinite(lam)\n valid4 = np.isfinite(flux_all)\n valid = valid1 & valid2 & valid3 & valid4\n#________________________________________________________________________________\n# using the DQFlags from the input_image find pixels that should be excluded\n# from the cube mapping\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(dq_all, all_flags) == 0) & (valid == True))\n\n # good data holds the location of pixels we want to map to cube\n flux = flux_all[good_data]\n# error = error_all[good_data]\n wave = lam[good_data]\n\n# xpix = x[good_data] # only used for testing\n# ypix = y[good_data] # only used for testing\n\n ra = ra - c1_offset / 3600.0\n dec = dec - c2_offset / 3600.0\n ra_use = ra[good_data]\n dec_use = dec[good_data]\n if self.instrument == 'MIRI':\n # need alpha,beta if weigthing is miripsf or cubes in alpha-beta space\n alpha_det = alpha[good_data]\n beta_det = beta[good_data]\n# MIRI can make cubes in alpha-beta:\n if self.coord_system == 'alpha-beta':\n coord1 = alpha[good_data]\n coord2 = beta[good_data]\n\n else:\n# xi,eta in arc seconds\n xi, eta = coord.radec2std(self.Crval1, self.Crval2, ra_use, dec_use)\n coord1 = xi\n coord2 = eta\n\n nplane = self.naxis1 * self.naxis2\n lower_limit = 0.01\n\n# iprint = 0\n# now loop over the pixel values for this region and find the spaxels that fall\n# withing the region of interest.\n nn = coord1.size\n\n# print('looping over n points mapping to cloud',nn)\n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n#________________________________________________________________________________\n # Cube.Xcenters, ycenters is a flattened 1-D array of the 2 X 2 xy plane\n # cube coordinates.\n # find the spaxels that fall withing ROI of point cloud defined by\n # coord1,coord2,wave\n# if(ipt > 2): sys.exit('STOP')\n# print('For point 
',coord1[ipt],coord2[ipt],wave[ipt],ipt)\n\n# if(ipt == 0):\n# print('size of Xcenters',self.Xcenters.size)\n xdistance = (self.Xcenters - coord1[ipt])\n ydistance = (self.Ycenters - coord2[ipt])\n radius = np.sqrt(xdistance * xdistance + ydistance * ydistance)\n indexr = np.where(radius <= self.rois)\n indexz = np.where(abs(self.zcoord - wave[ipt]) <= self.roiw)\n\n# print('indexz',indexz)\n# print('indexr',indexr)\n zlam = self.zcoord[indexz] # z Cube values falling in wavelength roi\n xi_cube = self.Xcenters[indexr] # x Cube values within radius\n eta_cube = self.Ycenters[indexr] # y cube values with the radius\n\n# print('found xi_cube',xi_cube)\n# print('found eta_cube',eta_cube)\n\n#________________________________________________________________________________\n# loop over the points in the ROI\n for iz, zz in enumerate(indexz[0]):\n istart = zz * nplane\n for ir, rr in enumerate(indexr[0]):\n# yy_cube = int(rr / self.naxis1)\n# xx_cube = rr - yy_cube * self.naxis1\n# print('xx yy cube',rr,self.naxis1,xx_cube,yy_cube)\n#________________________________________________________________________________\n if self.weighting == 'msm':\n d1 = (xi_cube[ir] - coord1[ipt]) / self.Cdelt1\n d2 = (eta_cube[ir] - coord2[ipt]) / self.Cdelt2\n d3 = (zlam[iz] - wave[ipt]) / self.Cdelt3\n\n weight_distance = math.sqrt(d1 * d1 + d2 * d2 + d3 * d3)\n weight_distance = math.pow(weight_distance, self.weight_power)\n#________________________________________________________________________________\n# if weight is miripsf -distances determined in alpha-beta coordinate system\n elif self.weighting == 'miripsf':\n weights = FindNormalizationWeights(wave[ipt],\n wave_resol,\n alpha_resol,\n beta_resol)\n\n\n ra_spaxel, dec_spaxel = coord.std2radec(self.Crval1,\n self.Crval2,\n xi_cube[ir],\n eta_cube[ir])\n\n v2_spaxel, v3_spaxel, zl = worldtov23(ra_spaxel,\n dec_spaxel,\n zlam[iz])\n\n alpha_spaxel, beta_spaxel, wave_spaxel = v2ab_transform(v2_spaxel,\n v3_spaxel,\n zlam[iz])\n alpha_distance = alpha_det[ipt] - alpha_spaxel\n beta_distance = beta_det[ipt] - beta_spaxel\n wave_distance = abs(wave[ipt] - wave_spaxel)\n\n xn = alpha_distance / weights[0]\n yn = beta_distance / weights[1]\n wn = wave_distance / weights[2]\n\n # only included the spatial dimensions\n weight_distance = math.sqrt(xn * xn + yn * yn + wn * wn)\n weight_distance = math.pow(weight_distance, self.weight_power)\n#________________________________________________________________________________\n# We have found the weight_distance based on instrument type\n\n if weight_distance < lower_limit: weight_distance = lower_limit\n weight_distance = 1.0 / weight_distance\n\n cube_index = istart + rr\n spaxel[cube_index].flux = spaxel[cube_index].flux + weight_distance * flux[ipt]\n spaxel[cube_index].flux_weight = spaxel[cube_index].flux_weight + weight_distance\n spaxel[cube_index].iflux = spaxel[cube_index].iflux + 1", "def quad_eq_of_motion2(self,state,time,force,moment):\n\n\t\t\tA = np.matrix([ [0.25,0, -0.5/self.arm_length],\n\t\t\t\t[0.25,0.5/self.arm_length,0.],\n\t\t\t\t[0.25,0,0.5/self.arm_length],\n\t\t\t\t[0.25,-0.5/self.arm_length,0]])\n\t\t\tT=A*np.asmatrix(np.hstack((force,moment[:2]))).transpose()\n\t\t\tT_clamped=np.maximum(np.minimum(T,self.max_force/4.0),self.min_force/4.0)\n\t\t\tB = np.matrix([[1.0,1.0,1.0,1.0],\n\t\t\t\t\t\t\t[0.0,self.arm_length,0.0,-self.arm_length],\n\t\t\t\t\t\t\t[-self.arm_length,0.0,self.arm_length,0.]])\n\t\t\tforce = B[[0],:]*T_clamped;\n\t\t\tforce = 
np.array(force).reshape(-1,).tolist()\n\t\t\tmoment = np.vstack( (B[[1,2],:]*np.asmatrix(T_clamped), moment[2]));\n\t\t\tmoment = np.array(moment).reshape(-1,).tolist()\n\t\t\t\n\t\t\t#Assign 13 states\n\t\t\t#x = state[0]\n\t\t\t#y = state[1]\n\t\t\t#z = state[2]\n\t\t\txdot = state[3];\n\t\t\tydot = state[4];\n\t\t\tzdot = state[5];\n\t\t\tqW = state[6];\n\t\t\tqX = state[7];\n\t\t\tqY = state[8];\n\t\t\tqZ = state[9];\n\t\t\tp = state[10];\n\t\t\tq = state[11];\n\t\t\tr = state[12];\n\n\t\t\tquat = np.vstack((qW,qX,qY,qZ)); #!! Attention to the order!!\n\t\t\tbRw=self.quat2mat(quat.transpose())\n\t\t\tbRw=bRw.reshape(3,3) #to remove the last dimension i.e., 3,3,1\n\t\t\twRb = bRw.transpose()\n\t\t\t\n\t\t\t# Acceleration\n\t\t\taccel = 1.0 / self.mass * (wRb * np.matrix([[0],[0],force]) - np.matrix([[0],[0],[self.mass * self.gravity]]))\n\t\t\taccel = np.array(accel).reshape(-1,).tolist()\n\t\t\t# Angular velocity\n\t\t\tK_quat = 2.0; #%this enforces the magnitude 1 constraint for the quaternion\n\t\t\tquaterror = 1 - (qW**2 + qX**2 + qY**2 + qZ**2);\n\t\t\tqdot = -1/2*np.matrix([ [0,-p,-q,-r],[p,0,-r,q],[q,r,0,-p],[r,-q,p,0]])*quat + K_quat*quaterror * quat\n\t\t\tqdot = np.array(qdot).reshape(-1,).tolist()\n\t\t\t# % Angular acceleration\n\t\t\tomega = np.matrix([[p],[q],[r]])\n\t\t\ttemp = np.squeeze(np.cross(omega.transpose(),(self.Inertia*omega).transpose()))\n\t\t\tpqrdot = self.invInertia * (moment - temp).reshape(-1,1)\n\t\t\tsdot=np.zeros(13) #default=float64\n\t\t\tsdot[0]=xdot#[]\n\t\t\tsdot[1]=ydot\n\t\t\tsdot[2]=zdot\n\t\t\tsdot[3]=accel[0]\n\t\t\tsdot[4]=accel[1]\n\t\t\tsdot[5]=accel[2]\n\t\t\tsdot[6]=qdot[0]\n\t\t\tsdot[7]=qdot[1]\n\t\t\tsdot[8]=qdot[2]\n\t\t\tsdot[9]=qdot[3]\n\t\t\tsdot[10]=pqrdot[0]\n\t\t\tsdot[11]=pqrdot[1]\n\t\t\tsdot[12]=pqrdot[2]\n\t\t\treturn sdot", "def revolver(self):\r\n\t\tself.__revuelto=True", "def control_OM(self, ang_vel, dt, t1):\n print(\"OM CONTROL\")\n for i in range(ang_vel.shape[0]):\n t1 = time.time()\n t2 = time.time()\n while t2 - t1 < dt:\n self.compare_OM_w_gyro(ang_vel[i])\n self.move_forward(ang_vel[i])\n t2 = time.time()", "def test_3D_m4_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def refframe_correct(self, ra, dec, obstime, sobjs=None):\n # Correct Telescope's motion\n refframe = self.par['calibrations']['wavelengths']['refframe']\n if refframe in ['heliocentric', 'barycentric'] \\\n and self.par['calibrations']['wavelengths']['reference'] != 'pixel':\n msgs.info(\"Performing a {0} correction\".format(self.par['calibrations']['wavelengths']['refframe']))\n # Calculate correction\n radec = ltu.radec_to_coord((ra, dec))\n vel, vel_corr = wave.geomotion_correct(radec, obstime,\n self.spectrograph.telescope['longitude'],\n self.spectrograph.telescope['latitude'],\n self.spectrograph.telescope['elevation'],\n refframe)\n # Apply correction to objects\n msgs.info('Applying {0} correction = {1:0.5f} km/s'.format(refframe, vel))\n if (sobjs is not None) and (sobjs.nobj != 0):\n # Loop on slits to apply\n gd_slitord = self.slits.slitord_id[np.logical_not(self.extract_bpm)]\n for slitord in gd_slitord:\n indx = sobjs.slitorder_indices(slitord)\n this_specobjs 
= sobjs[indx]\n # Loop on objects\n for specobj in this_specobjs:\n if specobj is None:\n continue\n specobj.apply_helio(vel_corr, refframe)\n\n # Apply correction to wavelength image\n self.vel_corr = vel_corr\n self.waveimg *= vel_corr\n\n else:\n msgs.info('A wavelength reference frame correction will not be performed.')", "def test_vertical_velocity_dry_air():\n omega = 1 * units('microbar/second')\n w_truth = -0.7995291 * units('cm/s')\n w_test = vertical_velocity(omega, 1000. * units.mbar, 273.15 * units.K)\n assert_almost_equal(w_test, w_truth, 6)", "def update_apc23(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n if len(self._old) >= 1:\n\n\n try:\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n beta = 0.5*self.delta_t/(self.time-self.get_old(0, 2))\n\n pos = self.pos+delta_t*((1+beta)*self.vel-beta*self.get_old(0, 0))\n vel = self.vel+delta_t*((1+beta)*kap[1]-beta*self.get_old(0, 1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n beta1 = (3.0*(self.time-self.get_old(0,2))+delta_t)/(6.0*self.time-self.get_old(0,2))\n beta2 = -delta_t**2/(6.0*(self.time+delta_t-self.get_old(0,2))*(self.time-self.get_old(0,2)))\n\n print self.force(pos, vel, self.time+delta_t), kap[1], self.get_old(0,1)\n\n pos = self.pos+delta_t*((1.0-beta1-beta2)*vel+beta1*self.vel+beta2*self.get_old(0, 0))\n vel = self.vel+delta_t*((1.0-beta1-beta2)*self.force(pos, vel, self.time+delta_t,\n drag=False)+beta1*kap[1]+beta2*self.get_old(0,1))\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n beta = 0.5*col.delta_t/(self.time-self.get_old(0, 2))\n vel = self.vel+col.delta_t*(1+beta)*kap[1]-beta*self.get_old(0, 1)\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest=True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n\n self.time += delta_t\n\n else:\n ## reduced to using the Adams first order method for the first timestep:\n\n kap = update_apc1(self)\n\n self.set_old(kap, 1)\n\n return kap", "def geocentricToApparentRadiantAndVelocity(ra_g, dec_g, vg, lat, lon, elev, jd, include_rotation=True):\n\n\n # Compute ECI coordinates of the meteor state vector\n state_vector = geo2Cartesian(lat, lon, elev, jd)\n\n eci_x, eci_y, eci_z = state_vector\n\n\n # Assume that the velocity at infinity corresponds to the initial velocity\n v_init = np.sqrt(vg**2 + (2*6.67408*5.9722)*1e13/vectMag(state_vector))\n\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid) of the reference \n # trajectory point\n lat_geocentric = np.degrees(math.atan2(eci_z, math.sqrt(eci_x**2 + eci_y**2)))\n\n\n\n\n ### Uncorrect for zenith attraction ###\n\n # Compute the radiant in the local coordinates\n azim, elev = raDec2AltAz(ra_g, dec_g, jd, lat_geocentric, lon)\n\n # Compute the zenith angle\n eta = np.radians(90.0 - elev)\n\n # Numerically correct for zenith attraction\n diff = 10e-5\n zc = eta\n while diff > 10e-6:\n \n # Update the zenith distance\n zc -= diff\n\n # Calculate the zenith attraction correction\n delta_zc = 2*math.atan((v_init - 
vg)*math.tan(zc/2.0)/(v_init + vg))\n diff = zc + delta_zc - eta\n\n\n # Compute the uncorrected geocentric radiant for zenith attraction\n ra, dec = altAz2RADec(azim, 90.0 - np.degrees(zc), jd, lat_geocentric, lon)\n\n ### ###\n\n\n\n # Apply the rotation correction\n if include_rotation:\n\n # Calculate the velocity of the Earth rotation at the position of the reference trajectory point (m/s)\n v_e = 2*math.pi*vectMag(state_vector)*math.cos(np.radians(lat_geocentric))/86164.09053\n\n\n # Calculate the equatorial coordinates of east from the reference position on the trajectory\n azimuth_east = 90.0\n altitude_east = 0\n ra_east, dec_east = altAz2RADec(azimuth_east, altitude_east, jd, lat, lon)\n\n # Compute the radiant vector in ECI coordinates of the apparent radiant\n v_ref_vect = v_init*np.array(raDec2Vector(ra, dec))\n\n\n v_ref_nocorr = np.zeros(3)\n\n # Calculate the derotated reference velocity vector/radiant\n v_ref_nocorr[0] = v_ref_vect[0] + v_e*np.cos(np.radians(ra_east))\n v_ref_nocorr[1] = v_ref_vect[1] + v_e*np.sin(np.radians(ra_east))\n v_ref_nocorr[2] = v_ref_vect[2]\n\n\n # Compute the radiant without Earth's rotation included\n ra_norot, dec_norot = vector2RaDec(vectNorm(v_ref_nocorr))\n v_init_norot = vectMag(v_ref_nocorr)\n\n ra = ra_norot\n dec = dec_norot\n v_init = v_init_norot\n\n\n\n return ra, dec, v_init", "def test_3D_m8_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def angular_memory_effect_analysis(tilt_coef_range, input_field, TM, data_shape, sample_pitch, wavelength, plot_std = True):\n zernike_rho = np.linspace(-0.5, 0.5, data_shape[1])\n\n\n centered_outputs = np.zeros([tilt_coef_range.size, data_shape[1]]) # For storage of outputs that where tilted and then artificially shifted back to the center\n shift_idxs = np.zeros(tilt_coef_range.size) # Will store information on how much outputs needed to be shifted to be centered\n for idx, coef in enumerate(tilt_coef_range):\n output_intensity = np.abs( TM @ ( input_field * np.exp(2j * np.pi * coef * zernike_rho) ).ravel() )**2 # propagation via TM, only saving intensity\n centered_outputs[idx, :], shift_idxs[idx] = calc.center_tilted_output(output_intensity, coef, data_shape, sample_pitch, wavelength, return_shift_idx = True)\n\n centered_outputs = centered_outputs / np.linalg.norm(centered_outputs, 1) # Normalising all outputs\n\n\n input_field = input_field.ravel()\n reference_output = np.abs( TM @ input_field )**2 # untilted output field, which we will use for std\n reference_output = reference_output / np.linalg.norm(reference_output)\n\n # fig, axs = plt.subplots()\n # axs.plot(centered_outputs[-1,:])\n # plt.show()\n\n #Comparing the outputs with a standard deviation method\n subtracted_outputs = np.zeros(centered_outputs.shape)\n for idx in range(centered_outputs[:,0].size):\n subtracted_outputs[idx, :] = centered_outputs[idx, :] - centered_outputs[0, :] # subtracting the outputs with the reference\n\n std_of_outputs = np.std(subtracted_outputs, axis = 1) # Computing the standard deviation of subtracted outputs\n if plot_std:\n fig, axs = plt.subplots()\n img_1, = axs.plot(shift_idxs * sample_pitch[1] * 1e6, std_of_outputs, 
color = 'black')\n img_2 = axs.scatter(shift_idxs * sample_pitch[1] * 1e6, std_of_outputs, s = 40, color = 'crimson', edgecolors = 'black')\n axs.set_title('Angular memory effect decay')\n axs.set(xlabel = '$\\delta x, \\ \\mu m$', ylabel = 'Standard deviation, p.d.u.')\n\n\n #Fitting a logistic curve onto the results\n popt, pcov = curve_fit(logistic_curve, (shift_idxs * sample_pitch[1] * 1e6), std_of_outputs)\n smooth_range = np.linspace(-0.1, shift_idxs[-1] * sample_pitch[1] * 1e6, 1000)\n fitted_std = logistic_curve(smooth_range, *popt)\n img_fit, = axs.plot(smooth_range, fitted_std, color = 'blue')\n\n axs.text(0.65 * (shift_idxs[-1] * sample_pitch[1] * 1e6), 0.2 * std_of_outputs.max(), 'Fitting parameters: \\nL = {:.3g} \\nx_0 = {:.3g} \\nk = {:.3g}'.format(*popt))\n\n # img_3 = axs.scatter((shift_idxs * sample_pitch[1] * 1e6)[-8:], std_of_outputs[-8:], s = 70, marker = 'x', color = 'green')#excluded point\n plt.legend([(img_2, img_1), img_fit], ['STD($\\delta x $)', '$L / [1 + \\exp(-k (\\delta x - x_0)) ]$'])\n plt.plot(block = False)\n\n return centered_outputs", "def correct_proper_motion(self, invert=False, mjd=None):\n\n # If mjd not set directly, check that it was set from FITS headers in get_cutout method\n if self.mjd is None:\n if mjd is None:\n raise FITSException(\"Date could not be inferred from header, supply with epoch keyword.\")\n else:\n self.mjd = mjd\n\n obstime = Time(self.mjd, format='mjd')\n\n simbad = Simbad.query_region(self.position, radius=180 * u.arcsec)\n\n # Catch SIMBAD failure either from None return of query or no stellar type matches in region\n try:\n simbad = simbad.to_pandas()\n pm_types = ['*', '**', 'PM*', 'EB*', 'Star', 'PSR', 'Pulsar', 'Flare*']\n simbad = simbad[(simbad['OTYPE'].isin(pm_types)) | (simbad['SP_TYPE'].str.len() > 0)]\n\n assert len(simbad) > 0\n\n except (ValueError, AssertionError):\n logger.debug(\"No high proper-motion objects within 180 arcsec.\")\n self.correct_pm = False\n\n return\n\n # Treat non-existent proper motion parameters as extremely distant objects\n simbad['PMRA'].fillna(0, inplace=True)\n simbad['PMDEC'].fillna(0, inplace=True)\n simbad['PLX_VALUE'].fillna(0.01, inplace=True)\n\n newtime = Time(self.radio.mjd, format='mjd')\n pmra = simbad['PMRA'].values * u.mas / u.yr\n pmdec = simbad['PMDEC'].values * u.mas / u.yr\n\n dist = Distance(parallax=simbad['PLX_VALUE'].values * u.mas)\n\n simbad['j2000pos'] = SkyCoord(\n ra=simbad['RA_d'].values * u.deg,\n dec=simbad['DEC_d'].values * u.deg,\n frame='icrs',\n distance=dist,\n pm_ra_cosdec=pmra,\n pm_dec=pmdec,\n obstime='J2000',\n )\n\n datapos = simbad.j2000pos.apply(lambda x: x.apply_space_motion(obstime))\n newpos = simbad.j2000pos.apply(lambda x: x.apply_space_motion(newtime))\n\n simbad_cols = {\n 'MAIN_ID': 'Object',\n 'OTYPE': 'Type',\n 'SP_TYPE': 'Spectral Type',\n 'DISTANCE_RESULT': 'Separation (arcsec)',\n }\n simbad = simbad.rename(columns=simbad_cols)\n simbad = simbad[simbad_cols.values()].copy()\n simbad['PM Corrected Separation (arcsec)'] = np.round(newpos.apply(\n lambda x: x.separation(self.position).arcsec), 3)\n\n # Only display PM results if object within 15 arcsec\n if simbad['PM Corrected Separation (arcsec)'].min() > 15:\n logger.debug(\"No PM corrected objects within 15 arcsec\")\n self.correct_pm = False\n\n return\n\n self.simbad = simbad.sort_values('PM Corrected Separation (arcsec)')\n logger.info(f'SIMBAD results:\\n {self.simbad.head()}')\n\n nearest = self.simbad['PM Corrected Separation (arcsec)'].idxmin()\n\n self.oldpos = 
datapos[nearest]\n self.pm_coord = newpos[nearest]\n\n near_object = self.simbad.loc[nearest].Object\n msg = f'{near_object} proper motion corrected to <{self.pm_coord.ra:.4f}, {self.pm_coord.dec:.4f}>'\n logger.info(msg)\n\n missing = simbad[simbad['PM Corrected Separation (arcsec)'].isna()]\n if len(missing) > 0:\n msg = f\"Some objects missing PM data, and may be a closer match than presented:\\n {missing}\"\n logger.warning(msg)\n\n return", "def CreateMotionKernel(kernel):\r\n TrajSize = 64\r\n anxiety = 0.2* np.random.rand()\r\n numT = 10\r\n MaxTotalLength =10\r\n TotLength = 0\r\n #term determining, at each sample, the strengh of the component leating towards the previous position\r\n centripetal = 0.7 * np.random.rand()\r\n #term determining, at each sample, the random component of the new direction\r\n gaussianTerm =10 * np.random.rand()\r\n #probability of having a big shake, e.g. due to pressing camera button or abrupt hand movements\r\n freqBigShakes = 3 *np.random.rand()\r\n #v is the initial velocity vector, initialized at random direction\r\n init_angle = 360 * np.random.rand()\r\n #initial velocity vector having norm 1\r\n v0 = math.cos(init_angle / 180.0 * math.pi) + 1.0j * math.sin(init_angle/ 180.0 * math.pi)\r\n #the speed of the initial velocity vector\r\n v = v0* MaxTotalLength/(numT-1);\r\n\r\n if anxiety > 0:\r\n v = v0 * anxiety\r\n # initialize the trajectory vector\r\n x = np.zeros(numT,dtype = np.complex);\r\n\r\n abruptShakesCounter = 0\r\n for t in range(numT-1):\r\n # determine if there is an abrupt (impulsive) shake\r\n if np.random.rand() < freqBigShakes * anxiety:\r\n #if yes, determine the next direction which is likely to be opposite to the previous one\r\n nextDirection = 2 * v * (np.exp( 1.0j * (math.pi + (np.random.rand() - 0.5))))\r\n abruptShakesCounter = abruptShakesCounter + 1\r\n else:\r\n nextDirection=0\r\n\r\n #determine the random component motion vector at the next step\r\n dv = nextDirection + anxiety * (gaussianTerm * (np.random.randn()- + 1.0j * np.random.randn()) - centripetal * x[t]) * (MaxTotalLength / (numT - 1))\r\n v = v + dv\r\n # velocity vector normalization\r\n v = (v / np.abs(v)) * MaxTotalLength / (numT - 1)\r\n #print v\r\n x[t + 1] = x[t] + v\r\n # compute total length\r\n #TotLength=TotLength+np.abs(x([t+1]-x[t]))\r\n x_real = []\r\n x_imag = []\r\n for elem in x:\r\n x_real.append(elem.real)\r\n x_imag.append(elem.imag)\r\n x_real = np.round((x_real - np.min(x_real))/(np.max(x_real) - np.min(x_real)) * kernel-0.5)\r\n x_imag = np.round((x_imag - np.min(x_imag))/(np.max(x_imag) - np.min(x_imag)) * kernel-0.5)\r\n for idx in range(len(x_real)):\r\n if x_real[idx] < 0:\r\n x_real[idx] = 0\r\n if x_imag[idx] < 0:\r\n x_imag[idx] = 0\r\n if x_real[idx] > kernel -1:\r\n x_real[idx] = kernel -1\r\n if x_imag[idx] > kernel -1:\r\n x_imag[idx] = kernel -1\r\n\r\n ker = np.zeros((kernel, kernel))\r\n for idx in range(len(x_real)):\r\n ker[np.int(x_real[idx])][np.int(x_imag[idx])] = 1\r\n ker = ker/np.sum(np.sum(ker))\r\n return ker", "def _CorrectMomentum(optimizer, param_keys, correction):\n logger.info('Scaling update history by %.6f (new lr / old lr)', correction)\n for p_key in param_keys:\n try:\n optimizer.state[p_key]['momentum_buffer'] *= correction\n except:\n continue", "def photometric_calibration():\n pass", "def LeapFrog(self,r,v,dt):\n\n rhalf = r + np.asarray(v)*(dt/2) #Taking a half step forward with positional vector\n # predict the final velocity at the next timestep using the acceleration field at the rhalf 
position \n vnew = v + self.M31Accel(rhalf)*dt\n # predict the final position using the average of the current velocity and the final velocity\n rnew = r + 0.5*(v+vnew)*dt\n \n return rnew,vnew", "def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)", "def ccm_unred(wave, flux, ebv, r_v=\"\"):\n import numpy as np\n wave = np.array(wave, float)\n flux = np.array(flux, float)\n \n if wave.size != flux.size: raise TypeError, 'ERROR - wave and flux vectors must be the same size'\n \n if not bool(r_v): r_v = 3.1\n \n x = 10000.0/wave\n npts = wave.size\n a = np.zeros(npts, float)\n b = np.zeros(npts, float)\n \n ###############################\n #Infrared\n \n good = np.where( (x > 0.3) & (x < 1.1) )\n a[good] = 0.574 * x[good]**(1.61)\n b[good] = -0.527 * x[good]**(1.61)\n \n ###############################\n # Optical & Near IR\n \n good = np.where( (x >= 1.1) & (x < 3.3) )\n y = x[good] - 1.82\n \n c1 = np.array([ 1.0 , 0.104, -0.609, 0.701, 1.137, \\\n -1.718, -0.827, 1.647, -0.505 ])\n c2 = np.array([ 0.0, 1.952, 2.908, -3.989, -7.985, \\\n 11.102, 5.491, -10.805, 3.347 ] )\n \n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n ###############################\n # Mid-UV\n \n good = np.where( (x >= 3.3) & (x < 8) )\n y = x[good]\n F_a = np.zeros(np.size(good),float)\n F_b = np.zeros(np.size(good),float)\n good1 = np.where( y > 5.9 )\n \n if np.size(good1) > 0:\n y1 = y[good1] - 5.9\n F_a[ good1] = -0.04473 * y1**2 - 0.009779 * y1**3\n F_b[ good1] = 0.2130 * y1**2 + 0.1207 * y1**3\n \n a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a\n b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b\n \n ###############################\n # Far-UV\n \n good = np.where( (x >= 8) & (x <= 11) )\n y = x[good] - 8.0\n c1 = [ -1.073, -0.628, 0.137, -0.070 ]\n c2 = [ 13.670, 4.257, -0.420, 0.374 ]\n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n # Applying Extinction Correction\n \n a_v = r_v * ebv\n a_lambda = a_v * (a + b/r_v)\n \n funred = flux * 10.0**(0.4*a_lambda) \n \n return funred", "def _update_vel(self) -> None:\n self.state[:, :, Boids.Attr.VEL] += self.state[:, :, Boids.Attr.ACC]\n self.state[:, :, Boids.Attr.VEL] = maglim(\n self.state[:, :, Boids.Attr.VEL], self.max_vel)", "def mvr(self, delta, timeout=None, wait=False, log=True):\n\n self.mv(delta + self.wm(), timeout=timeout, wait=wait, log=log)", "def process_3D(radar, velname=\"VEL\", dbzname=\"DBZ\", zdrname=\"ZDR\", rhohvname=\"RHOHV\",\n gatefilter=None, nyquist_velocity=None, two_passes=False, debug=False):\n # Filter\n if gatefilter is None:\n gatefilter = filtering.do_gatefilter(radar, \"VEL\", \"DBZ\", zdr_name=\"ZDR\", rho_name=\"RHOHV\")\n\n if nyquist_velocity is None:\n nyquist_velocity = radar.instrument_parameters['nyquist_velocity']['data'][0]\n\n # Start with first reference.\n slice_number = 0\n myslice = radar.get_slice(slice_number)\n\n r = radar.range['data'].copy()\n velocity = radar.fields[\"VEL\"]['data'].copy()\n azimuth_reference = radar.azimuth['data'][myslice]\n elevation_reference = radar.elevation['data'][myslice].mean()\n\n velocity_reference 
= np.ma.masked_where(gatefilter.gate_excluded, velocity)[myslice]\n\n # Dealiasing first sweep.\n final_vel, flag_vel, azi_s, azi_e = dealiasing_process_2D(r, azimuth_reference, velocity_reference,\n elevation_reference, nyquist_velocity, debug=False)\n\n velocity_reference = final_vel.copy()\n flag_reference = flag_vel.copy()\n\n ultimate_dealiased_velocity = np.zeros(radar.fields[\"VEL\"]['data'].shape)\n ultimate_dealiased_velocity[myslice] = final_vel.copy()\n\n for slice_number in range(1, radar.nsweeps):\n print(slice_number)\n myslice = radar.get_slice(slice_number)\n azimuth_slice = radar.azimuth['data'][myslice]\n elevation_slice = radar.elevation['data'][myslice].mean()\n\n if len(azimuth_slice) < 60:\n print(f\"Problem with slice #{slice_number}, only {len(azimuth_slice)} radials.\")\n continue\n\n vel = np.ma.masked_where(gatefilter.gate_excluded, velocity)[myslice]\n velocity_slice = vel.filled(np.NaN)\n\n flag_slice = np.zeros_like(velocity_slice) + 1\n flag_slice[np.isnan(velocity_slice)] = -3\n\n # 3D dealiasing\n velocity_slice, flag_slice = continuity.unfolding_3D(r, elevation_reference,\n azimuth_reference, elevation_slice,\n azimuth_slice, velocity_reference,\n flag_reference, velocity_slice,\n flag_slice, nyquist_velocity,\n loose=True)\n\n final_vel, flag_vel, azi_s, azi_e = dealiasing_process_2D(r, azimuth_slice, velocity_slice,\n elevation_slice, nyquist_velocity,\n debug=False, inherit_flag=flag_slice,\n inherit_azi_start=azi_s, inherit_azi_end=azi_e)\n\n if two_passes:\n velocity_slice, flag_slice = continuity.unfolding_3D(r, elevation_reference,\n azimuth_reference,\n elevation_slice,\n azimuth_slice,\n velocity_reference,\n flag_reference,\n final_vel, flag_vel,\n nyquist_velocity, loose=False)\n azimuth_reference = azimuth_slice.copy()\n velocity_reference = final_vel.copy()\n flag_reference = flag_vel.copy()\n elevation_reference = elevation_slice\n\n ultimate_dealiased_velocity[myslice] = final_vel.copy()\n # plot_radar(final_vel, flag_vel, slice_number)\n\n ultimate_dealiased_velocity = np.ma.masked_where(gatefilter.gate_excluded,\n ultimate_dealiased_velocity)\n\n return ultimate_dealiased_velocity", "def wheel_center_disp_damper_movement(rocker_theta, dictionary, results_dictionary, heave):\n results_dictionary['Pushrod Rocker'][-1] = (parametrized_circle(dictionary['Rocker Pivot'],\n dictionary['Rocker Pivot Axis'],\n dictionary['Pushrod Rocker'],\n rocker_theta))\n results_dictionary['Damper Rocker'][-1] = (parametrized_circle(dictionary['Rocker Pivot'],\n dictionary['Rocker Pivot Axis'],\n dictionary['Damper Rocker'],\n rocker_theta))\n results_dictionary['Roll Damper a'][-1] = (parametrized_circle(dictionary['Rocker Pivot'],\n dictionary['Rocker Pivot Axis'],\n dictionary['Roll Damper a'],\n rocker_theta))\n results_dictionary['Pushrod Control Arm'][-1] = fsolve(three_point_method,\n dictionary['Pushrod Control Arm'],\n args=(dictionary['Upper Fore'],\n dictionary['Upper Aft'],\n results_dictionary['Pushrod Rocker'][-1],\n dictionary['Upper Fore'],\n dictionary['Upper Aft'],\n dictionary['Pushrod Rocker'],\n dictionary['Pushrod Control Arm']),\n )\n\n results_dictionary['Lower Out'][-1] = fsolve(three_point_method, dictionary['Lower Out'],\n args=(dictionary['Lower Fore'],\n dictionary['Lower Aft'],\n results_dictionary['Pushrod Control Arm'][-1],\n dictionary['Lower Fore'],\n dictionary['Lower Aft'],\n dictionary['Pushrod Control Arm'],\n dictionary['Lower Out']))\n\n results_dictionary['Upper Out'][-1] = fsolve(three_point_method, dictionary['Upper 
Out'],\n args=(dictionary['Upper Fore'],\n dictionary['Upper Aft'],\n results_dictionary['Lower Out'][-1],\n dictionary['Upper Fore'],\n dictionary['Upper Aft'],\n dictionary['Lower Out'],\n dictionary['Upper Out']))\n\n # Steering\n results_dictionary['Tie Rod Upright'][-1] = fsolve(three_point_method,\n dictionary['Tie Rod Upright'],\n args=(results_dictionary['Tie Rod Chassis'][-1],\n results_dictionary['Lower Out'][-1],\n results_dictionary['Upper Out'][-1],\n dictionary['Tie Rod Chassis'],\n dictionary['Lower Out'],\n dictionary['Upper Out'],\n dictionary['Tie Rod Upright']))\n\n results_dictionary['Wheel Center'][-1] = (fsolve(three_point_method, dictionary['Wheel Center'],\n args=(results_dictionary['Tie Rod Upright'][-1],\n results_dictionary['Upper Out'][-1],\n results_dictionary['Lower Out'][-1],\n dictionary['Tie Rod Upright'],\n dictionary['Upper Out'],\n dictionary['Lower Out'],\n dictionary['Wheel Center'])))\n\n # solved for wheel displacement - actual wheel displacement\n return (dictionary['Wheel Center'][2] - results_dictionary['Wheel Center'][-1][2] + heave)**2", "def ev_controlleraxismotion(self, event: tcod.event.ControllerAxis) -> T | None:", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def define_ufl_velocity_equation(self):\n\n if hasattr(self, 'f1'):\n return None\n\n if self.config['material']['type'] == 'viscous':\n self.f1 = 0\n return None\n\n if not self.config['formulation']['time']['unsteady']:\n self.f1 = 0\n return None\n\n theta = self.config['formulation']['time']['theta']\n dt = self.config['formulation']['time']['dt']\n f1 = self.displacement - self.displacement0 \\\n - dt*(theta*self.velocity + (1.0 - theta)*self.velocity0)\n\n self.f1 = dlf.dot(self.test_vector, f1)*dlf.dx\n\n return None", "def current_update():\n # Compute the multiplier coefficient:\n ci = dt / (L * dx)\n for k in range(0, nx-1):\n I[k] = I[k] - (ci * (V[k + 1] - V[k]))", "def update_volume(self):\r\n\r\n # for the first cell\r\n self.cells[0].volume = self.cells[0].volume + \\\r\n self.inflow - self.flows[0]\r\n # for the intermediate cells\r\n for i in range(1, self.cells_number-1):\r\n self.cells[i].volume = self.cells[i].volume + \\\r\n self.flows[i-1]-self.flows[i]\r\n # for the last cells\r\n self.cells[-1].volume = self.cells[-1].volume + \\\r\n self.flows[-1] - self.outflow", "def illumination_correction(img, avg):\n corrected = (img / avg * img.mean() / (img / avg).mean()).astype('uint8')\n return corrected", "def detect_velocity(image):\n nonlocal prev, v_last\n curr_bgr = cv.warpPerspective(image, M, (160, 120))\n curr = cv.cvtColor(curr_bgr, cv.COLOR_BGR2GRAY)\n\n if prev is None:\n prev = curr\n v_last = 0.0\n return v_last, curr_bgr, np.zeros_like(image)\n\n flow = cv.calcOpticalFlowFarneback(\n prev, # Previous image\n curr, # Current image\n None, # Computed flow image that has the same size oas prev and type CV_32FC2.\n 0.5, # Specifies the image scale (<1) to build pyramids for each image.\n 3, # Number of pyramid layers including the initial image.\n 15, # winsize, averaging windows size.\n 3, # iterations, number of iterations the algorithm does at each pyramid level.\n 5, # standard deviation of the Gaussian that is used to smooth derivative\n 1.5,\n 0)\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n\n v = mag * np.sin(ang)\n\n ######################\n ## Histogram for mag\n ar = np.arange(-20.0, 20.0, 0.50, dtype=np.float)\n his = np.histogram(v, bins=ar)\n\n for i, n in enumerate(his[0]):\n bgr = (255, 255, 0)\n if 
his[1][i] < 0:\n bgr = (0, 255, 255)\n\n #print('[{}] {} - {}'.format(i, n, his[1][i]))\n cv.rectangle( image, #curr_bgr,\n (i*2, HEIGHT),\n (i*2, HEIGHT - int(n / 10)),\n bgr, #(0, 255, 255),\n cv.FILLED)\n\n hsv = np.zeros_like(image)\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 1] = 255\n hsv[..., 2] = cv.normalize(np.abs(v), None, 0, 255, cv.NORM_MINMAX)\n hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n ##\n ######################\n\n v_abs = np.absolute(v)\n v = v[v_abs >= np.percentile(v_abs, VELOCITY_CUTOFF_PCT)]\n\n v_max = v_last + MAX_ACC\n v_min = v_last - MAX_ACC\n v = np.clip(v, v_min, v_max)\n if v.size > 0:\n v_avg = v.mean()\n else:\n if v_last > 0:\n v_avg = max(v_last - MAX_ACC, 0)\n elif v_last < 0:\n v_avg = min(v_last + MAX_ACC, 0)\n else:\n v_avg = 0\n\n prev = curr\n v_last = v_avg\n return v_last, curr_bgr, hsv_bgr", "def sos_correction(self, ratio):\n\n # Correct velocities\n self.u_mps = self.u_mps * ratio\n self.v_mps = self.v_mps * ratio", "def test_sequential_update_mvar_missing_middle(ft_ar2_mvar_kw, theta_ar2_mvar, \n Yt_ar2_mvar, Xt_ar2_mvar):\n t = 1\n kf = Filter(ft_ar2_mvar_kw, Yt_ar2_mvar, Xt_ar2_mvar, for_smoother=True)\n kf.init_attr(theta_ar2_mvar)\n for t_ in range(t+1):\n kf._sequential_update(t_)\n Mt = kf.ft(kf.theta, kf.T, x_0=Xt_ar2_mvar[0])\n\n Ht = Mt['Ht'][t][[0, 2]]\n Bt = Mt['Bt'][t]\n Dt = Mt['Dt'][t][[0, 2]]\n Ft = Mt['Ft'][t]\n Qt = Mt['Qt'][t]\n Rt = Mt['Rt'][t][[0, 2]][:,[0, 2]]\n Upsilon = Ht.dot(kf.P_star_t[t][0]).dot(Ht.T) + Rt \n K = kf.P_star_t[t][0].dot(Ht.T).dot(linalg.pinvh(Upsilon))\n v = kf.Yt[t][[0, 1]] - Ht.dot(kf.xi_t[t][0]) - Dt.dot(kf.Xt[t])\n \n expected_xi_t1_0 = Ft.dot(kf.xi_t[t][0] + K.dot(v)) + Bt.dot(kf.Xt[t])\n P_t_0 = kf.P_star_t[t][0] \n P_t_t = P_t_0 - P_t_0.dot(Ht.T).dot(linalg.pinvh(Upsilon)).dot(\n Ht).dot(P_t_0)\n expected_P_t1_0 = Ft.dot(P_t_t).dot(Ft.T) + Qt\n np.testing.assert_array_almost_equal(expected_P_t1_0, \n kf.P_star_t[t+1][0])\n np.testing.assert_array_almost_equal(expected_xi_t1_0, \n kf.xi_t[t+1][0])", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def test_converts_to_photomodeler_and_back_exactly() -> None:\n # fmm must be equal, k* and p* must be zero\n cam = Camera(\n imgsz=(4288, 2848), fmm=(3200, 3200), cmm=(0.5, -0.4), sensorsz=(35.1, 24.2)\n )\n xcam = PhotoModeler.from_camera(cam)\n residuals = Converter(xcam, cam).residuals()\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-12)\n cam2 = xcam.to_camera()\n np.testing.assert_allclose(cam.to_array(), cam2.to_array(), rtol=0, atol=1e-13)", "def radial_velocity(wv_obj, fx_obj, sig_obj, wv_std, fx_std, sig_std, obj_name, std_name, rv_std, rv_std_err, order,\n xcorr_width, cut, cutstart, cutend):\n\n # The more random iterations, the better... 
but it takes longer\n n_iter = 1000\n\n # Step 1: Fix the spectra:\n # * Select only the region in which they overlap\n # * Make a new stretched wavelength array (for sub-pixel precision work)\n # * Interpolate the data onto the new wavelength array\n # * Remove large scale slopes so we only compare line and band features\n\n # Find where standard and object overlap ---------------\n wv_min = max([min(wv_std), min(wv_obj)])\n wv_max = min([max(wv_std), max(wv_obj)])\n\n n_pix_std = len(wv_std)\n\n # Creates ln standard wavelength array ---------------------------------\n # AR 2013.0423 The wavelength array only covers the overlap region. Also, I'm folding the rebinning by 10 into this statement.\n acoef_std = (n_pix_std * 10 - 1) / (math.log(wv_max) - math.log(wv_min))\n bcoef_std = (n_pix_std * 10) - (acoef_std * math.log(wv_max))\n\n arr = np.arange(n_pix_std * 10) + 1\n wv_ln_std = np.exp((arr - bcoef_std) / acoef_std)\n\n # AR 2012.1018: Find the conversion between pixels and velocity. This will vary from instrument\n # to instrument and spectral order to spectral order, so we should preferentially calculate this\n # based on the actual input spectrum.\n # AR 2013.0422: Change the calculation to happen AFTER the corrected wavelength scale has been made\n # Find the average pixel/spectrum offset\n # Note: even though it's called micron_per_pix, it will still work if the wavelengths are\n # angstroms instead (it really converts <wavelength unit> to km/s)\n\n # Interpolate data onto same ln wavelength scale -------------------------------\n\n fx_interp_std = np.interp(wv_ln_std, wv_std, fx_std)\n fx_interp_obj = np.interp(wv_ln_std, wv_obj, fx_obj)\n sig_interp_std = np.interp(wv_ln_std, wv_std, sig_std) # AR 2012.1018 Also need to rebin sig\n sig_interp_obj = np.interp(wv_ln_std, wv_obj, sig_obj) # AR 2012.1018 Also need to rebin sig\n\n # Rebin Data ----------------------------\n\n wv_arr_std = np.asarray(wv_ln_std, dtype=float)\n fx_arr_obj = np.asarray(fx_interp_obj, dtype=float)\n fx_arr_std = np.asarray(fx_interp_std, dtype=float)\n sig_arr_obj = np.asarray(sig_interp_obj, dtype=float)\n sig_arr_std = np.asarray(sig_interp_std, dtype=float)\n\n datalen = len(fx_arr_obj)\n\n # Step 2: Measure vsini:\n # Note that as of 2015.0605, this doesn't actually work.\n\n # AR 2014.0922: For vsini:\n # In a loop:\n # Take the standard spectrum\n # broaden it to width X\n # autocorrelate,\n # measure width of gaussian Y (this is supposed to give you a means of translating between width-of-cross-correlation and vsini)\n # Fit function solving Y for X.\n # For each cross correlation of object and standard:\n # Determine vsini\n\n pix_scale = (2.99792458 * 10 ** 5) / acoef_std\n\n # vsinirange = [1,2,5,10,20,30,40,50,60,80,100,100]\n # widthrange = []\n # for v in vsinirange:\n # # Make convolution kernel for v km/s\n # kernel = lsf_rotate(pix_scale,v)\n # # Broaden the standard spectrum\n # fx_obj_wide = np.correlate(fx_arr_obj, kernel, mode='same')\n # # Rectify the spectrum\n # fx_obj_orig = (fx_arr_obj - np.mean(fx_arr_obj))/np.std(fx_arr_obj,ddof=1)\n # fx_obj_wide = (fx_obj_wide - np.mean(fx_obj_wide))/np.std(fx_obj_wide,ddof=1)\n #\n # # Remove a cubic (flatten the spectrum)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_wide)\n # fx_obj_wide = fx_obj_wide - (coeff[0] + coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_orig)\n # fx_obj_orig = fx_obj_orig - (coeff[0] + coeff[1]*wv_arr_std + 
coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n #\n # # Cross-correlate the spectrum with its broadened self\n # ycorr = np.correlate(fx_obj_orig, fx_obj_wide, mode='full')\n # # Now determine where the peak is (should be near 0)\n # length = len(ycorr)\n # xcorr = np.arange(length) - length//2\n # xmid = np.argmax(ycorr)\n # ymax = np.max(ycorr)\n # # Chop out just the portion of the array near the peak\n # xcorr_min=xmid-xcorr_width\n # xcorr_max=xmid+xcorr_width\n # ycorr1=ycorr[xcorr_min:xcorr_max]\t#isolate section of array with gaussian\n # xcorr1=xcorr[xcorr_min:xcorr_max] #isolate the same section of the pixel range\n #\n # # set up initial values for gaussian fitting via chi2\n # sig = 10\n # sky = np.min(ycorr1)/1.2\n # # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n # sky2 = (ycorr1[-1]-ycorr1[0])/(xcorr1[-1]-xcorr1[0])\n # lnamp = np.log(ymax/1.2-sky)\t# guess some values\n # mean = xcorr[xmid]\n #\n # amp = np.exp(lnamp)\n # sig2 = sig**2\n # # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # # suggestion from D. Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n # def chi2(p):\t#define gaussian function for fitting\n # sig2=p[2] ** 2\n # m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4]*xcorr1\n # return (ycorr1 - m)\n #\n # # Fit the gaussian.\n # popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n # lnamp, mean, sig, sky, sky2 = popt\n #\n # amp = np.exp(lnamp)\n # # record the width\n # widthrange.append(sig)\n #\n # # Plot all the widths to get a width-vsini curve\n # vsinicoeff,popt = op.curve_fit(quartic,np.asarray(widthrange),np.asarray(vsinirange))\n #\n # relationx = np.arange(50,200,1)\n # relationy = vsinicoeff[0]+vsinicoeff[1]*relationx+vsinicoeff[2]*relationx**2+vsinicoeff[3]*relationx**3+vsinicoeff[4]*relationx**4\n # figv = plt.figure(1)\n # axv = figv.add_subplot(211)\n # axv.scatter(widthrange,vsinirange)\n # axv.plot(relationx,relationy)\n # #ax.text(70,100,\"{0:} {1:} {2:} {3:} {4:}\".format(vsinicoeff))\n\n # 3. Cross-correlate the data, using n_iter trials:\n # * Generate two random gaussian noises scaled to the uncertainty on the fluxes\n # * Apply those gaussian noises to the standard and target stars\n # * Cross-correlate the standard and target stars\n # * Find and then cut out just the part of the cross-correlation curve near the maximum\n # * Set up gaussian\n # * Fit gaussian to that center part\n # * Save fitted parameters (pixel shift aka mean of gaussian, width aka stddev of gaussian)\n # * Repeat n_iter times\n\n # Cross correlation loop --------------------------------\n pix_shift = np.array([]) # initialize array for pixel shift values\n pix_width = np.zeros(n_iter) # initialize array for pixel width values\n l = 0\n\n # using the xrange generator rather than making a full list saves memory\n while len(pix_shift) < n_iter:\n # prepare the randomized data\n # GETTING ARRAYS READY FOR CROSS CORRELATION\n\n\n # Randomize noise:\n # create gaussian distribution of random numbers b/t 1 and -1, multiply err by numbers, add numbers to flux\n # I have drastically simplified the arrays here AR 2013.0319\n # AR 2013.0318: There was a problem, previously: noise was a fixed value, not linked to the known error values\n\n # AR 2013.0321: Speed fix. 
Rather than step through the array and generate one\n # normally-distributed error value scaled to the SNR at that point, I will generate an\n # array of normally-distributed error values scaled to 1, and then multiply by the SNR:\n # One array generation, one array multiplication.\n\n rand_dist = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n rand_dist2 = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n\n fx_temp_obj = np.asarray(fx_arr_obj + rand_dist * sig_arr_obj)\n fx_temp_std = np.asarray(fx_arr_std + rand_dist2 * sig_arr_std)\n mean_obj = np.mean(fx_temp_obj)\n mean_std = np.mean(fx_temp_std)\n stddev_obj = np.std(fx_temp_obj, ddof=1)\n stddev_std = np.std(fx_temp_std, ddof=1)\n\n # Regularize data (subtract mean, divide by std dev) (Should definitely be done AFTER noise was added)\n fx_reg_temp_obj = fx_temp_obj - mean_obj\n fx_reg_temp_obj = fx_reg_temp_obj / stddev_obj\n fx_reg_temp_std = fx_temp_std - mean_std\n fx_reg_temp_std = fx_reg_temp_std / stddev_std\n\n # curve fit - remove a cubic AR 2012.1113\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_obj)\n fx_reg_temp_obj = fx_reg_temp_obj - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_std)\n fx_reg_temp_std = fx_reg_temp_std - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n\n # CROSS CORRELATION\n\n # compute the cross-correlation between the two spectra\n\n ycorr = np.correlate(fx_reg_temp_obj, fx_reg_temp_std, mode='full')\n # time required: 0.045 seconds average\n\n # http://stackoverflow.com/questions/12323959/fast-cross-correlation-method-in-python\n # conv1 = np.zeros(datalen * 2)\n # conv1[datalen/2:datalen/2+datalen] = fx_reg_temp_obj\n # conv2 = fx_reg_temp_std[::-1]\n # ycorr = signal.fftconvolve(conv1,conv2, mode='valid')\n # time required: 0.006 seconds average, but it segfaults by the third try.\n\n ## slight smoothing AR 2013.0315\n # ycorr = scipy.ndimage.filters.gaussian_filter1d(ycorr,11)\n\n # create the x offset axis (same length as ycorr, with 0 in the MIDDLE)\n length = len(ycorr)\n xcorr = np.arange(length) - length // 2\n # AR 2012.1126 Select a tiny piece around the maximum to fit with a gaussian.\n xmid = np.argmax(ycorr)\n ymax = np.max(ycorr)\n # now take just the portion of the array that matters\n xcorr_min = int(xmid - xcorr_width)\n xcorr_max = int(xmid + xcorr_width)\n ycorr1 = ycorr[xcorr_min:xcorr_max] # isolate section of array with gaussian\n xcorr1 = xcorr[xcorr_min:xcorr_max] # isolate the same section of the pixel range\n ycorr2 = ycorr[xcorr_min - 50:xcorr_max + 50]\n xcorr2 = xcorr[xcorr_min - 50:xcorr_max + 50]\n\n # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # suggestion from D. 
Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n def chi2(p): # define gaussian function for fitting\n sig2 = p[2] ** 2\n m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4] * xcorr1\n return (ycorr1 - m)\n\n # set up initial values for chi2\n sig = 10\n sky = np.min(ycorr1) / 1.2\n # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n sky2 = (ycorr1[-1] - ycorr1[0]) / (xcorr1[-1] - xcorr1[0])\n lnamp = np.log(ymax / 1.2 - sky) # guess some values\n mean = xcorr[xmid]\n\n amp = np.exp(lnamp)\n sig2 = sig ** 2\n\n popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n lnamp, mean, sig, sky, sky2 = popt\n\n amp = np.exp(lnamp)\n\n # print_num=len(pix_shift)%100\n print_num = l % 100\n if print_num == 0:\n ## Uncomment the following to make a plot every 500 fits.\n # fig = plt.figure(l)\n # ax = fig.add_subplot(111)\n # my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mean) ** 2) / sig**2))) + sky + sky2 * xcorr1\n # ax.plot(xcorr1,my_gauss,'r--')\n # ax.plot(xcorr2,ycorr2,'#000000')\n # ax.plot(xcorr1,ycorr1-my_gauss,'#00CC00')\n ##if abs(mean - xcorr[xmid]) > 5:\n ## print \"Mean is off\",mean,xcorr[xmid]\n # figname='rv_{0:}_{1:}_{2:}_{3:}.png'.format(std_name,obj_name,order,l)\n # ax.set_xlim(xcorr[xcorr_min-50],xcorr[xcorr_max+50])\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n print\n \"amp={0: 12.4f} mu={1: 10.4f} sig={2: 9.4f} sky={3: 11.4f} sky2={4: 8.4f} n_entries={5:}\".format(amp,\n mean,\n sig,\n sky,\n sky2,\n len(\n pix_shift))\n\n l += 1\n if (cut == 0) | (mean > np.float(cutstart)) & (mean < np.float(cutend)):\n pix_shift = np.append(pix_shift, mean)\n # if ier < 5:\n # I'm calculating the vsini now because I need errors, and the vsini calculation is not linear.\n # pix_width[l] = vsinicoeff[0] + vsinicoeff[1] * sig + vsinicoeff[2] * sig**2 + vsinicoeff[3] * sig**3 + vsinicoeff[4] * sig**4\n\n # End cross correlation loop ---------------------------------\n\n # 4. Find the RV\n # All 5000 rv fits have been calculated and stored in arrays\n # 4a. Cut out outlier RVs. Useful if the cross-correlation produces occasional bad results. Use cutstart and cutend to force the code to only fit a gaussian to a certain region. Don't over-use this to force the result you want, though.\n # 4b. Compute the mean pixel shift and pixel shift uncertainty.\n # 4c. Convert pixel shift into RV\n # 4d. Shift the wavelength array appropriately - all lines should now line up.\n\n ## Uncomment this to print out an example cross-correlation diagram\n # fig = plt.figure(2)\n # ax = fig.add_subplot(111)\n # ax.plot(xcorr,ycorr,'k')\n # figname='rv_{0:}_{1:}_{2:}_xcorr.png'.format(std_name,obj_name,order)\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n\n # Turn the list of pixel shifts into a numpy array\n pix_shift = np.asarray(pix_shift)\n\n # 4a. Cut out outliers from the pixel shift\n if cut == 1:\n pix_shift = pix_shift[np.where((pix_shift > np.float(cutstart)) & (pix_shift < np.float(cutend)))]\n\n # 4b. Compute the mean pixel shift (rv value) and pixel shift uncertainty (RV uncertainty).\n\n print\n l, len(pix_shift), np.float(len(pix_shift)) / np.float(n_iter) * 100.0\n\n mu = np.mean(pix_shift)\n sigma = np.std(pix_shift, ddof=1)\n\n # vsini = np.mean(pix_width)\n # vsini_err = np.std(pix_width,ddof=1)\n\n # axh = figv.add_subplot(212)\n # n, bins, patches=axh.hist(pix_width,bins=30,normed=1.0,facecolor='green',align='mid')\n # figv.savefig('vsiniplot.png')\n # plt.clf()\n # plt.close()\n\n # 4c. 
Transform pixel shift to shift in radial velocity\n\n # AR 2013.0423: The actually appropriate method requires a speed-of-light correction. This works for both angstroms and microns.\n rv_meas = (2.99792458 * 10 ** 5 * mu) / acoef_std\n rv_meas_err = (2.99792458 * 10 ** 5 * sigma) / acoef_std\n\n # 4d. Apply shift to arrays\n wv_rvcorr_obj = wv_arr_std * (1 - rv_meas / (2.99792458 * 10 ** 5))\n\n ## 5. Create plots ---------------------------------\n # The plots are the only reason find_rv.py needs to know the names of either star, or the RV of the standard.\n\n # Plot object and standard so you can clearly see that shift exists --------------------------------\n fig = plt.figure(1)\n\n # AR 2013.0703 Regularize the spectra for display purposes in the final graph\n # I'm using the mean and stddev of the last random-added attempt so it won't be perfect...\n fx_reg_obj = fx_arr_obj - mean_obj\n fx_reg_obj = fx_reg_obj / stddev_obj\n fx_reg_std = fx_arr_std - mean_std\n fx_reg_std = fx_arr_std / stddev_std\n\n # Plots target and standard with shift applied\n ax1 = fig.add_subplot(311)\n ax1.plot(wv_rvcorr_obj, fx_reg_obj, 'red')\n ax1.plot(wv_arr_std, fx_reg_std, 'blue')\n ax1.set_xlabel('wavelength (microns)')\n ax1.set_ylabel('normalized flux')\n target = 'Target: %s' % (obj_name)\n standard = 'Standard: %s' % (std_name)\n ax1.annotate(target, xy=(.7, .9), xycoords='axes fraction', xytext=(.6, .9), textcoords='axes fraction',\n color='red')\n ax1.annotate(standard, xy=(.7, .8), xycoords='axes fraction', xytext=(.6, .8), textcoords='axes fraction',\n color='blue')\n\n sig2 = sig ** 2\n my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mu) ** 2) / sig2))) + sky + sky2 * xcorr1\n\n # Plots example of gaussian fit to cross correlation function\n ax2 = fig.add_subplot(312)\n ax2.plot(xcorr1, ycorr1, 'k.')\n ax2.plot(xcorr1, my_gauss, 'r--', linewidth=2)\n ax2.plot(xcorr1, ycorr1 - my_gauss, '#00CC00')\n ax2.set_xlabel('example of fit to cross correlation function')\n ax2.set_xlim(xcorr[xcorr_min - 50], xcorr[xcorr_max + 50])\n # print pix_shift\n\n\n ## Plot histogram of pixel shift values --------------------------------\n ax3 = fig.add_subplot(313)\n n, bins, patches = plt.hist(pix_shift, bins=30, normed=1.0, facecolor='green', align='mid')\n # Plot best fit gaussian over histogram\n y = mlab.normpdf(bins, mu, sigma)\n ax3.plot(bins, y, 'r--', linewidth=2)\n ax3.set_xlabel('radial velocity of target (pixels)')\n ax3.set_ylabel('frequency (normalized)')\n rad = 'RV = %.3f +/- %.3f' % (rv_meas, rv_meas_err)\n corr = 'RV (corr) = %.3f +/- %.3f' % (rv_std + rv_meas, (rv_std_err ** 2 + rv_meas_err ** 2) ** (0.5))\n # vsinistr = 'VsinI = %.3f +/- %.3f' % (vsini,vsini_err)\n ax3.annotate(rad, xy=(.66, .9), xycoords='axes fraction', xytext=(.66, .9), textcoords='axes fraction',\n color='black')\n ax3.annotate(corr, xy=(.6, .8), xycoords='axes fraction', xytext=(.60, .8), textcoords='axes fraction',\n color='black')\n # ax3.annotate(vsinistr,xy=(.6,.6),xycoords='axes fraction',xytext=(.60,.6),textcoords='axes fraction',color='black')\n ax3.annotate('{0:+5.2f} {1: 5.2f}'.format(mu, sigma), xy=(.05, .9), xycoords='axes fraction', xytext=(.05, .9),\n textcoords='axes fraction', color='black')\n ax3.annotate('{0:5.3f} km/s/pix'.format((2.99792458 * 10 ** 5) / acoef_std), xy=(.05, .8), xycoords='axes fraction',\n xytext=(.05, .8), textcoords='axes fraction', color='black')\n fig.subplots_adjust(hspace=.3)\n\n figname = 'rv_%s_%s_%d.png' % (std_name, obj_name, order)\n fig.savefig(figname)\n fig.clf()\n 
plt.close()\n\n # plt.figure(l+1)\n # plt.hist(pix_shift)\n\n # END RADIAL VELOCITY FUNCTION -----------------------------------------\n return rv_meas, rv_meas_err", "def test_vertical_velocity_pressure_dry_air():\n w = 1 * units('cm/s')\n omega_truth = -1.25073619 * units('microbar/second')\n omega_test = vertical_velocity_pressure(w, 1000. * units.mbar, 273.15 * units.K)\n assert_almost_equal(omega_test, omega_truth, 6)", "def test_interferometer(self, tol):\n # fmt:off\n U = np.array([[0.83645892-0.40533293j, -0.20215326+0.30850569j],\n [-0.23889780-0.28101519j, -0.88031770-0.29832709j]])\n # fmt:on\n\n S = symplectic.interferometer(U)\n expected = np.block([[U.real, -U.imag], [U.imag, U.real]])\n\n assert np.allclose(S, expected, atol=tol, rtol=0)", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range,\n speed_sp=self.MAX_SPEED,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this\n # line is correct as is).", "def global_phase_correction(imgs_shifted_ft):\n nangles = imgs_shifted_ft.shape[0]\n phase_corrections = np.zeros((nangles))\n\n # todo: should weight by SNR, or something like this\n for ii in range(nangles):\n phase_corrections[ii] = np.angle(np.sum(imgs_shifted_ft[ii, 0] * imgs_shifted_ft[ii, 1].conj()))\n\n return phase_corrections", "def trap_depth_old(V,X,Y,Z,Im,Jm,Km,debug=False): \n from project_parameters import debug\n #from all_functions import sum_of_e_field\n def a(a,N):\n \"\"\"Shortcut function to convert array x into a row vector.\"\"\" \n a=np.ravel(a, order='F') # Same order\n return a\n def index_sort(y,x):\n \"\"\"Takes in two lists of the same length and returns y sorted by the indexing of x sorted.\"\"\"\n xs=np.sort(x)\n ix=np.argsort(x)\n ys=np.ones(len(y)) #Sorted by the sorting defined by f being sorted. 
\n for i in range(len(y)):\n j=ix[i]\n ys[i]=y[j]\n return ys\n if len(V.shape)!=3:\n return('Problem with find_saddle.py dimensionalities.\\n')\n N1,N2,N3=V.shape\n N=N1*N2*N3\n f=V\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]),abs(Z[1]-Z[0]))\n E=np.sqrt(Ex**2+Ey**2+Ez**2)\n fs,Es=a(f,N),a(E,N) # Convert 3D to 1D array\n fs,Es=np.real(fs),np.real(Es)\n # identify the escape position and height by checking each point\n minElectricField=max(fs) # initialize as maximum E field magnitude\n distance=0\n escapeHeight=1\n escapePosition=[0,0,0]\n for i in range(N1):\n for j in range(N2):\n for k in range(N3):\n if [i,j,k]==[Im,Jm,Km]:\n Vm=V[i,j,k]\n elif E[i,j,k]<minElectricField:\n minElectricField=E[i,j,k]\n escapeHeight=V[i,j,k]\n escapePosition=[i,j,k]\n distance=abs(Im+Jm+Km-i-j-k) \n if debug.trap_depth: # plot sortings of potential and electric field to view escape position\n plt.plot(np.sort(fs)) \n plt.title('sorted potential field')\n plt.show()\n plt.plot(np.sort(Es)) \n plt.title('sorted electric field')\n plt.show()\n q1=index_sort(fs,Es) \n plt.title('potential field sorted by sorted indexing of electric field')\n plt.plot(q1)\n plt.show()\n q2=index_sort(Es,fs) \n plt.title('electric field sorted by sorted indexing of potential field')\n plt.plot(q2)\n plt.show() \n check=1 \n if debug.trap_depth: \n print minElectricField,escapeHeight,escapePosition,distance \n if distance<check:\n print('trap_depth.py: Escape point too close to trap minimum. Improve grid resolution or extend grid.')\n if escapeHeight>0.2:\n print('trap_depth.py: Escape point parameter too high. Improve grid resolution or extend grid.')\n D=escapeHeight-Vm\n [Ie,Je,Ke]=escapePosition\n [Xe,Ye,Ze]=[X[Ie],Y[Je],Z[Ke]] \n return [D,Xe,Ye,Ze]", "def test_vw_controller(self):\n pass\n\n yarp.Network.init()\n\n pose_stream = yarp.BufferedPortBottle()\n pose_stream.open(\"/morse/test/pose/in\")\n yarp.Network.connect(\"/morse/robots/ATRV/Pose/out\", \"/morse/test/pose/in\")\n\n cmd_stream = yarp.BufferedPortBottle()\n cmd_stream.open(\"/morse/test/vw/out\")\n yarp.Network.connect(\"/morse/test/vw/out\", \"/morse/robots/ATRV/Motion_Controller/in\")\n \n # Read the start position, it must be (0.0, 0.0, 0.0)\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n\n send_speed(cmd_stream, 1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -1.0, 0.0, 2.0)\n\n pose = pose_stream.read()\n for i in range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 1.0, -math.pi/4.0, 2.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, 0.5, -math.pi/8.0, 12.0)\n pose = pose_stream.read()\n for i in 
range(6):\n self.assertAlmostEqual(pose.get(i).asDouble(), 0.0, delta=0.1)\n\n send_speed(cmd_stream, -2.0, math.pi/2.0, 3.0)\n pose = pose_stream.read()\n self.assertAlmostEqual(pose.get(0).asDouble(), 4.0 / math.pi, delta=0.1)\n self.assertAlmostEqual(pose.get(1).asDouble(), -4.0 / math.pi , delta=0.1)\n self.assertAlmostEqual(pose.get(2).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(3).asDouble(), -math.pi/2.0, delta=0.1)\n self.assertAlmostEqual(pose.get(4).asDouble(), 0.0, delta=0.1)\n self.assertAlmostEqual(pose.get(5).asDouble(), 0.0, delta=0.1)\n\n yarp.Network.fini()", "def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n tel_azimuth = {}\n tel_altitude = {}\n\n #source = EventSource(filename, max_events=1)\n #subarray = source.subarray\n calib = CameraCalibrator(subarray)\n fit = HillasReconstructor(subarray)\n\n #for event in source:\n\n calib(event)\n\n hillas_dict = {}\n for tel_id, dl1 in event.dl1.tel.items():\n\n geom = subarray.tel[tel_id].camera.geometry\n tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth\n tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude\n\n mask = tailcuts_clean(\n geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0\n )\n\n dl1.parameters = ImageParametersContainer()\n\n try:\n moments = hillas_parameters(geom[mask], dl1.image[mask])\n hillas_dict[tel_id] = moments\n dl1.parameters.hillas = moments\n except HillasParameterizationError:\n dl1.parameters.hillas = HillasParametersContainer()\n continue\n\n # copy event container to modify it\n event_copy = deepcopy(event)\n # overwrite all image parameters but the last one with dummy ones\n for tel_id in list(event_copy.dl1.tel.keys())[:-1]:\n event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()\n fit(event_copy)\n assert event_copy.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to 0\n event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to NaN\n event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False", "def predicted_data_vol(self):\n pass", "def test_el3_vs_original():\n # store computations from original implementation\n # from florian_ell3_paper import el3 as el30\n # N = 10000\n # x11 = np.random.rand(N)*5\n # kc11 = (np.random.rand(N)-.5)*10\n # p11 = (np.random.rand(N)-.5)*10\n # result0 = np.array([el30(x, kc, p) for x,kc,p in zip(x11,kc11,p11)])\n # np.save('data_test_el3', np.array([result0,x11,kc11,p11]))\n\n # load data from orginal implementation\n data = np.load(\"tests/testdata/testdata_el3.npy\")\n res0, x11, kc11, p11 = data\n\n # compare to vectorized\n resv = el3v(x11, kc11, p11)\n assert np.allclose(res0, resv)\n\n # compare to modified original\n res1 = np.array([el30(x, kc, p) for x, kc, p in zip(x11, kc11, p11)])\n assert np.allclose(res0, res1)", "def test_delta_in_diff(self):\n xk = 1 * self.ureg.kelvin\n yk = 2 * self.ureg.kelvin\n yf = yk.to('degF')\n yc = yk.to('degC')\n self.assertEqual(yk - xk, 1 * self.ureg.kelvin)\n self.assertEqual(yf - xk, 1 * self.ureg.kelvin)\n self.assertEqual(yc - xk, 1 * 
self.ureg.kelvin)", "def runMT3D(self):\n \n # write mt3dms input\n self.__mt.write_input()\n # run mt3dms\n self.__mt.run_model()", "def _diff_pot(a2,t2,d2,wair,temp,pres,ppot,airf,dhum):\n ph2 = _eq_pressure(0,0,0,a2,t2,d2)\n gi2 = _ice_g(0,0,t2,ppot)\n gv2 = _eq_vappot(0,0,0,a2,t2,d2)\n sh1 = -_air_f(0,1,0,airf,temp,dhum)\n si1 = -_ice_g(1,0,temp,pres)\n s1 = wair/airf*sh1 + (1-wair/airf)*si1\n sh2 = -_air_f(0,1,0,a2,t2,d2)\n si2 = -_ice_g(1,0,t2,ppot)\n s2 = wair/a2*sh2 + (1-wair/a2)*si2\n lhs = numpy.array([ppot, gi2, s1])\n rhs = numpy.array([ph2, gv2, s2])\n \n ph2_a = _eq_pressure(1,0,0,a2,t2,d2)\n ph2_t = _eq_pressure(0,1,0,a2,t2,d2)\n ph2_d = _eq_pressure(0,0,1,a2,t2,d2)\n gi2_t = _ice_g(1,0,t2,ppot)\n gv2_a = _eq_vappot(1,0,0,a2,t2,d2)\n gv2_t = _eq_vappot(0,1,0,a2,t2,d2)\n gv2_d = _eq_vappot(0,0,1,a2,t2,d2)\n sh2_a = -_air_f(1,1,0,a2,t2,d2)\n sh2_t = -_air_f(0,2,0,a2,t2,d2)\n sh2_d = -_air_f(0,1,1,a2,t2,d2)\n si2_t = -_ice_g(2,0,t2,ppot)\n s2_a = -wair/a2**2*(sh2 - a2*sh2_a - si2)\n s2_t = wair/a2*sh2_t + (1-wair/a2)*si2_t\n s2_d = wair/a2*sh2_d\n dlhs = numpy.array([[0.,0.,0.], [0.,gi2_t,0.], [0.,0.,0.]])\n drhs = numpy.array([[ph2_a,ph2_t,ph2_d], [gv2_a,gv2_t,gv2_d],\n [s2_a,s2_t,s2_d]])\n return lhs, rhs, dlhs, drhs", "def __inverse_kinematics(self, guess, target_point):\n\n error = 1.0\n tolerance = 0.05\n\n # Initial Guess - Joint Angles\n thetas = np.matrix(guess) # thetas is list which is contain all axes theta angles.\n target_point = np.matrix(target_point) # X, Y, Z list to matrix for Target Position\n # print(target_point.shape)\n # Jacobian\n self.__calc_jacobian_matrix()\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n\n error_grad = []\n\n theta_dict = {}\n\n lr = 0.2\n while error > tolerance:\n for i in range(len(np.array(thetas)[0])):\n theta_dict[self.q[i]] = np.array(thetas)[0][i]\n\n theta_dict[self.q[-1]] = self.q[-1]\n\n calculated_target_point = np.matrix(self.get_coords_from_forward_kinematics(self.__forward_kinematics(np.array(thetas)[0])[-1]))\n logger.debug(f'calculated target point is \\n{calculated_target_point}')\n\n diff_wanted_calculated = target_point - calculated_target_point\n\n jacob_mat = np.matrix(self.jacobian_matrix.evalf(subs=theta_dict, chop=True, maxn=4)).astype(np.float64).T\n logger.debug(f'jacobian matrix is\\n{jacob_mat} \\n\\n diff is \\n {diff_wanted_calculated}')\n\n thetas = thetas + lr * (jacob_mat * diff_wanted_calculated.T)\n # thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level.\n\n prev_error = error\n\n error = linalg.norm(diff_wanted_calculated)\n\n if error > 10 * tolerance:\n lr = 0.3\n elif error < 10 * tolerance:\n lr = 0.2\n error_grad.append((error - prev_error))\n\n # print(error)\n return np.array(thetas)[0]", "def _solve_3d_scalar(self, simu=None):\n # # Call fftw filter\n # self._output_field.data[0] = fftw2py.solve_poisson_3d_pressure(\n # self._input_field.data[0],\n # self._input_field.data[1],\n # self._input_field.data[2])\n pass", "def test_deconvolve_to_motor_error(self):\n tau = 50.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 50.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = 1\n self.rule.tau_deconv1 = tau\n\n self.motor.error_fct = lambda _: np.ones(self.Nsrc)\n\n M = simulation.StateMonitor(self.rule, 'out')\n\n sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)\n sim.run(tmax)\n \n # the output should be almost constant\n 
self.assertAlmostEqual(np.std(M.out)/np.mean(M.out), 0)", "def testConstantBoundedField(self):\n photoCalib = lsst.afw.image.PhotoCalib(self.constantCalibration)\n self._testPhotoCalibCenter(photoCalib, 0)\n\n self.assertEqual(1, photoCalib.instFluxToMaggies(self.instFlux, self.pointYShift))\n self.assertEqual(0, photoCalib.instFluxToMagnitude(self.instFlux, self.pointYShift))\n self.assertFloatsAlmostEqual(1e-9, photoCalib.instFluxToMaggies(self.instFlux*1e-9, self.pointXShift))\n self.assertFloatsAlmostEqual(22.5, photoCalib.instFluxToMagnitude(\n self.instFlux*1e-9, self.pointXShift))\n\n photoCalib = lsst.afw.image.PhotoCalib(self.constantCalibration, self.calibrationErr)\n self._testPhotoCalibCenter(photoCalib, self.calibrationErr)", "def test_diff_analog_in_cal_5v_loop(self):\n for g in self.l.gains:\n for s,c,e in [(5, 11, .1), (2.5, 10, .03)]:\n v = self.l.input(channels=(c,c,c,c), gains=(g,g,g,g))\n r = v[0]\n if s*g > 20:\n if s*g > 25:\n self.assertTrue(v[3],\n \"%s should be overvoltage (%g, %g)\" % (v,s,g))\n continue\n for i in r:\n self.assertTrue(abs(s-i) < e,\n \"%g is not %g, channel %g, gain %g\" % (i,s,c,g))", "def onApplyECVButton(self):\n\n NodeName = 'ECV Map'\n try :\n self.ECVMapNode = slicer.util.getNode(NodeName)\n except:\n slicer.mrmlScene.AddNewNodeByClass('vtkMRMLScalarVolumeNode', NodeName)\n self.ECVMapNode = slicer.util.getNode(NodeName)\n\n T1Native_Matrix,T1Enhanced_Matrix = self.MatchMatrixs(self.NativeT1_Selector.currentNode(),self.EnhancedT1_Selector.currentNode())\n\n Haematocrit = self.SB_Haematocrit.value\n NT1B = self.SB_NBlodd.value\n ET1B = self.SB_EBlodd.value\n Factor = (100-Haematocrit)*(NT1B*ET1B/(NT1B-ET1B))\n epsilon = 0.1\n\n T1Enhanced_Matrix = T1Enhanced_Matrix + epsilon\n T1Native_Matrix = T1Native_Matrix + epsilon\n self.ECV_Matrix = (1/T1Enhanced_Matrix-1/T1Native_Matrix)*Factor\n self.ECV_Matrix = np.nan_to_num(self.ECV_Matrix)\n self.ECV_Matrix[ np.logical_or(self.ECV_Matrix<0 , self.ECV_Matrix>100) ] = 0\n\n slicer.util.updateVolumeFromArray(self.ECVMapNode, self.ECV_Matrix)\n self.SetLayoutViewer(self.ECVMapNode, 'Slice4')\n self.SetScalarDisplay(self.ECVMapNode, 1, 100) ## Que onda el Auto WL\n self.ThSlider_ECV.SetNode(self.ECVMapNode)\n Max = np.nanmax(self.ECV_Matrix)\n self.updateThresholdValues(self.ThSlider_ECV, self.ECVMapNode, Max)", "def detectMotion():\n global MotionDetected\n MotionDetected = False\n return MotionDetected", "def updatePWM(self):\n v_dc = self.dcmotorSpeed * self.dcmotor_sgn # changed \"vr\" to \"v_dc\", \"rightSpeed\" to \"dcmotorSpeed\" and \"right_sgn\" to dcmotor_sgn\", RFMH_2019_02_26\n pwm_dc = self.PWMvalue(v_dc, self.DC_MOTOR_MIN_PWM,\n self.DC_MOTOR_MAX_PWM) # changed \"pwmr\" to \"pwm_dc\" and \"vr\" to \"v_dc\" and adjusted both orange constants to \"DC_MOTOR_MIN_PWM\" AND \"DC_MOTOR_MAX_PWM\", RFMH_2019_02_26\n\n # TODO: Fix this debug message. I am trying to port this code over from an old version, and I do not know\n # what v and u are supposed to be here. 
Timothy Scott, 5.11.2019\n # if self.debug: # where the duck does the \"u\" come from?!?, RFMH_2019_02_26\n # print(\"v = %5.3f, u = %5.3f, v_dc = %5.3f, pwm_dc = %3d\" % (\n # v, u, v_dc, pwm_dc)) # deleted \"vl\" and \"pwml\" and adjust \"vr\" to \"v_dc\" to \"pwm_dc\"\n\n if math.fabs(v_dc) < self.SPEED_TOLERANCE: # changed v_r to v_dc in if loop , RFMH_2019_02_28\n DcMotorMode = Adafruit_MotorHAT.RELEASE\n pwm_dc = 0\n elif v_dc > 0:\n DcMotorMode = Adafruit_MotorHAT.FORWARD\n elif v_dc < 0:\n DcMotorMode = Adafruit_MotorHAT.BACKWARD\n\n if not self.old_pwm_dc == pwm_dc:\n self.DcMotor.setSpeed(pwm_dc) # changed rightMotor to DcMotor and pwmr to pwm_dc , RFMH_2019_02_28\n self.DcMotor.run(DcMotorMode)\n\n self.old_pwm_dc = pwm_dc", "def nzErr(xerr, yerr, vxerr, vyerr, year_x, year_y, mag, alnDir = '13_08_21/', chainsDir = 'efit/chains_S0-2_newRV2/'):\n\n #Read in values for error in position and velocity of sgr*\n origin_val = asciidata.open('/g/ghez/align/' + alnDir + chainsDir + 'efit_summary.txt')\n ori_x0e = origin_val[25][0]\n ori_y0e = origin_val[26][0]\n ori_vxe = origin_val[27][0]\n ori_vye = origin_val[28][0]\n t_0 = 2000.0 #hard coded t_0 of sgr*\n\n # magBins=np.array([9,11,12,13,14,15,16,17,18,19,20,21])\n # deltaArr=np.array([3.5,71.0,58.0,210.0,300.0,650.0,700.0,1100.0,1900.0,2200.0,3000.0])*1e-6\n\n# delta = mag*0.0\n# for i in range(len(mag)):\n# for j in range(len(deltaArr)):\n# if ((mag[i] > magBins[j]) & (mag[i] <= magBins[j+1])):\n# delta[i]=deltaArr[j]\n\n#pdb.set_trace()\n\n #Update errors\n xerr = np.sqrt(xerr**2 + ori_x0e**2 + ((year_x - t_0)*ori_vxe)**2)\n yerr = np.sqrt(yerr**2 + ori_y0e**2 + ((year_y - t_0)*ori_vye)**2)\n vxerr = np.sqrt(vxerr**2 + ori_vxe**2)\n vyerr = np.sqrt(vyerr**2 + ori_vye**2)\n\n return xerr, yerr, vxerr, vyerr", "def adjust_u(self):\r\n # compute the volume integrals of the x,y, and z components of u\r\n ux = assemble(self.u.sub(0)*dx)\r\n uy = assemble(self.u.sub(1)*dx)\r\n uz = assemble(self.u.sub(2)*dx)\r\n\r\n # create a function of value 1, which can be integrated.\r\n try:\r\n self.unit\r\n except AttributeError:\r\n self.unit = Function(self.Q)\r\n self.unit.assign(Constant(1))\r\n\r\n # compute the volume of the body\r\n Vol = assemble(self.unit*dx)\r\n\r\n try:\r\n self.umean\r\n except AttributeError:\r\n self.umean = Function(self.Z)\r\n\r\n # compute the volume-averaged component means\r\n self.umean.assign(Constant((ux/Vol, uy/Vol, uz/Vol, 0)))\r\n\r\n # subtract the mean from the solution function\r\n self.up.assign(self.up-self.umean)", "def compute_ground_truth_volume(self, display_opt):\n\n self.meshActor.GetProperty().SetOpacity(0.2)\n self.meshActor.GetProperty().SetColor(1, 0, 0)\n\n clean = vtk.vtkCleanPolyData()\n clean.SetInputData(self.endo_poly)\n\n d3 = vtk.vtkDelaunay3D()\n d3.SetInputConnection(clean.GetOutputPort())\n d3.SetTolerance(0.01)\n d3.SetAlpha(0.0)\n d3.Update()\n\n surfaceFilter = vtk.vtkDataSetSurfaceFilter() # output is triangular mesh\n surfaceFilter.SetInputConnection(d3.GetOutputPort())\n surfaceFilter.Update()\n\n Mass = vtk.vtkMassProperties()\n Mass.SetInputConnection(surfaceFilter.GetOutputPort())\n Mass.Update()\n\n self.ground_truth_vol = Mass.GetVolume()/1000.0\n\n if display_opt:\n\n m = vtk.vtkDataSetMapper()\n m.SetInputConnection(d3.GetOutputPort())\n\n a = vtk.vtkActor()\n a.SetMapper(m)\n\n # set mapper for epi for visualization\n m2 = vtk.vtkDataSetMapper()\n m2.SetInputData(self.epi_poly)\n\n epi_actor = vtk.vtkActor()\n epi_actor.SetMapper(m2)\n 
epi_actor.GetProperty().SetOpacity(0.3)\n epi_actor.GetProperty().SetColor(1,0,0)\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(0.0, 0.0, 0.0)\n ren.AddActor(epi_actor)\n ren.AddActor(a)\n\n vtk_show(ren)", "def update_theta(self, lhs_m, rhs_m):\n t = 0\n # FIXIME: fix the symbol\n self.P = self.E\n self.M = self.K\n self.T = self.G\n self.z_m = self.theta_m.T.copy()\n new_z_m = (lhs_m * rhs_m).T\n z_m_noLag = new_z_m.copy()\n if self.positive_em:\n # print('rhs_m', rhs_m)\n new_z_m = np.asmatrix(np.zeros((self.M, self.P))).T\n for p in range(self.P):\n old_z = self.z_m[p, :].T\n lag_lambda_m = np.asmatrix(np.zeros((1, self.M)))\n # get active set\n active = set()\n for _ in range(self.M):\n # print('[z]:before update\\n', old_z)\n # print('[z]:update without lag\\n', z_m_noLag[p, :])\n # print('[z]:lag term\\n', lag_lambda_m[p,:].T)\n new_z = lhs_m * (rhs_m[:,p] + 1/2*lag_lambda_m[0,:].T)\n # print('[z]:after lag\\n', new_z)\n\n # print('after round up\\n', new_z)\n v = new_z - old_z\n min_k = 1\n min_m = 0\n found = False\n for m in range(self.M):\n if new_z[m,0] < 0:\n if v[m, 0] < 0:\n k = old_z[m, 0] / (-v[m, 0])\n if k < min_k:\n min_k = k\n min_m = m\n found = True\n old_z += min_k*v\n for m in range(self.M):\n if -self.precision <= old_z[m,0] <= self.precision:\n old_z[m,0] = 0\n # print('[z]:updated z', old_z[:, 0])\n \n if found:\n active.add(min_m)\n # print(active)\n # print('[z]:active set', active)\n # set lag term to 0\n done = False\n while not done:\n lag_lambda_m = np.asmatrix(np.zeros((1, self.M)))\n \n if len(active) > 0:\n active_len = len(active)\n lag_lambda = np.asmatrix(np.zeros((1, active_len)))\n lag_lhs = np.asmatrix(np.zeros((active_len, active_len)))\n lag_lhs_bar = np.asmatrix(np.zeros((active_len, self.M - active_len)))\n lag_rhs = np.asmatrix(np.zeros((active_len, 1)))\n lag_rhs_bar = np.asmatrix(np.zeros((self.M - active_len, 1)))\n for r, val in enumerate(active):\n counter = 0\n bar_counter = 0\n for c in range(self.M):\n if c in active:\n lag_lhs[r, counter] = lhs_m[val, c]\n counter += 1\n else:\n lag_lhs_bar[r, bar_counter] = lhs_m[val, c]\n bar_counter += 1\n counter = 0\n bar_counter = 0\n for c in range(self.M):\n if c in active:\n lag_rhs[counter, 0] = rhs_m[c, p]\n counter += 1\n else:\n lag_rhs_bar[bar_counter, 0] = rhs_m[c, p]\n bar_counter += 1\n lag_lambda = (-2 * np.linalg.pinv(lag_lhs) * lag_lhs_bar * lag_rhs_bar - 2 * lag_rhs).T\n # print('lag_lambda\\n', lag_lambda)\n # print('lag_lhs\\n', lag_lhs)\n # print('lag_lhs_bar\\n', lag_lhs_bar)\n # print('lag_rhs\\n', lag_rhs)\n # print('lag_rhs_bar\\n', lag_rhs_bar)\n expected_theta = lag_lhs * (lag_rhs + 1/2*lag_lambda.T) + lag_lhs_bar * lag_rhs_bar\n # print('expected theta\\n', expected_theta) \n \n active_remove = set()\n # print(\"[Neg lag]\", lag_lambda)\n for c, val in enumerate(active):\n if lag_lambda[:, c] < 0:\n print(\"[Neg lag]\", lag_lambda[:, c])\n active_remove.add(val)\n active = active.difference(active_remove)\n if len(active_remove) == 0:\n done = True\n for c, val in enumerate(active):\n lag_lambda_m[:, val] = lag_lambda[:, c]\n else:\n break\n if not self.check_positive(old_z):\n print(\"[Warning]: get negative z value\")\n print(old_z)\n # raise Exception('[z]:Non positive state')\n new_z_m[p, :] = old_z[:, 0].T\n self.theta_m = new_z_m.T.copy()", "def cmdVelCallback(self, req):\n x = req.linear.x # m/s\n th = req.angular.z # rad/s\n\n if x == 0:\n # Turn in place\n right = th * self.wheel_track * self.gear_reduction / 2.0\n left = -right\n elif th == 0: \n # Pure 
forward/backward motion\n left = right = x\n else:\n # Rotation about a point in space\n left = x - th * self.wheel_track * self.gear_reduction / 2.0\n right = x + th * self.wheel_track * self.gear_reduction / 2.0\n\n # Set motor speeds in meters per second.\n self.mySerializer.mogo_m_per_s([1, 2], [left, right])", "def test3(self):\n sig1 = np.array([0, 1, 0])\n sig2 = np.array([0, 1, 0, 0])\n d, p = EventSync.estimate_delay(sig1, sig2)\n self.assertTrue(d == 0)", "def test_force(self):\n group = hoomd.group.all()\n\n # compute forces\n f = azplugins.restrain.plane(group=group, point=(0,0,0), normal=(1,0,0), k=2.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 6.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 1.)\n self.assertAlmostEqual(f.forces[1].energy, 1.)\n self.assertAlmostEqual(f.forces[2].energy, 9.)\n np.testing.assert_array_almost_equal(f.forces[0].virial, (-2.,0,0,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].virial, (-2.,0,4.,0,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].virial, (12.,0,0,0,0,0))\n\n # change the spring constant\n f.set_params(k=1.0)\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-1.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 1.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 3.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.5)\n self.assertAlmostEqual(f.forces[1].energy, 0.5)\n self.assertAlmostEqual(f.forces[2].energy, 4.5)\n\n # shift the plane down\n f.set_params(point=(-1,0,0))\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (-2.,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, ( 0.,0,0))\n np.testing.assert_array_almost_equal(f.forces[2].force, ( 2.,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 2.0)\n self.assertAlmostEqual(f.forces[1].energy, 0.0)\n self.assertAlmostEqual(f.forces[2].energy, 2.0)\n\n # rotate the plane so that only particle 1 is off the line\n f.set_params(point=(0,0,0), normal=(0,0,1))\n hoomd.run(1)\n np.testing.assert_array_almost_equal(f.forces[0].force, (0,0,0))\n np.testing.assert_array_almost_equal(f.forces[1].force, (0,0,-2))\n np.testing.assert_array_almost_equal(f.forces[2].force, (0,0,0))\n self.assertAlmostEqual(f.forces[0].energy, 0.0)\n self.assertAlmostEqual(f.forces[1].energy, 2.0)\n self.assertAlmostEqual(f.forces[2].energy, 0.0)", "def _compute_correction(self, initial_state, final_state, a, b, c, s):\r\n pertub = self.pertub\r\n pertub_s = pertub *10\r\n \r\n pred_no_pertub = self._motion_update_one_shot(initial_state, a, b, c, s)\r\n pred_pertub_a = self._motion_update_one_shot(initial_state, a +pertub, b, c, s)\r\n pred_pertub_b = self._motion_update_one_shot(initial_state, a, b +pertub, c, s)\r\n # no need to correct C, C is constrained by kappa_final\r\n # # pred_pertub_c = self._motion_update_one_shot(initial_state, a, b, c +pertub, s)\r\n pred_pertub_s = self._motion_update_one_shot(initial_state, a, b, c, s +pertub_s)\r\n\r\n d_state = np.zeros((3,1))\r\n d_pertub_state = np.zeros((3,3))\r\n Jacobian = np.zeros((3,3))\r\n for i in range(0, 3):\r\n d_pertub_state[i][0] = (final_state[i] - pred_pertub_a[i]) # a\r\n d_pertub_state[i][1] = (final_state[i] - pred_pertub_b[i]) # b\r\n # d_pertub_state[i][2] = (final_state[i] - pred_pertub_c[i]) # c (no update)\r\n d_pertub_state[i][2] = (final_state[i] - 
pred_pertub_s[i]) # s\r\n \r\n d_state[i] = final_state[i] - pred_no_pertub[i]\r\n \r\n Jacobian[i][0] = (d_pertub_state[i][0] - d_state[i])/pertub # a\r\n Jacobian[i][1] = (d_pertub_state[i][1] - d_state[i])/pertub # b\r\n # Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub # c (no update)\r\n Jacobian[i][2] = (d_pertub_state[i][2] - d_state[i])/pertub_s # s\r\n\r\n # inv_Jacobian = np.linalg.inv(Jacobian)\r\n inv_Jacobian = np.linalg.pinv(Jacobian)\r\n correction = np.dot(inv_Jacobian, d_state)\r\n # pdb.set_trace()\r\n return correction", "def test_vertical_velocity(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_equal(self.dVbox_dz(self.t, 0), 0)\n np.testing.assert_array_less(self.dVbox_dz(self.t, 0.5 * L_x), 0)\n np.testing.assert_array_equal(self.dVbox_dz(self.t, L_x), 0)", "def rk4(accel,m,r,h,v): \n k1v = accel(m,r) \n k1r = v \n k2v = accel(m,r + h*0.5*k1r) \n k2r = v+k1v*h*0.5 \n k3v = accel(m,r + h*0.5*k2r) \n k3r = v+k2v*h*0.5\n k4v = accel(m,r + h*k3r) \n k4r = v+k3v*h\n new_v = v + h*(k1v + 2*k2v + 2*k3v + k4v)/float(6)\n new_r = r + h*(k1r + 2*k2r + 2*k3r + k4r)/float(6)\n return new_v,new_r", "def check_change_power_spectrum(test_knotpos, test_knotval, matpow):\n #Get the modified power spectrum\n kval = matpow[:,0]\n newpk = lyasimulation.change_power_spectrum_knots(test_knotpos, test_knotval, matpow)\n #Check the kvalues are still the same for comparison to the transfer function\n assert np.all([k in newpk[:,0] for k in kval])\n #Build interpolators for the new power spectrum\n #Only interpolate a subset of Pk for speed\n newpkint = build_restrict_interp(newpk, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for old power spectrum\n pkint = build_restrict_interp(matpow, test_knotpos[0]/3., test_knotpos[-1]*3)\n #Build interpolator for knots\n ext_knotpos = np.concatenate([[kval[0],],test_knotpos, [kval[-1],]])\n ext_knotval = np.concatenate([[test_knotval[0],],test_knotval, [test_knotval[-1],]])\n knotint = interp.interp1d(ext_knotpos, ext_knotval, kind='linear')\n #Check that the interpolator works\n assert np.all(np.abs(knotint(test_knotpos) / test_knotval-1) < 1e-5)\n lg_knotpos = np.log(test_knotpos)\n #Check modification worked at the knot values\n assert np.all(np.abs(np.exp(newpkint(lg_knotpos)) / (np.exp(pkint(lg_knotpos)) * test_knotval) - 1) < 1e-3)\n #Pick some random k values distributed uniformly in log space\n krand = (lg_knotpos[-1]-lg_knotpos[0]+0.2)*np.random.random(250)+lg_knotpos[0]-0.1\n #Check that the modification was accurate at random positions\n #print(np.max(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1)))\n assert np.all(np.abs(np.exp(newpkint(krand)) / (np.exp(pkint(krand)) * knotint(np.exp(krand))) - 1) < 0.01)", "def get_correction(d, a, hfov, img_x):\n\n width = 2 * d*math.tan((hfov/2)*math.pi/180) # in meters\n one_meter = img_x / width\n return int(a*one_meter)", "def loss_comparison(warp_fname, offpar_fname, moving_rmli_fname,\n fixed_rmli_fname, moved_fname, crop_center, crop_size,\n reg_weight, ncc_win, debug=False):\n rg_crop = crop_size[0]\n az_crop = crop_size[1]\n rg_cen = crop_center[0]\n az_cen = crop_center[1]\n\n # Import voxelmorph with pytorch backend\n os.environ['VXM_BACKEND'] = 'pytorch'\n import voxelmorph as vxm\n\n # Read the voxelmorph warp file\n warp_file = np.load(warp_fname)\n warp = warp_file['offs']\n warp = warp[np.newaxis, :, :, :]\n\n # Read moved scene\n moved_file = np.load(moved_fname)\n moved = moved_file['scene']\n moved = 
moved[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the fixed RMLI\n fixed_rmli = gx.MLI(fixed_rmli_fname,\n par=gx.MLI_Par(fixed_rmli_fname + '.par'))\n rmli_dim = fixed_rmli.dim\n fixed_full = fixed_rmli.array\n fixed = fixed_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n fixed = scale_rmli(fixed)\n fixed = fixed[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the moving RMLI\n moving_rmli = gx.MLI(moving_rmli_fname,\n par=gx.MLI_Par(moving_rmli_fname + '.par'))\n moving_full = moving_rmli.array\n moving = moving_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n moving = scale_rmli(moving)\n moving = moving[np.newaxis, np.newaxis, :, :]\n\n # Read in the Gamma offsets\n # Scale the Gamma offsets to be the same size as the original data that\n # was cropped to feed into voxelmorph.\n offs_basename, _ = os.path.splitext(os.path.basename(offpar_fname))\n offs_fname = os.path.join(os.path.dirname(offpar_fname), offs_basename + '.offs')\n offpar = gx.OFF_Par(offpar_fname)\n offs_dim = (offpar['offset_estimation_range_samples'],\n offpar['offset_estimation_azimuth_samples'])\n gx_offs = gx.readBin(offs_fname, offs_dim, _dtype='complex64')\n zoom_factor = (rmli_dim[0] / offs_dim[0], rmli_dim[1] / offs_dim[1])\n multilook = (fixed_rmli.par['range_looks'],\n fixed_rmli.par['azimuth_looks'])\n gamma_rg_offs = scipy.ndimage.zoom(np.real(gx_offs), zoom_factor)\n gamma_az_offs = scipy.ndimage.zoom(np.imag(gx_offs), zoom_factor)\n gamma_rg_offs = gamma_rg_offs[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n gamma_rg_offs /= multilook[0]\n gamma_az_offs = gamma_az_offs[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n gamma_az_offs /= multilook[1]\n gamma_warp = np.stack((gamma_rg_offs, gamma_az_offs), axis=0)\n gamma_warp = gamma_warp[np.newaxis, :, :, :]\n\n # Create a moved image with the gamma offsets\n transformer = vxm.layers.SpatialTransformer(crop_size)\n gamma_moved = transformer(torch.from_numpy(moving).float(),\n torch.from_numpy(gamma_warp).float())\n\n # Prepare ncc loss with square window\n ndims = len(list(fixed.shape)) - 2\n assert ndims in [1, 2, 3], \"volumes should be 1 to 3 dimensions. 
found: %d\" % ndims\n ncc_win_sq = [ncc_win] * ndims # Build a square window\n ncc = vxm.losses.NCC(ncc_win_sq, cuda=False)\n\n # Now we have all the data, compute the losses\n loss_sim_vxm = ncc.loss(torch.from_numpy(fixed).float(),\n torch.from_numpy(moved).float())\n loss_sim_gamma = ncc.loss(torch.from_numpy(fixed).float(), gamma_moved)\n\n grad = vxm.losses.Grad(penalty='l2')\n loss_smooth_vxm = grad.loss(None, torch.from_numpy(warp).float())\n loss_smooth_gamma = grad.loss(None, torch.from_numpy(gamma_warp).float())\n\n loss_total_vxm = loss_sim_vxm + (reg_weight * loss_smooth_vxm)\n loss_total_gamma = loss_sim_gamma + (reg_weight * loss_smooth_gamma)\n\n # Print everything\n print('Lambda: {}\\n'.format(reg_weight))\n print('Voxelmorph:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n 'Total: {}\\n'.format(loss_sim_vxm, loss_smooth_vxm, loss_total_vxm))\n print('Gamma:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n 'Total: {}\\n'.format(loss_sim_gamma, loss_smooth_gamma, loss_total_gamma))\n\n if debug:\n plt.figure()\n plt.imshow(moved[0, 0, :, :])\n plt.title('moved')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_moved[0, 0, :, :])\n plt.title('gamma_moved')\n plt.colorbar()\n plt.figure()\n plt.imshow(fixed[0, 0, :, :])\n plt.title('fixed')\n plt.colorbar()\n plt.figure()\n plt.imshow(warp[0, 0, :, :])\n plt.title('warp_rg')\n plt.colorbar()\n plt.figure()\n plt.imshow(warp[0, 1, :, :])\n plt.title('warp_az')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_warp[0, 0, :, :])\n plt.title('gamma_warp_rg')\n plt.colorbar()\n plt.figure()\n plt.imshow(gamma_warp[0, 1, :, :])\n plt.title('gamma_warp_az')\n plt.colorbar()\n plt.show()", "def update_apc12(self, delta_t=None):\n\n delta_t = delta_t or self.delta_t\n\n kap = (self.vel, self.force(self.pos,\n self.vel,\n self.time, drag=False), self.time)\n\n pos = self.pos+delta_t*self.vel\n vel = self.vel+delta_t*kap[1]\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(self.pos, self.vel, self.time, delta_t)\n\n\n force = self.force(pos,\n vel,\n self.time+delta_t, drag=False)\n\n pos = self.pos+delta_t/2.0*(vel+self.vel)\n vel = self.vel+delta_t/2.0*(force+kap[1])\n\n for cback in self.pos_callbacks:\n pos += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n for cback in self.vel_callbacks:\n vel += delta_t*cback(pos, vel, self.time+delta_t, delta_t)\n\n try:\n self.pos, self.vel = self.check_collision_full(pos, self.pos,\n vel, self.vel,\n delta_t, drag=True)\n except Collision.CollisionException as col:\n vel = self.vel+col.delta_t*kap[1]\n C, fvel = self.drag_coefficient(col.pos, vel, self.time+col.delta_t, nearest = True)\n col.vel = (self.vel+col.delta_t*(kap[1]+C*fvel))/(1.0+col.delta_t*C)\n raise col\n \n self.time += delta_t\n\n return kap", "def test_compute_after_smooth_goddard_2013(\r\n PM_ds_initialized_3d_full, PM_ds_control_3d_full\r\n):\r\n PM_ds_control_3d_full = smooth_goddard_2013(\r\n PM_ds_control_3d_full,\r\n )\r\n PM_ds_initialized_3d_full = smooth_goddard_2013(\r\n PM_ds_initialized_3d_full,\r\n )\r\n actual = compute_perfect_model(PM_ds_initialized_3d_full, PM_ds_control_3d_full).tos\r\n\r\n north_atlantic = actual.sel(lat=slice(40, 50), lon=slice(-30, -20))\r\n assert not north_atlantic.isnull().any()", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if 
self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def single_volume_inference_unpadded(self, volume):\n \n # normalize the data volume \n image = (volume.astype(np.single) - np.min(volume))/(np.max(volume) - np.min(volume))\n # reshape the image volume to the same patch size used for training\n img_reshaped = med_reshape(image, new_shape=(self.patch_size, self.patch_size, image.shape[2]))\n # create a new 3d mask to store predicted results\n mask3d = np.zeros(img_reshaped.shape)\n # iterate over the image array and predict the all the slices\n for slc_idx in range(img_reshaped.shape[2]):\n # compute for each slice\n slc = torch.from_numpy(img_reshaped[:,:,slc_idx].astype(np.single)).unsqueeze(0).unsqueeze(0)\n # make prediction\n pred = self.model(slc.to(self.device))\n pred = np.squeeze(pred.cpu().detach())\n # store predicted data\n mask3d[:,:,slc_idx] = torch.argmax(pred, dim=0)\n # return the predicted volume\n return mask3d", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n 
self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y", "def _update_motion_data(self, msg):\n if self._auv_motion != msg.motion:\n self._target_euler[\"alpha\"] = self._actual_euler[\"alpha\"]\n self._target_euler[\"beta\"] = self._actual_euler[\"beta\"]\n self._target_euler[\"gamma\"] = self._actual_euler[\"gamma\"]\n self._auv_motion = msg.motion\n self._thrusters_actual_speed[\"1\"] = msg.thrusters_speed.thruster_id1_speed\n self._thrusters_actual_speed[\"2\"] = msg.thrusters_speed.thruster_id2_speed\n self._thrusters_actual_speed[\"3\"] = msg.thrusters_speed.thruster_id3_speed\n self._thrusters_actual_speed[\"4\"] = msg.thrusters_speed.thruster_id4_speed\n self._thrusters_actual_speed[\"5\"] = msg.thrusters_speed.thruster_id5_speed\n self._thrusters_actual_speed[\"6\"] = msg.thrusters_speed.thruster_id6_speed\n self._thrusters_actual_speed[\"7\"] = msg.thrusters_speed.thruster_id7_speed\n self._thrusters_actual_speed[\"8\"] = msg.thrusters_speed.thruster_id8_speed" ]
[ "0.65022635", "0.5807314", "0.5593249", "0.55692303", "0.5540268", "0.5456648", "0.5415684", "0.539106", "0.5349843", "0.5269265", "0.52284133", "0.52205676", "0.5212929", "0.5210599", "0.52055985", "0.52035886", "0.5195111", "0.51901066", "0.5163019", "0.5134201", "0.5127333", "0.51255727", "0.5121156", "0.50821805", "0.5077219", "0.50729513", "0.5065775", "0.50641704", "0.50640464", "0.50625086", "0.506009", "0.50583297", "0.50481755", "0.5041057", "0.50132453", "0.4998375", "0.49947917", "0.49931207", "0.49851322", "0.4972273", "0.49561992", "0.49521556", "0.4950889", "0.49484318", "0.4939295", "0.49340686", "0.49319807", "0.49311328", "0.4929449", "0.49292374", "0.49283203", "0.4927944", "0.4917543", "0.49148524", "0.49142018", "0.49086624", "0.49062523", "0.4903244", "0.4900945", "0.48831967", "0.4881095", "0.48802766", "0.48793578", "0.48705056", "0.48566672", "0.48508668", "0.48499233", "0.4842123", "0.48409745", "0.48354304", "0.48342195", "0.48321646", "0.481446", "0.48138908", "0.4811763", "0.48102644", "0.4806386", "0.48030627", "0.479695", "0.47931904", "0.47839934", "0.47836858", "0.47831953", "0.47822326", "0.47803816", "0.47802228", "0.47764271", "0.4776352", "0.47750345", "0.47746554", "0.4774472", "0.4771635", "0.47636148", "0.4761327", "0.47604194", "0.47583973", "0.47582766", "0.475587", "0.47513977", "0.47502747" ]
0.47712728
92
Call the jump_censor program to characterize the degree of motion.
def JumpCensor(self): if self.verbose: print 'Computing censor files.' for entry in self.entry_map['epi']: if self.censor_interleave: input_file = '%s+orig' % self.info[entry]['imgfile'] interleave = '--interleave' else: interleave = '' if os.path.exists(self.info[entry]['mot_file']): input_file = self.info[entry]['mot_file'] else: input_file = '%s+orig' % self.info[entry]['imgfile'] cmd = \ "jump_censor -v --prefix=%s %s --store-plot --threshold=%f %s" % \ (self.info[entry]['censor_prefix'], interleave, self.censor_thresh, input_file) try: self.CheckExec(cmd, ['%s_censor.1D' % self.info[entry]['censor_prefix']], force=False) except: print 'Error computing censor files.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def censoring_fcn(self, q):\n return 1.0", "def jump(self):\n\t\tself.vel = -10\n\t\tself.tick_count = 0\n\t\tself.height = self.y", "def on_jump_press(self) -> None:\r\n if not self.node:\r\n return\r\n if don.jumpFly:\r\n self.node.handlemessage(\"impulse\",self.node.position[0],self.node.position[1]-2,self.node.position[2],self.node.velocity[0]*0,20 +0.01,self.node.velocity[2]*0,10,5,0,0,self.node.velocity[0]*-0.1,20 + 0.01,self.node.velocity[2]*0)\r\n self.node.handlemessage(\"impulse\",self.node.position[0],self.node.position[1],self.node.position[2],self.node.velocity[0]*0,20 +0.01,self.node.velocity[2]*0,10,5,0,0,self.node.velocity[0]*-0.1,20 + 0.01,self.node.velocity[2]*0)\r\n self.node.color = ((0+random.random()*6.5),(0+random.random()*6.5),(0+random.random()*6.5))\r\n self.node.highlight = ((0+random.random()*6.5),(0+random.random()*6.5),(0+random.random()*6.5)) \r\n self.node.jump_pressed = True\r\n self._turbo_filter_add_press('jump')", "def jump(self, x):\n self.change_y += x * self.speed", "def jump(self):\n self.vy = -9", "def censor(text: str) -> str:\n\n # Split up individual words in the text\n tokens: List[str] = text.split(\" \")\n\n # Create a mapping of 0 if the word is okay, 1 if it should be censored\n censor_mask: List[int] = predict([word for word in tokens])\n\n # A list of tuples with the first element being the word and the second being 0 or 1\n censor_map: List[Tuple[str, int]] = list(zip(tokens, censor_mask))\n\n # A list of the words that make up the censored text\n censored_text: List[str] = [\n censor_word(word) if should_censor else word\n for word, should_censor in censor_map\n ]\n\n return \" \".join(censored_text)", "def content_jump(self, jump: np.ndarray, data: np.ndarray) -> None:\n if len(jump) != 1:\n raise ValueError(\"`jump` must be a one dimensional vector\")\n if jump > 0.5:\n euc_dist = np.abs(self._buffer - data).sum(axis=1)\n self._head = np.argmin(euc_dist)", "def censor(text: Optional[str]) -> str:\n char = \"*\"\n text = text if text else \"\"\n return text[0] + (len(text) - 1) * char if text else text", "def jog(self, axis:str=\"x\", distance:float=1):\n self.sendCommand(\"G91\")\n axis.capitalize()\n self.sendCommand(f'$J={axis}{distance} F1000')", "def evaluate(self,joystick,keys):\n \n self.AG_twinklers.do() \n \n \n if joystick.isUp(keys)==True and self.solomon.current_state[\"jumping\"]==0: \n self.solomon.current_state[\"jumping\"]=1 \n\n\n walkcheck=False\n \n if self.solomon.A_wandswish.overide==False:\n \n self.solomon.current_state[\"wandswish\"]=0 \n\n if joystick.isDown(keys)==True: \n self.solomon.current_state[\"crouching\"]=1 \n self.solomon.current_state[\"standing\"]=0\n else: \n self.solomon.current_state[\"crouching\"]=0 \n \n if joystick.isRight(keys)==True:\n self.solomon.facing=1 \n self.solomon.current_state[\"walking\"]=1\n self.solomon.current_state[\"standing\"]=1\n walkcheck=True\n elif joystick.isLeft(keys)==True: \n self.solomon.facing=-1 \n walkcheck=True\n self.solomon.current_state[\"walking\"]=1\n self.solomon.current_state[\"standing\"]=0\n else:\n self.solomon.current_state[\"walking\"]=0 \n self.solomon.current_state[\"standing\"]=1\n\n canwalk=False\n if walkcheck:\n result=self.detect(self.solomon.x+self.solomon.facing*self.solomon.step*5.0,self.solomon.y) \n if (len(result)==0 or result[0][0]==\".\") and self.solomon.current_state[\"walking\"]==1:\n #self.solomon.x+=self.solomon.step*self.solomon.facing \n self.solomon.current_state[\"standing\"]=0 \n self.solomon.current_state[\"walking\"]=1\n 
canwalk=True\n #elif result[0][0] in [\"]\n\n result1=self.grid[int(self.solomon.y-0)][int(self.solomon.x+0.5+self.solomon.step*2*self.solomon.facing)]\n result2=self.grid[int(self.solomon.y-0)][int(self.solomon.x+0.5-self.solomon.step*2*self.solomon.facing)]\n #print \"fall check\" + str((result1,result2,self.solomon.x,self.solomon.y))\n if result1==\".\" and result2==\".\":\n self.solomon.y-=self.solomon.step\n self.solomon.current_state[\"walking\"]=0\n canwalk=False\n\n if canwalk==True: self.solomon.x+=self.solomon.step*self.solomon.facing\n\n if joystick.isFire(keys)==True and self.solomon.current_state[\"wandswish\"]==0: \n self.solomon.A_wandswish.kick()\n self.solomon.A_wandswish.overide=True\n self.solomon.current_state[\"wandswish\"]=1 \n\n \n if self.solomon.current_state[\"jumping\"]==1:\n self.solomon.AG_jump.do()\n print \"he's jumping\"\n print str(self.solomon.AG_jump.action(\"jump_displacement\").tick)\n self.solomon.y+=0.2\n #print \"co-ordinates \"+str((self.solomon.x,self.solomon.y))", "def on_r_joy_y(self):\r\n self.log()", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCR_HEIGHT:\n self.change_y = -8", "def jump(self):\n\n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(\n self, self.platforms, False)\n self.rect.y -= 2\n\n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= WIN_HEIGHT:\n self.change_y = -10", "def teleopPeriodic(self):\n self.drive.arcadeDrive(1, 0)\n self.brushless.set(1)\n self.spark.set(self.joystick.getY())", "def jump(self):\n if self.y_pos > self.max_pos_y + self.height:\n self.isJump = True\n self.y_velocity = -13.5\n sounds[\"jump\"].play()", "def on_key(window, key, scancode, action, mods):\n if action != glfw.PRESS:\n return\n \n global controller\n\n if key == glfw.KEY_SPACE:\n controller.fillPolygon = not controller.fillPolygon\n\n elif key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(window, True)\n\n # Si detecta la tecla [Q] cambia el estado del efecto 1 : zoom\n elif key == glfw.KEY_Z:\n controller.effect1 = not controller.effect1\n\n # Si detecta la tecla [W] cambia el estado del efecto 2 : corte\n elif key == glfw.KEY_C:\n controller.effect2 = not controller.effect2\n\n else:\n print('Unknown key')", "def jump(self):\n \n # move down a bit and see if there is a platform below us.\n # Move down 2 pixels because it doesn't work well if we only move down 1\n # when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set our speed upwards\n if len(platform_hit_list) > 0: #or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def apply_action(self, action):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n robot_x = robot_state.pose.position.x\n robot_y = robot_state.pose.position.y\n # Set the distance moved in an action such that it is at 
least as large as the\n # minimum distance that would let a robot in the middle of the goal go to either side\n #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5)\n if action == Learn.MOVE_LEFT:\n print(\"Move left\")\n self.set_robot(robot_x, robot_y+self.move_dist)\n elif action == Learn.MOVE_RIGHT:\n print(\"Move right\")\n self.set_robot(robot_x, robot_y-self.move_dist)\n else:\n print(\"Stay put\")", "def ev_controlleraxismotion(self, event: tcod.event.ControllerAxis) -> T | None:", "def jump(self):\n \n # move down and see if there's a platform below us.\n # Move down 2 pixels because it doesn't work well if you only move down\n # 1 when working with a platform moving down.\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n \n # If it is ok to jump, set the speed upwards\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\n self.change_y = -10", "def jump(self):\n\t\tself._is_falling = True\n\t\tself._dy = -5", "def ev_joyhatmotion(self, event: tcod.event.JoystickHat) -> T | None:", "def on_r_joy_x(self):\r\n self.log()", "def on_l_joy_y(self):\r\n self.log()", "def ev_joyaxismotion(self, event: tcod.event.JoystickAxis) -> T | None:", "def jump(self, xvel = 0, yvel = 0): #TODO: figure out how a monster's jumping ability is determined.\n self.xvel += xvel\n self.yvel -= yvel\n self.animation.iter()\n self.ai_count = 25 #TEMP\n self.onGround = False", "def _take_action(self, action):\n\n if isinstance(action, list) or isinstance(action, np.ndarray):\n action = action[0]\n\n if self.continuous:\n increment = np.array([1.5*np.cos(action),1.5*np.sin(action)])\n else:\n increment = np.array([0.0,0.0])\n if action == 0:\n increment[0] = 1.5\n elif action == 1:\n increment[0] = 1.225\n increment[1] = 1.225\n elif action == 2:\n increment[1] = 1.5\n elif action == 3:\n increment[0] = -1.225\n increment[1] = 1.225\n elif action == 4:\n increment[0] = -1.5\n elif action == 5:\n increment[0] = -1.225\n increment[1] = -1.225\n elif action == 6:\n increment[1] = -1.5\n elif action == 7:\n increment[0] = 1.225\n increment[1] = -1.225\n else:\n print('NOP!')\n\n self.dog_pose += increment\n self._update_environment()", "def jump(self):\n print(\"Inside ElfRider.jump\")", "def censor_a_word(text, censor):\n censored_item = \"\"\n for x in range(len(censor)):\n if censor[x] == \" \":\n censored_item = censored_item + \" \"\n else:\n censored_item = censored_item + \"X\"\n return text.replace(censor, censored_item)", "def on_cmyk_slide(self,c,m,y,k):\n if not self.active:\n return\n cyan = c / 100.0\n magenta = m / 100.0\n yellow = y / 100.0\n black = k / 100.0\n self.cmyk = colormodel.CMYK(cyan, magenta, yellow, black)\n temp = a3.cmyk_to_rgb(self.cmyk)\n assert (temp == None or type(temp) == colormodel.RGB), 'cmyk_to_rgb does not return a RGB object'\n self.rgb = self.rgb if temp is None else temp\n self.hsv = a3.rgb_to_hsv(self.rgb)\n assert (self.hsv == None or type(self.hsv) == colormodel.HSV), 'rgb_to_hsv does not return a HSV object'\n self.update()", "def jump(self):\n if (self.falling or self.rising) and self.doubleJump:\n self.speed_y = -20 # //////Aquí se cambia la velocidad incial cuando se salta//////\n self.fallin = False\n self.rising = True\n self.doubleJump = False\n\n if not self.falling and not self.rising:\n self.speed_y = -20 # //////Aquí se cambia la velocidad incial cuando se salta//////\n self.rising = True", "def _jump(self):\n # can't jump while jump\n 
if self._player.is_jumping():\n # no hard-code, set jump with 2*max_velocity\n self._player.set_velocity((0, -2*self._max_velocity))\n self._player.set_jumping(False) # means can't jump", "def jump(self):\r\n if self.grounded == True:\r\n self.vel.y = -13", "def _compute_gravity_torque(self):\n pass", "def cryptensor(*args, backend=None, **kwargs):\n if backend is None:\n backend = get_default_backend()\n if backend == crypten.mpc:\n return backend.MPCTensor(*args, **kwargs)\n else:\n raise TypeError(\"Backend %s is not supported\" % backend)", "def perform_action(self, car, action):\n action[0]=action[0]*10+20\n action[1]=action[1]*0.5\n p.setJointMotorControl2(car, 3, p.POSITION_CONTROL, targetPosition =action[1],force = self.maxForce)\n for i in [0,1]:\n p.setJointMotorControl2(car, i, p.VELOCITY_CONTROL, targetVelocity =action[0],force = self.maxForce)\n p.setJointMotorControl2(car, 7, p.VELOCITY_CONTROL, targetVelocity =action[0]*7,force = self.maxForce)\n pos1, ori1 = p.getBasePositionAndOrientation(car)\n lin, ang = p.getBaseVelocity(car)\n '''\n if(pos1[0]<-self.max_dist_x):\n p.resetBasePositionAndOrientation(car, [pos1[0]+2*self.max_dist_x,pos1[1],pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n if(pos1[0]>self.max_dist_x):\n p.resetBasePositionAndOrientation(car, [pos1[0]-2*self.max_dist_x,pos1[1],pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n if(pos1[1]<-self.max_dist_y):\n p.resetBasePositionAndOrientation(car, [pos1[0],pos1[1]+2*self.max_dist_y,pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n if(pos1[1]>self.max_dist_y):\n p.resetBasePositionAndOrientation(car, [pos1[0],pos1[1]-2*self.max_dist_y,pos1[2]], ori1)\n vel = p.resetBaseVelocity(car, lin)\n '''", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def cooldown():\n print camera.CoolerON()\n camera.status.update()", "def ev_joyballmotion(self, event: tcod.event.JoystickBall) -> T | None:", "def main():\n raw_input()\n clouds = map(int, raw_input().split(' '))\n print jumping_on_the_clouds(clouds)", "def on_l_joy_x(self):\r\n self.log()", "def compute_controller(self):\n \n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n if messages:\n for m in messages:\n dx += m[1][0] - pos[0]\n dy += m[1][1] - pos[1]\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n \n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])", "def do_impact(self, car):\r\n\r\n if car is not None:\r\n if self.head_of_jump is None:\r\n self.head_of_jump = Jump()\r\n else:\r\n jj = Jump()\r\n jj.next = self.head_of_jump\r\n self.head_of_jump = jj\r\n\r\n # self.current_jump = self.head_of_jump\r", "def cox_cc_loss(g_case: Tensor, g_control: Tensor, shrink : float = 0.,\n clamp: 
Tuple[float, float] = (-3e+38, 80.)) -> Tensor:\n control_sum = 0.\n shrink_control = 0.\n if g_case.shape != g_control[0].shape:\n raise ValueError(f\"Need `g_case` and `g_control[0]` to have same shape. Got {g_case.shape}\"+\n f\" and {g_control[0].shape}\")\n for ctr in g_control:\n shrink_control += ctr.abs().mean()\n ctr = ctr - g_case\n ctr = torch.clamp(ctr, *clamp) # Kills grads for very bad cases (should instead cap grads!!!).\n control_sum += torch.exp(ctr)\n loss = torch.log(1. + control_sum)\n shrink_zero = shrink * (g_case.abs().mean() + shrink_control) / len(g_control)\n return torch.mean(loss) + shrink_zero.abs()", "def _twist_callback(self, cmd):\n self.set_velocity(cmd.linear.x, cmd.angular.z)", "def short_circ():\n \n set_mode(mode_cv)\n time.sleep(.250)\n set_CV_volts(0.1)\n time.sleep(.250)\n \n sc_vals = get_input_values()\n sc_data_point = data_point(sc_vals)\n jsc = sc_data_point[4]\n print('Short circuit current: ', jsc)\n write_data_tofile(sc_data_point)\n\n return jsc", "def print_usage():\n print \"Censor a document\\n\"\n print \"Use: python censor.py x NUM_DOCS s t p heuristic flag\\n\"\n print \"Arguments:\"\n print \"\\tif Greedy, x = index of the target document to censor (0 <= x < NUM_DOCS)\\n\\tif KillNodes, x = percentage of nodes to be shut down (0 < x < 1)\\n\"\n print \"\\tNUM_DOCS = total number of docs in the archive\\n\"\n print \"\\ts = number of source blocks\\n\"\n print \"\\tt = number of pointer blocks\\n\"\n print \"\\tp = number of parity blocks\\n\"\n print \"\\theuristic = MinimumAttack or LeapingAttack or CreepingAttack or TailoredAttack or KillNodes or KillDocs or KillRand\\n\"\n print \"\\tflag = write 'draw' if you want graphical visualization\\n\"", "def _do_mc_action(self):\n goal = self._current_mc_goal\n self._position_control_client.send_goal(\n goal,\n done_cb = self._motion_control_callback\n )", "def kick_from_accretion(self):\n\n accth_mag = -self.v * self.pair.dmdt_accr / self.pair.mass\n kick0 = -self.vth_vec * accth_mag[0] * self.dt\n kick1 = self.vth_vec * accth_mag[1] * self.dt\n\n self.pair[0].velocity += kick0\n self.pair[1].velocity += kick1", "def put_on_the_floor(device, q_init):\n global key_pressed\n key_pressed = False\n Kp_pos = 3.\n Kd_pos = 0.01\n imax = 3.0\n pos = np.zeros(device.nb_motors)\n for motor in range(device.nb_motors):\n pos[motor] = q_init[device.motorToUrdf[motor]] * device.gearRatioSigned[motor]\n listener = keyboard.Listener(on_press=on_press)\n listener.start()\n print(\"Put the robot on the floor and press Enter\")\n while not key_pressed:\n device.UpdateMeasurment()\n for motor in range(device.nb_motors):\n ref = Kp_pos*(pos[motor] - device.hardware.GetMotor(motor).GetPosition() - Kd_pos*device.hardware.GetMotor(motor).GetVelocity())\n ref = min(imax, max(-imax, ref))\n device.hardware.GetMotor(motor).SetCurrentReference(ref)\n device.SendCommand(WaitEndOfCycle=True)\n\n print(\"Start the motion.\")", "def _perform_landing(self):\n self.y += self.settings.mario_jump_speed\n if self.y >= self.settings.mario_y_pos:\n self.y = self.settings.mario_y_pos\n self.jumping = 0\n self.is_currently_jumping = False", "def step1c(self):\n\t\tif (self.ends(\"y\") and self.vowelinstem()):\n\t\t\tself.b = self.b[:self.k] + 'i' + self.b[self.k+1:]", "def ocr_correction(token):", "def requestAccel(self) -> None:\n self._protocol.write_line(CMD_ACCELEROMETER)", "def jump_factor(self):\n return self._jump_factor", "def jumped_on(self):\r\n pass", "def warmup():\n print camera.CoolerOFF()\n 
camera.status.update()", "async def c(self, f : float):\n c = (f-32) * 5/9\n await self.bot.say(\"{0} Celsius\".format(c))", "def do_updates(self):\n if self.words.check_for_dashes():\n self.console.write(\"Great job! We are so proud of you!\")\n self.keep_playing = False\n \n if not self.good_guess:\n del self.jumper.jumper_list[0]\n if self.jumper.jumper_list[0] == \" 0 \":\n webbrowser.open(\"https://www.youtube.com/watch?v=oHg5SJYRHA0&ab_channel=cotter548\")\n self.keep_playing = False", "def set_actuator(self, action):\n deltav = action[0]\n vt = np.clip(self.vt + deltav, -self.maxV, self.maxV)\n self.vt = vt\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=0,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=vt)\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=1,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=-vt)", "def runDisplacement(self,distance,axis):\n if axis == \"x\":\n self.M1.runDisplacement(distance)\n elif axis == \"y\":\n self.M2.runDisplacement(distance)", "def dynamicMoon(moonPosX, moonPosY, velocityXMoon, velocityYMoon, h):\r\n kPosMoon = [[0 for x in range(4)] for y in range(2)] # initialising the 2x2 k matricies\r\n kVMoon = [[0 for x in range(4)] for y in range(2)]\r\n \r\n kPosMoon[0][0] = velocityXMoon # this value is k1 for the x position. It is just the velocity of the rocket at its current position.\r\n kPosMoon[1][0] = velocityYMoon #this value is k1 for the y position\r\n kVMoon[0][0] = accelerationCalcX(moonPosX, moonPosY) #this value is k1 for the x velocity. At its current position what is the acceleration of the projectile\r\n kVMoon[1][0] = accelerationCalcY(moonPosX, moonPosY) # this value is k1 for the y velocity\r\n \r\n #k2s\r\n kPosMoon[0][1] = velocityXMoon + h*kVMoon[0][0]/2 #what would its velocity be if it carried on at its initial acceleration (calculated in k1 for x velocity) for half a time step\r\n kPosMoon[1][1] = velocityYMoon + h*kVMoon[1][0]/2\r\n kVMoon[0][1] = accelerationCalcX(moonPosX + h*kPosMoon[0][0]/2, moonPosY + h*kPosMoon[1][0]/2) # if it continued at the velocity in k2 for x position for half a time step what would the acceleration on the projectile be.\r\n kVMoon[1][1] = accelerationCalcY(moonPosX + h*kPosMoon[0][0]/2, moonPosY + h*kPosMoon[1][0]/2)\r\n \r\n #k3s\r\n kPosMoon[0][2] = velocityXMoon + h*kVMoon[0][1]/2 # if it carried on at the acceleration calculated for k2 in x velocity for half a time step, what would its velocity be\r\n kPosMoon[1][2] = velocityYMoon + h*kVMoon[1][1]/2\r\n kVMoon[0][2] = accelerationCalcX(moonPosX + h*kPosMoon[0][1]/2, moonPosY + h*kPosMoon[1][1]/2) # if carried on at the velocity calculated in k2 for half a time step then what would its accelaration be\r\n kVMoon[1][2] = accelerationCalcY(moonPosX + h*kPosMoon[0][1]/2, moonPosY + h*kPosMoon[1][1]/2)\r\n \r\n #k4s\r\n kPosMoon[0][3] = velocityXMoon + h*kVMoon[0][2] # if it carried on at the acceleration calcualted in k3 fro a whole timestep, then what would its velocity be \r\n kPosMoon[1][3] = velocityYMoon + h*kVMoon[1][2]\r\n kVMoon[0][3] = accelerationCalcX(moonPosX + h*kPosMoon[0][2], moonPosY + h*kPosMoon[1][2]) #if it continued at the velocity calculated in k3 for a whole time step, then what would its accelaration be\r\n kVMoon[1][3] = accelerationCalcY(moonPosX + h*kPosMoon[0][2], moonPosY + h*kPosMoon[1][2])\r\n \r\n velocityXMoon = velocityXMoon+(h/6)*(kVMoon[0][0]+2*kVMoon[0][1]+2*kVMoon[0][2]+kVMoon[0][3]) # the velocity in x is appended, after combining the ks for velocity in x\r\n 
velocityYMoon = velocityYMoon+(h/6)*(kVMoon[1][0]+2*kVMoon[1][1]+2*kVMoon[1][2]+kVMoon[1][3]) # the velocity in y is appended, after combining the ks for velocity in y\r\n moonPosX = moonPosX+(h/6)*(kPosMoon[0][0]+2*kPosMoon[0][1]+2*kPosMoon[0][2]+kPosMoon[0][3]) # the x position is appended, after combinging the ks for x position\r\n moonPosY = moonPosY+(h/6)*(kPosMoon[1][0]+2*kPosMoon[1][1]+2*kPosMoon[1][2]+kPosMoon[1][3]) # the y position is appended, after combinging the ks for y position\r\n \r\n return moonPosX, moonPosY, velocityXMoon, velocityYMoon # return the position and velocity back to moonPass\r", "def moove_character(self, case_list):\n\t\tself.actual_hero.start_moove(case_list)", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def rotate_cw(self, deg):\r\n self.send_command_without_response(f'cw {deg}')", "def Keyboard(self, key):\r\n\t\tif key == Keys.K_u:\r\n\t\t\tself.kp+=self.dp\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_j:\r\n\t\t\tself.kp-=self.dp\r\n\t\t\tif self.kp<0:\r\n\t\t\t\tself.kp=0.0\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_i:\r\n\t\t\tself.ki+=self.di\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_k:\r\n\t\t\tself.ki-=self.di\r\n\t\t\tif self.ki<0:\r\n\t\t\t\tself.ki=0.0\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_o:\r\n\t\t\tself.kd+=self.dd\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\tif key == Keys.K_l:\r\n\t\t\tself.kd-=self.dd\r\n\t\t\tif self.kd<0:\r\n\t\t\t\tself.kd=0.0\r\n\t\t\tself.q[0].SetCoefs(self.q[0].configer,self.kp,self.ki,self.kd)\r\n\t\t\r\n\t\tif key == Keys.K_f:\r\n\t\t\tself.center+=1\r\n\t\t\tif self.center>2:\r\n\t\t\t\tself.center = 0\r\n\t\t\r\n\t\tif key == Keys.K_t:\r\n\t\t\tself.cut=1-self.cut\r\n\t\t\tself.q[0].SetCut(self.cut)\t\t\t\r\n\t\t\t\r\n\t\tif key == Keys.K_r:\r\n\t\t\tpass\t\r\n\t\t\r\n\t\tif key == Keys.K_s:\r\n\t\t\tself.q[0].saveConf()", "def set_jump(self, jump):\n self.jump = jump", "def apply_action(self, action):\n real_action = self.policy_action_to_robot_action(action)\n p.setGravity(0, 0, 0)\n p.resetBaseVelocity(\n self.robot_ids[0], real_action[:3], real_action[3:])", "def before_step(self, action, physics):\n # # Support legacy internal code.\n\n physics.named.data.xfrc_applied[:,:3]=np.zeros((3,))\n\n if self._random_location and not self._maxq:\n index = self._current_loc\n else:\n one_hot = action[:4]\n index = np.argmax(one_hot)\n action = action[4:]\n\n goal_position = action * 0.05\n corner_action = CORNER_INDEX_ACTION[index]\n corner_geom = CORNER_INDEX_POSITION[index]\n\n\n # apply consecutive force to move the point to the target position\n position = goal_position + physics.named.data.geom_xpos[corner_geom]\n dist = position - physics.named.data.geom_xpos[corner_geom]\n\n loop = 0\n while np.linalg.norm(dist) > 0.025:\n loop += 1\n if loop > 40:\n break\n physics.named.data.xfrc_applied[corner_action, :3] = dist * 20\n physics.step()\n self.after_step(physics)\n dist = position - physics.named.data.geom_xpos[corner_geom]\n\n if self._random_location and not self._maxq:\n self._current_loc = self._generate_loc()", "def CreateMotionKernel(kernel):\r\n TrajSize = 64\r\n anxiety = 0.2* np.random.rand()\r\n numT = 10\r\n MaxTotalLength =10\r\n TotLength = 0\r\n 
#term determining, at each sample, the strengh of the component leating towards the previous position\r\n centripetal = 0.7 * np.random.rand()\r\n #term determining, at each sample, the random component of the new direction\r\n gaussianTerm =10 * np.random.rand()\r\n #probability of having a big shake, e.g. due to pressing camera button or abrupt hand movements\r\n freqBigShakes = 3 *np.random.rand()\r\n #v is the initial velocity vector, initialized at random direction\r\n init_angle = 360 * np.random.rand()\r\n #initial velocity vector having norm 1\r\n v0 = math.cos(init_angle / 180.0 * math.pi) + 1.0j * math.sin(init_angle/ 180.0 * math.pi)\r\n #the speed of the initial velocity vector\r\n v = v0* MaxTotalLength/(numT-1);\r\n\r\n if anxiety > 0:\r\n v = v0 * anxiety\r\n # initialize the trajectory vector\r\n x = np.zeros(numT,dtype = np.complex);\r\n\r\n abruptShakesCounter = 0\r\n for t in range(numT-1):\r\n # determine if there is an abrupt (impulsive) shake\r\n if np.random.rand() < freqBigShakes * anxiety:\r\n #if yes, determine the next direction which is likely to be opposite to the previous one\r\n nextDirection = 2 * v * (np.exp( 1.0j * (math.pi + (np.random.rand() - 0.5))))\r\n abruptShakesCounter = abruptShakesCounter + 1\r\n else:\r\n nextDirection=0\r\n\r\n #determine the random component motion vector at the next step\r\n dv = nextDirection + anxiety * (gaussianTerm * (np.random.randn()- + 1.0j * np.random.randn()) - centripetal * x[t]) * (MaxTotalLength / (numT - 1))\r\n v = v + dv\r\n # velocity vector normalization\r\n v = (v / np.abs(v)) * MaxTotalLength / (numT - 1)\r\n #print v\r\n x[t + 1] = x[t] + v\r\n # compute total length\r\n #TotLength=TotLength+np.abs(x([t+1]-x[t]))\r\n x_real = []\r\n x_imag = []\r\n for elem in x:\r\n x_real.append(elem.real)\r\n x_imag.append(elem.imag)\r\n x_real = np.round((x_real - np.min(x_real))/(np.max(x_real) - np.min(x_real)) * kernel-0.5)\r\n x_imag = np.round((x_imag - np.min(x_imag))/(np.max(x_imag) - np.min(x_imag)) * kernel-0.5)\r\n for idx in range(len(x_real)):\r\n if x_real[idx] < 0:\r\n x_real[idx] = 0\r\n if x_imag[idx] < 0:\r\n x_imag[idx] = 0\r\n if x_real[idx] > kernel -1:\r\n x_real[idx] = kernel -1\r\n if x_imag[idx] > kernel -1:\r\n x_imag[idx] = kernel -1\r\n\r\n ker = np.zeros((kernel, kernel))\r\n for idx in range(len(x_real)):\r\n ker[np.int(x_real[idx])][np.int(x_imag[idx])] = 1\r\n ker = ker/np.sum(np.sum(ker))\r\n return ker", "def cmu_mocap_35_walk_jog(data_set='cmu_mocap'):\r\n train_motions = ['01', '02', '03', '04', '05', '06',\r\n '07', '08', '09', '10', '11', '12',\r\n '13', '14', '15', '16', '17', '19',\r\n '20', '21', '22', '23', '24', '25',\r\n '26', '28', '30', '31', '32', '33', '34']\r\n test_motions = ['18', '29']\r\n data = cmu_mocap('35', train_motions, test_motions, sample_every=4, data_set=data_set)\r\n data['info'] = \"Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). 
It consists of \" + data['info']\r\n return data", "def moco_loss_func(\n query: torch.Tensor, key: torch.Tensor, queue: torch.Tensor, temperature=0.1\n) -> torch.Tensor:\n\n pos = torch.einsum(\"nc,nc->n\", [query, key]).unsqueeze(-1)\n neg = torch.einsum(\"nc,ck->nk\", [query, queue])\n logits = torch.cat([pos, neg], dim=1)\n logits /= temperature\n targets = torch.zeros(query.size(0), device=query.device, dtype=torch.long)\n return F.cross_entropy(logits, targets)", "def put_cmyk(self, path, res, mode, color_correction):\n\n ### Stratasys_J750\n if mode == \"stratasys\":\n _c_ = (0, 90, 158, 255)\n _m_ = (166, 33, 98, 255)\n _y_ = (200, 189, 3, 255)\n _k_ = (26, 26, 29, 255)\n\n ### cmyk\n else:\n _c_ = (0, 255, 255, 255)\n _m_ = (255, 0, 255, 255)\n _y_ = (255, 255, 0, 255)\n \n\n\n ut = util.UTIL()\n \n image = Image.open(path)\n image_size = image.size\n\n clr_list = self.get_color_to_memory(image)\n\n # print(\"Memory : \", ut.ll_size(clr_list))\n\n new_image = self.up_scale(image, res)\n new_image_size = new_image.size\n \n # print(\"PIL : \", image_size)\n # print(new_image_size)\n\n vectors = ut.set_vector(res)\n\n for i in range(image_size[0]):\n for j in range(image_size[1]):\n\n pt = (i * res, j * res)\n rgb = clr_list[i][j]\n new_vectors = random.sample(vectors, len(vectors))\n\n cmyk = self.calc_rgb_cmyk(rgb)\n\n\n ### ========== CMYK ==========\n # cc, mm, yy, kk = self.calc_cmyk_count(cmyk, res)\n # new_length = cc + mm + yy + kk\n ### ========== CMYK ==========\n\n\n ### ========== CMY ==========\n cc, mm, yy = self.calc_cmy_count(cmyk, res, color_correction)\n _length = cc + mm + yy\n ### ========== CMY ==========\n\n\n if _length > (res * res):\n new_length = (res * res)\n else:\n new_length = _length\n\n new_pt = []\n\n for k in range(new_length):\n\n new_pt = ut.pt2d_add(pt, new_vectors[k])\n # print(new_pt)\n\n if k < cc:\n new_image.putpixel(new_pt, (_c_))\n elif k < (cc + mm):\n new_image.putpixel(new_pt, (_m_))\n elif k < (cc + mm + yy):\n new_image.putpixel(new_pt, (_y_))\n # else:\n # new_image.putpixel(new_pt, (_k_))\n \n return new_image", "def cox_cc_loss_single_ctrl(g_case: Tensor, g_control: Tensor, shrink: float = 0.) 
-> Tensor:\n loss = F.softplus(g_control - g_case).mean()\n if shrink != 0:\n loss += shrink * (g_case.abs().mean() + g_control.abs().mean())\n return loss", "def handle_continuous_keys(self):\n shift = pygame.K_LSHIFT in self.held\n ctrl = pygame.K_LCTRL in self.held\n factor = 3 if shift else 1/3 if ctrl else 1\n for key in self.held:\n if not self.followmode:\n # if self.held_delay[key] == 0:\n if key in (pygame.K_w, pygame.K_UP): # up\n # self.canvas.move_offset(0, 5 * factor)\n self.canvas.move_focus(0, 5 * factor)\n elif key in (pygame.K_s, pygame.K_DOWN): # down\n # self.canvas.move_offset(0, -5 * factor)\n self.canvas.move_focus(0, -5 * factor)\n elif key in (pygame.K_d, pygame.K_RIGHT): # right\n # self.canvas.move_offset(-5 * factor, 0)\n self.canvas.move_focus(5 * factor, 0)\n elif key in (pygame.K_a, pygame.K_LEFT): # left\n # self.canvas.move_offset(5 * factor, 0)\n self.canvas.move_focus(-5 * factor, 0)\n if key in (pygame.K_e, pygame.K_KP_PLUS):\n self.canvas.zoom(2 * factor)\n elif key in (pygame.K_q, pygame.K_KP_MINUS):\n self.canvas.zoom(-2 * factor)\n for key in self.held:\n self.held_delay[key] = (self.held_delay[key] + 1) % 5", "def _motion_control_callback(self, state, result):\n if len(self._mc_goals) > 0:\n self._current_mc_goal = self._mc_goals.pop(0)\n self._do_mc_action()\n else :\n self._current_mc_goal = None", "def acceleration(degrees, finalSpeed, steering = 0, robot = MoveSteering(OUTPUT_A, OUTPUT_B), motorA = LargeMotor(OUTPUT_A)): # Function to accelerate while moving so the robot can get traction before moving fast\n\n motorA.reset() #reseting how many degrees the robot has moved\n motorA.position = 0 # setting how many degrees the robot has moved\n\n accelerateDegree = degrees * 0.8\n # declerationDegree = degrees * 0.2'\n speed = 0 #starting speed\n while motorA.position < degrees and False == Constants.STOP: #while the robot hasen't moved the target amount of degree's(distance)\n if motorA.position < accelerateDegree and False == Constants.STOP: \n if speed < finalSpeed: # while the robot hasen't accelerated to the target speed\n speed += 5 #speed up\n robot.on(steering = steering, speed = speed) # Start Moving\n sleep(0.1) #wait so that it doesn't accelerate immidiatly\n else:\n robot.on(steering = steering, speed = finalSpeed)# otherwise just keep moving\n sleep(0.01)\n elif False == Constants.STOP:\n if speed > 10:\n speed -= 5\n robot.on(steering = steering, speed = speed)\n sleep(0.05)\n else:\n robot.on(steering = steering, speed = speed)\n sleep(0.01)\n \n robot.off() # Stop Moving", "def sit(self):\n\n\t\tself.pose.goToPosture(\"Crouch\")", "def cohen_kappa(*args, **kwargs):\n return ConfusionMatrix2.from_ccw(*args, **kwargs).kappa()", "def set_hybrid_control(self, model, max_force_torque, timeout=5.0, stop_on_target_force=False):\n\n reduced_speed = np.deg2rad([100, 100, 100, 150, 150, 150])\n q_last = self.joint_angles()\n\n # Timeout for motion\n initime = rospy.get_time()\n xb = self.end_effector()\n failure_counter = 0\n\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n\n # Current Force in task-space\n Fb = -1 * Wb\n # Safety limits: max force\n if np.any(np.abs(Fb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return FORCE_TORQUE_EXCEEDED\n\n if stop_on_target_force and np.any(np.abs(Fb)[model.target_force != 0] > 
model.target_force[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return STOP_ON_TARGET_FORCE\n\n # Current position in task-space\n xb = self.end_effector()\n\n dxf = model.control_position_orientation(Fb, xb) # angular velocity\n\n # Limit linear/angular velocity\n dxf[:3] = np.clip(dxf[:3], -0.5, 0.5)\n dxf[3:] = np.clip(dxf[3:], -5., 5.)\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n q = self._solve_ik(xc)\n if q is None:\n rospy.logwarn(\"IK not found\")\n result = IK_NOT_FOUND\n else:\n q_speed = (q_last - q)/dt\n if np.any(np.abs(q_speed) > reduced_speed):\n rospy.logwarn(\"Exceeded reduced max speed %s deg/s, Ignoring command\" % np.round(np.rad2deg(q_speed), 0))\n result = SPEED_LIMIT_EXCEEDED\n else:\n result = self.set_joint_positions_flex(position=q, t=dt)\n\n if result != DONE:\n failure_counter += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n\n # Compensate the time allocated to the next command when there are failures\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n q_last = self.joint_angles()\n return DONE", "def _content_jump(self, target):\n head = self._relative_head_pos()\n similarities = 1 - np.sqrt(np.sum((self.memory - target) ** 2, 1)) / self.memory_unit_size\n pos = int(np.argmax(similarities).item())\n if similarities[pos] > self.min_similarity_to_jump:\n self.head_pos = pos\n else:\n self.head_pos = 0\n if self.history is not None:\n self.history[\"loc\"][-1].append((head, 0.1))", "def rotateCW(device, runTime = 0.1):\n return execute(device, ROTATE_CCW_CMD, STATUS_LIMIT_ROTATE, runTime)", "def omega(self):\n self.cosineSequences()", "def cauchy(self, loc, gamma):\n c = loc + gamma * np.tan(np.pi * (self.random() - 0.5))\n return c if c > 0 else self.cauchy(loc, gamma)", "def attack(self, robot):\n pass", "def move_to_coc(self):\n coc = scale(self.center_of_charge(), -1.0)\n self.translate(coc)", "def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))", "def k_2_jy(freq: float, theta_major: float,\n theta_minor: float, brightness: float) -> float:\n conv = (1.222E3 * (freq ** -2) / theta_minor / theta_major) ** -1\n return brightness * conv", "def joy_callback(self, msg):\n mappings = gamepad_mappings.set_gamepad_mappings(msg)\n self.move_vertical = mappings[\"button_vertical\"] # up: +1.0, down: -1.0\n self.move_horizontal = mappings[\"button_horizontal\"] # left: +1.0, right: -1.0", "def recommend_cosim():\n pass", "def execute(self):\n # Check buttons\n self.check_enable_pids(self.inputs[JoyInput.START])\n self.check_disable_pids(self.inputs[JoyInput.BACK])\n self.check_toggle(self.inputs[JoyInput.LS], self.inputs[JoyInput.LS_Y], self.saved[JoyInput.LS_Y])\n\n # Check axes\n #self.input_forward(JoyInput.LS_Y)\n self.input_depth(JoyInput.CROSS_Y)\n self.set_raw_pwm(JoyInput.LT, JoyInput.RT) # uses LT/RT for depth\n #self.input_rotate(JoyInput.LT, JoyInput.RT) # uses RT for rotation\n\n # yaw setpoint\n magnitude, angle = self.input_yaw_setpoint(JoyInput.RS_X, JoyInput.RS_Y, Joystick.RS_ANGLE)\n self.check_yaw_setpoint_toggle(JoyInput.RS, Joystick.RS_ANGLE, magnitude, angle)", "def take_action(self, action):\r\n\r\n 
self._update_velocity(action)\r\n self._update_position()\r\n if self.is_terminal_state():\r\n return 100.0\r\n\r\n return -1.0", "def teleopPeriodic(self):\n\n turningValue = (self.angleSetpoint - self.gyro.getAngle()) * self.pGain\n if self.joystick.getY() <= 0:\n # forwards\n self.myRobot.arcadeDrive(self.joystick.getY(), turningValue)\n elif self.joystick.getY() > 0:\n # backwards\n self.myRobot.arcadeDrive(self.joystick.getY(), -turningValue)", "def JogMode(port):\n\tport.write(\"Q\")", "def runTask1(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSweep()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.ppSuctionOff()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.reset()\r\n\t\tself._motion.terminate()", "def control_change(self, channel, cc, value):\n knob, bank = self.decode_mpd218_cc(cc)\n log.debug(\"Winch control change %d on knob %d bank %d\", cc, knob, bank)\n\n if knob == 1: # Knob #1 on MPD218, use to control resonant frequency\n #self.frequency = 0.05 + 0.1 * value\n self.frequency = 5.00\n self.set_freq_damping()\n\n elif knob == 2: # Knob #2 on on MPD218, use to control damping ratio\n #self.damping_ratio = 0.05 + 0.01 * value\n self.damping_ratio = 1.32\n self.set_freq_damping()", "async def get_words():\n global CENSORED_WORDS\n CENSORED_WORDS = get_censor()", "def first_time_run(self, w):\n \n if w in ['half', 'quarter']:\n self._first_time_channel = getattr(self,'_'+w+'_channel')\n else:\n raise ValueError('Input type for \"w\" should be \"half\" or \"quarter\"')\n\n #measure position before optimizing\n getattr(self.rotator, 'set_zero_position')(getattr(self,'_'+w+'_channel')) \n pos_before = getattr(self.rotator, 'get_noof_steps_ch'+\\\n str(getattr(self,'_'+w+'_channel')))()\n\n #turn waveplates\n data, qtdata, dataplot, premature_quit = self.run('first_time', self._first_opt_red_power)\n qtdata.close_file()\n\n if not premature_quit:\n\n print '\\tGuessing optimal waveplate position...'\n optim_pos = data['wp_step'][self.find_nearest(data['counts'],\n min(data['counts']))]\n\n if self.get_plot_degrees():\n print '\\tOptimal waveplate position determined at %.0f degrees.'%(optim_pos*self.get_conversion_factor(w))\n else:\n print '\\tOptimal waveplate position determined at %d steps.'%optim_pos\n \n #BEWARE: never ask the current position in noof_steps\n curr_pos = data['wp_step'][len(data['wp_step'])-1]\n\n #set the position to the optimal position\n self.rotator.quick_scan(optim_pos-curr_pos, getattr(self,'_'+w+'_channel'))\n\n else:\n #ways to get a premature quit action: q key stroke or > threshold\n #BEWARE: never ask the current position in noof_steps\n curr_pos = data['wp_step'][len(data['wp_step'])-1] \n\n print '\\tReturning to initial position...'\n\n #set the position to 
the optimal position\n self.rotator.quick_scan(pos_before-curr_pos, getattr(self,'_'+w+'_channel'))\n\n\n #measure position after optimizing\n pos_after = getattr(self.rotator, 'get_noof_steps_ch'+\\\n str(getattr(self,'_'+w+'_channel') ) )()\n\n #print \"\\tPosition of %s waveplate changed %d steps\"\\\n # %(w, pos_after-pos_before)", "def keyboardController(self):\n # get pressed keys\n keypress = pygame.key.get_pressed()\n\n # randomize world if R is pressed and enough frames have passed since last time\n if keypress[pygame.K_r]:\n if not self.seedWorldTimer:\n self.seedWorldTimer = int(self.FPS / 4)\n self.calculateWorldValues()\n self.findPolygons()\n # decrement delay between allowed randomizations\n if self.seedWorldTimer:\n self.seedWorldTimer -= 1\n\n # change displayMode if necessary\n if keypress[pygame.K_p]:\n self.displayMode = self.DISPLAYMODE_POINTS\n glEnable(GL_CULL_FACE) # enable culling\n glCullFace(GL_BACK)\n elif keypress[pygame.K_o]:\n self.displayMode = self.DISPLAYMODE_MESH\n glDisable(GL_CULL_FACE) # disable culling for mesh viewing\n elif keypress[pygame.K_i]:\n self.displayMode = self.DISPLAYMODE_WIREFRAME\n\n # apply polar camera movement\n if keypress[pygame.K_w]:\n self.cameraPolar[2] -= 0.08 * self.dt\n if self.cameraPolar[2] < 1:\n self.cameraPolar[2] = 1.0\n if keypress[pygame.K_s]:\n self.cameraPolar[2] += 0.08 * self.dt\n if self.cameraPolar[2] > 179:\n self.cameraPolar[2] = 179\n if keypress[pygame.K_d]:\n self.cameraPolar[1] += 0.08 * self.dt\n if self.cameraPolar[1] > 180:\n self.cameraPolar[1] -= 360\n if keypress[pygame.K_a]:\n self.cameraPolar[1] -= 0.08 * self.dt\n if self.cameraPolar[1] <= -180:\n self.cameraPolar[1] += 360\n if keypress[pygame.K_b]:\n self.t = 'b'\n self.calculateWorldValues()\n self.findPolygons()\n if keypress[pygame.K_n]:\n self.t = 'n'\n self.calculateWorldValues()\n self.findPolygons()\n if keypress[pygame.K_m]:\n self.t = 'm'\n self.calculateWorldValues()\n self.findPolygons()\n # update camera cartesian position\n self.polarCameraToCartesian()", "def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))" ]
[ "0.59280413", "0.541607", "0.5290688", "0.52872306", "0.5283519", "0.51918846", "0.5162083", "0.5111304", "0.50946474", "0.50678855", "0.5063725", "0.50507736", "0.50380087", "0.5027665", "0.50178564", "0.49831063", "0.4976855", "0.4957412", "0.49509645", "0.49407175", "0.49123913", "0.49025196", "0.48670948", "0.48562348", "0.48345843", "0.48306543", "0.48277506", "0.48266277", "0.4821847", "0.48047793", "0.47947925", "0.4777308", "0.47706807", "0.47633016", "0.47522616", "0.47461694", "0.47306776", "0.46897304", "0.46725744", "0.46627864", "0.46619913", "0.460711", "0.45884004", "0.457084", "0.45661137", "0.45659515", "0.45629004", "0.45610657", "0.45566863", "0.45564222", "0.45481992", "0.45399728", "0.4534141", "0.45305654", "0.4521202", "0.45067325", "0.45059216", "0.44972107", "0.44857696", "0.4459462", "0.44584933", "0.44425666", "0.44363317", "0.44331554", "0.4408211", "0.44071746", "0.4403953", "0.44030344", "0.4402658", "0.43947783", "0.4391479", "0.43854916", "0.4384707", "0.4349547", "0.43463236", "0.43445387", "0.43428218", "0.43296224", "0.4327063", "0.43196416", "0.43099603", "0.43088573", "0.43073884", "0.43072504", "0.430633", "0.4306214", "0.43053004", "0.43022972", "0.4301223", "0.43007088", "0.43005797", "0.42994553", "0.42946732", "0.42940536", "0.4293233", "0.4283806", "0.4278969", "0.42773655", "0.4277172", "0.42746904" ]
0.7094361
0
Check if output file exists, then execute commmand. If there is more than one output file, the command will be executed if at least one is missing.
def CheckExec(self, cmd, checknames, force=False, halt_on_error=True): gone = False names = [] for name in checknames: if '+orig' in name: if name.endswith('+orig'): names.append('%s.HEAD' % name) names.append('%s.BRIK' % name) elif name.endswith('HEAD'): names.append(name) newname = name[:-4] + 'BRIK' if newname not in checknames: names.append(newname) elif name.endswith('BRIK'): newname = name[:-4] + 'HEAD' if newname not in checknames: names.append(newname) names.append(name) else: names.append(name) for name in names: if not os.path.exists(name) and not os.path.exists('%s.gz'%name): gone = True elif self.redo or force or gone: os.remove(name) gone = True if self.redo or gone: self.ExecCmd(cmd, halt_on_error=halt_on_error) if '+orig.' in names[0]: name = names[0].replace('.BRIK','') name = name.replace('.HEAD','') append_history_note(name, cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()", "def exec_command_string_one_file(command_str,output):\n print(command_str)\n # (status, result) = subprocess.check_output(command_str, universal_newlines=True, stderr=sys.stdout) #available in both Python 2.x and 3.x\n\n (status, result) = getstatusoutput(command_str)\n\n if os.path.isfile(output):\n return output\n else:\n outputlogMessage(result)\n # syslog.outputlogMessage('The version of GDAL must be great than 2.0 in order to use the r option ')\n return False", "def exec_command_args_list_one_file(args_list,output):\n outputlogMessage(output_commandString_from_args_list(args_list))\n ps = subprocess.Popen(args_list)\n returncode = ps.wait()\n if os.path.isfile(output):\n return output\n else:\n outputlogMessage('return codes: '+ str(returncode))\n return False", "def exec_to_file ( cmd, output_file, cwd = '/tmp/' ):\n\n try:\n dn = open(os.devnull, 'r')\n with open(output_file, 'w') as fo:\n vlog(4, 'Running command: %s > %s from %s '% (cmd, output_file, cwd))\n p = subprocess.Popen(\n cmd, \n stdin=dn,\n stdout=fo, \n stderr=fo, \n cwd=cwd, \n close_fds=True\n )\n\n if p:\n p.wait()\n return p.returncode\n\n except Exception as e:\n vlog(1, 'Command Error: %s'% (str(e)))\n\n vlog(1, 'Failed to run command: %s > %s '% (cmd, output_file))\n return None", "def do_command(cmd, output_file):\n global txt_output_dir\n output_path = os.path.join(txt_output_dir, output_file)\n print \"doing: %s > %s\" % (cmd, output_path)\n output = check_output(cmd.split(\" \"))\n with open(output_path, \"w\") as f:\n f.write(output)", "def output_file_exists(self):\n file = self.config['OUT_FOLDER'] + '/' + self.config['OUTPUT_FOLDER'] + '/' + self.output_filename + '.' 
+ \\\n self.options['image_format'].lower()\n self.__log(f'Checking if output file: \"{file}\" already exists.')\n if os.path.exists(file) and not os.stat(file).st_size == 0:\n self.__log(f'Output file: \"{file}\" does exist.')\n return True\n self.__log(f'Output file: \"{file}\" does not exist.')\n return False", "def check_output(self, args):\n if isinstance(args, str):\n args = [args]\n try:\n return check_output(args)\n\n except IOError as e:\n raise ScriptError(e)\n\n except OSError as e:\n raise ScriptError(e)\n\n except CalledProcessError as e:\n raise ScriptError(e)", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def get_output(self, output_dir=\"tools_output\"):\n\n output_dir = self.project_dir / output_dir / self.name\n # create output directory if didn't exist\n if not output_dir.exists():\n os.makedirs(output_dir)\n logger.info(f\"Created {output_dir}\")\n\n for outfile in self.output:\n outfile = self.project_dir / outfile\n if outfile.exists():\n src = os.fspath(outfile)\n dst = os.fspath(output_dir / outfile.name)\n shutil.move(src, dst)\n logger.info(f\"Moved {outfile.name} to {output_dir}\")\n else:\n msg = f\"File not found: {outfile} - did you execute run() before?\"\n logger.error(msg)\n raise FileNotFoundError(msg)", "def _execute_internal(cmd, log_std_out, log_std_err, log_cmd, also_output_to_file=None,\r\n return_output=False):\r\n tmp_filename = None\r\n if log_std_out or log_std_err:\r\n tmp_filename = tempfile.mktemp()\r\n tmp_file = open(tmp_filename, \"w\")\r\n\r\n if (not log_std_err) or (not log_std_out):\r\n dev_null_file = open(os.devnull, \"w\")\r\n\r\n if log_std_out:\r\n if also_output_to_file:\r\n std_out_file = open(also_output_to_file, \"w\")\r\n else:\r\n std_out_file = tmp_file\r\n else:\r\n std_out_file = dev_null_file\r\n\r\n if log_std_err:\r\n std_err_file = tmp_file\r\n else:\r\n std_err_file = dev_null_file\r\n\r\n if log_cmd:\r\n log.info(\"Executing: %s\" % \" \".join(cmd))\r\n #Call subprocess.call to run the command.\r\n try:\r\n ret = subprocess.call(cmd, stdout=std_out_file, stderr=std_err_file)\r\n text = None\r\n #Check if there is some output or error captured to log\r\n if tmp_filename:\r\n with open(tmp_filename, 'r') as file_read:\r\n text = file_read.read()\r\n os.remove(tmp_filename)\r\n if text:\r\n log.info(\"External output:\\n%s\" % text)\r\n return (ret, text) if return_output else ret\r\n except subprocess.CalledProcessError as exception:\r\n log.error(\"Subprocess Exception occurred %s\" % exception)\r\n return (ret, None) if return_output else ret\r\n except OSError as exception:\r\n log.error(\"OS Exception occurred %s\" % exception)\r\n #IF this exception occurs the ret value is not initialized hence return non-zero\r\n return (1, None) if return_output else 1", "def get_output_file():\n if len(sys.argv) < 4:\n return -1\n return sys.argv[3]", "def execute(cmd, output_file, env={}):\n return subprocess.Popen(shlex.split(cmd),\n stderr=subprocess.STDOUT,\n stdout=open(output_file, \"w\"),\n env = dict(os.environ, **env))", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def ensure_file(command, 
filename, overwrite=False, shell=False):\n print(command)\n if overwrite or not os.path.exists(filename):\n ret = subprocess.call(command, shell=shell)\n if ret:\n raise Exception(ret)\n return ret", "def _execute_cmd(cmd, outdir: str = DEFAULT_OUTDIR, stdout_file=None, stderr_file=None):\n if cmd is None:\n raise Exception(\"cmd was not specified\")\n\n logging.info(f\"Will attempt to execute '{cmd}'\")\n\n if outdir is None:\n outdir = '/tmp'\n logging.info(f\"outdir was not defined and therefore was set to default '{outdir}'\")\n\n if stdout_file is None:\n stdout_file = os.path.join(outdir, os.path.basename(__file__) + '.stdout')\n logging.info(f\"stdout_file was not specified and therefore was set to '{stdout_file}'\")\n\n if stderr_file is None:\n stderr_file = os.path.join(outdir, os.path.basename(__file__) + '.stderr')\n logging.info(f\"stderr_file was not specified and therefore was set to '{stderr_file}'\")\n\n if os.path.exists(stdout_file):\n logging.info(f\"STDOUT file '{stdout_file}' already exists so will delete it now\")\n os.remove(stdout_file)\n\n if os.path.exists(stderr_file):\n logging.info(f\"STDERR file '{stderr_file}' already exists so will delete it now\")\n os.remove(stderr_file)\n\n p = subprocess.Popen(cmd, shell=True)\n\n (stdout, stderr) = p.communicate()\n\n pid = p.pid\n\n logging.info(f\"The child process ID is '{pid}'\")\n\n p_status = p.wait()\n\n p_returncode = p.returncode\n\n if p_returncode is not None:\n logging.info(f\"The return code was '{p_returncode}'\")\n else:\n logging.info(\"There was no return code\")\n\n if p_status == 0:\n logging.info(f\"Execution of cmd '{cmd}' has completed\")\n else:\n raise Exception(f\"Received status '{p_status}'\")\n\n if stdout is not None:\n logging.info(\"stdout is: \" + stdout)\n\n if stderr is not None:\n logging.info(\"stderr is: \" + stderr)\n\n return stdout_file", "def fileoutput(cd, bin, args=[]):\n cmd = \"cd %s; ./%s %s\" % (cd, bin, ' '.join(args))\n logger.info(cmd)\n\n return exec_command(cmd)", "def check_file_output(self, actual: str, expected: str):\n assert self._program_executed, f\"You first need to `execute` the program before checking its outputs!\"\n assert actual in self._write_files, f\"Unknown output file {actual}. 
Did you forget to provide it to the program by calling input_write_filename?\"\n full_expected = _root_dir / expected\n assert full_expected.is_file(), f\"Reference file {full_expected} does not exist!\"\n # check to make sure the output file exists\n full_actual = _root_dir / actual\n self._test.assertTrue(full_actual.is_file(), f\"It seems like the program never created the output file {full_actual}\")\n # open and compare the files\n with open(full_actual, 'rb') as a:\n actual_bin = a.read()\n with open(full_expected, 'rb') as e:\n expected_bin = e.read()\n self._test.assertEqual(actual_bin, expected_bin, f\"Bytes of {actual} and {expected} did not match!\")", "def check_output_wrapper(*args, **kwargs):\n\n logger.debug('Executing %s, %s', args, kwargs)\n try:\n return check_output(*args, **kwargs)\n except CalledProcessError as msg:\n logger.warning('Error %s,%s,%s from command.', msg.returncode, msg.output, msg.stderr)\n logger.debug('Output: %s', msg.output)\n sys.exit(ERROR_EXECUTING_COMMAND);", "def runTool(self, filename, expected_out, args):\n\n input_path = os.path.join(self.inputs_dir, filename)\n return_value, actual_output = create_subprocess(self.executable_binary, args + [input_path] + ['--'])\n actual_output = actual_output.decode('utf-8')\n\n self.assertEqual(return_value, 0)\n self.evaluate(expected_out, actual_output, command=f'{[self.executable_binary] + args} {filename}')", "def process_cleanup(self, output_file=None, output_list=None):\n if output_file:\n self.check_output_file( output_file )\n elif output_list:\n for output_file in output_list:\n self.check_output_file( output_file )\n log.info('All expected output files found - process successful!\\n')", "def call_files():\n try:\n predicted_proteins = sys.argv[1]\n except IndexError:\n predicted_proteins = input('Please input AUGUSTUS file for analysis: ')\n try:\n protein_db = sys.argv[2]\n except IndexError:\n protein_db = input('Please input a protein database file: ')\n\n try:\n output_file_aug_to_fasta = sys.argv[3]\n output_to_file = True\n except IndexError:\n output_to_file = input('Write output to file?'\n + ' [Yes/No]: ')\n if output_to_file.upper() in 'YES':\n output_to_file = True\n output_file_aug_to_fasta = input('Please supply output file name '\n + 'for AUGUSTUS conversion to '\n + 'FASTA: ')\n else:\n output_to_file = False\n output_file_aug_to_fasta = None\n\n try:\n output_file_proteins_to_db = sys.argv[4]\n except IndexError:\n if output_to_file:\n output_file_proteins_to_db = input('Please supply output file name'\n + 'for blast database: ')\n else:\n output_file_proteins_to_db = None\n\n try:\n blastp_output = sys.argv[5]\n except IndexError:\n if output_to_file:\n blastp_output = input('Please supply output file name for blastp: ')\n else:\n blastp_output = None\n\n finally:\n if len(sys.argv) >= 7:\n overwrite = sys.argv[6]\n elif output_file and os.path.exists(output_file):\n overwrite = input('Output file already exists. Overwrite? 
'\n + '[Yes/No]: ')\n if overwrite.upper() in 'YES':\n overwrite = True\n else:\n overwrite = False\n else: overwrite = False\n\n return (predicted_proteins, protein_db, output_file_aug_to_fasta, \n output_file_proteins_to_db, blastp_output, \n output_to_file, overwrite)", "def load_job_output(output_title, output_summary, output):\n def read_if_file(val):\n if os.path.exists(val):\n logger.info(\"Reading file: %s\", val)\n with open(val, \"r\") as inf:\n return inf.read()\n else:\n return val\n\n if output_title:\n assert output_summary\n return checks.Output(\n title = output_title,\n summary = read_if_file(output_summary),\n text = read_if_file(output) if output else None\n )\n else:\n return None", "def check_output(self, cmd, nonzero_e = tc.error_e):\n _exitcode, stdoutf, _stderrf = self.run(cmd, nonzero_e = nonzero_e)\n return stdoutf.read()", "def check_output(*args, **kwargs):\n if \"stdout\" in kwargs:\n raise ValueError(\"stdout argument not allowed, it will be overriden.\")\n p = Popen(stdout=PIPE, *args, **kwargs)\n out, _ = p.communicate()\n rc = p.poll()\n if rc != 0:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = args[0]\n raise CalledProcessError(rc, cmd, output=out)\n return out", "def check_output(command):\n process = Popen(command, shell=True, stdout=PIPE)\n output, err = process.communicate()\n if process.returncode == 0: # success\n return output\n else:\n raise RuntimeError(\"Command {0} running unsuccessfully\".format(command))", "def try_process_output_file(ins_file, output_file=None):\n if output_file is None:\n output_file = ins_file.replace(\".ins\", \"\")\n df = None\n i = InstructionFile(ins_file)\n try:\n df = i.read_output_file(output_file)\n except Exception as e:\n print(\"error processing instruction/output file pair: {0}\".format(str(e)))\n return df", "def process_tool_output(self, proc, log_fn, name):\n output, error = proc.communicate()\n if proc.returncode != 0:\n fd = open(log_fn, \"a+\")\n fd.write(\"Error: %s returned the following output:\"\n \"\\n%s\" % (name, error))\n fd.close()\n raise Exception(\"%s returned the following output:\"\n \"\\n%s\" % (name, error))\n else:\n fd = open(log_fn, \"a+\")\n fd.write(\"%s output: %s %s\" % (name, output, error))\n # printing stdout and stderr though bowtie2 currently will always\n # output to stderr instead of stdout even if no error occurred\n fd.close()", "def run(self):\n try:\n self.parse_args(None)\n self.execute_command()\n except FileExistsException, e:\n print \"Can't copy file as destination already exists.\"\n print \"Exiting...\"\n except Exception, e:\n print \"Exception occured: %s\\nExiting...\" % e", "def check_output(*popenargs, **kwargs):\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n error = subprocess.CalledProcessError(retcode, cmd)\n error.output = output\n raise error\n return output", "def test_execution(self):\n\n # This process will echo the input and output file name to stdout.\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/another/%file%/%pattern%.txt',\n 'echo')\n\n ds_result = the_process_unit.execute(simulate=True)\n\n outfiles = [file_thing for file_thing in ds_result.files]\n self.assertEqual(len(outfiles), 1)\n\n expected_string = self.script_header + \"mkdir -p /another/file_1\\necho test_file1 /another/file_1/pattern_1.txt\\n\"\n self.assertEqual(expected_string, 
the_process_unit.scheduler.job.to_str())", "def collect(self,outfilename):\n # TODO actually gather results and check if run is successful\n if os.path.isfile(outfilename):\n self.completed=True\n else:\n self.completed=False", "def setup_molecule_output_check(exp_builder_db, mol_id, output_path):\n exp_builder_db._setup_molecules(mol_id)\n assert os.path.exists(output_path)\n assert os.path.getsize(output_path) > 0", "def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))", "def ExecuteIf(self, args, src_files, dst_files):\n if self.ShouldBuild(src_files, dst_files):\n self.MakeDestinationDirectories(dst_files)\n self.Execute(args)\n if self.execute and not self.VerifyExists(dst_files):\n raise RuntimeError(\"FAILED: build did not create all required files\")", "def output_files_exist(self):\n return all([split.exists() for split in self.split_files])", "def eval_output(ifile,cfile,ofile):\n print('Evaluating {} on combo {}'.format(ifile, cfile))\n\tglobal EVAL_TABLE_PATH\n\tEVAL_TABLE_ARGS = \" -i %s -C %s -o %s -u %s \"%(ifile,cfile,ofile,OUT)\n\tresult=os.system(EVAL_TABLE_PATH + \" \" + EVAL_TABLE_ARGS) \n\tif result!=0:\n\t\tprint \"error while executing\"\n\t\texit(0)", "def blankOutputFiles():\n print(\"Checking for blank output files\")\n find_output = re.compile(r\"/\\* Output:(.*)\\*/\", re.DOTALL)\n for java in config.example_dir.rglob(\"*.java\"):\n with java.open() as codeFile:\n output = find_output.search(codeFile.read())\n if output:\n # print(output.group(1))\n if not output.group(1).strip():\n print(java)", "def check_output(*popenargs, **kwargs):\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n error = subprocess.CalledProcessError(retcode, cmd)\n error.output = output\n raise error\n return output", "def check_output(*popenargs, **kwargs):\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n error = subprocess.CalledProcessError(retcode, cmd)\n error.output = output\n raise error\n return output", "def execute(cmd, log_cmd=True, also_output_to_file=None):\r\n return CommandUtil._execute_internal(cmd, True, True, log_cmd, also_output_to_file)", "def execute(cmd, path):\n oldPath = os.getcwd()\n os.chdir(path)\n\n exitcode, output = subprocess.getstatusoutput(cmd)\n\n os.chdir(oldPath)\n\n ok = not exitcode\n\n return ok, output", "def executeCommand(commandtoexecute):\n try:\n _output = commands.getstatusoutput(commandtoexecute)\n except Exception as er:\n print \"not able to execute command\"\n return False\n return _output", "def optional_output_file(func):\n\n @click.argument(\n \"output\", required=False, type=click.Path(exists=False, )\n )\n @wraps(func)\n def wrapped(*args, **kwargs):\n return func(*args, **kwargs)\n return wrapped", "def check_output(args, **kwargs):\n kwargs.setdefault('stdin', DEVNULL)\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it would be overridden.')\n return check_call_out(args, stdout=PIPE, **kwargs)[0]", "def present_output(self, output: str):\n\n if self._config.use_stdout():\n print(output, flush=True, end='')\n else:\n try:\n # print(\"OUTPUT FILE IS THERE!!!!\", 
self._config.values['output'])\n with open(self._config.values['output'], mode='w', newline='') as f:\n # print(\"PRINITNG OUTPUT>>>>\"+output+\"<<<<<\")\n f.write(output)\n # print(\"OUTPUT FILE IS THERE!!!!\", \"After write\")\n except:\n raise OutputFileException('Error writing the output')", "def shell_execute(command, output, test=None, options=None,\n data=None, test_split=None):\n command = check_debug(command)\n world.directory = os.path.dirname(output)\n world.folders.append(world.directory)\n try:\n retcode = check_call(command, shell=True)\n if retcode < 0:\n assert False\n else:\n if test is not None:\n world.test_lines = file_number_of_lines(test)\n if options is None or options.find('--prediction-header') == -1:\n # test file has headers in it, so first line must be ignored\n world.test_lines -= 1\n if test_split is not None:\n data_lines = file_number_of_lines(data) - 1\n world.test_lines = int(data_lines * float(test_split))\n \n world.output = output\n assert True\n except (OSError, CalledProcessError, IOError) as exc:\n assert False, str(exc)", "def exists(parser, output_name):\n return parser.setParseAction(isNotEmpty).setResultsName(output_name)", "def checkForCommand(quickLogger, commandList):\n\n for command in commandList:\n\n cmd = \"which -s \" + command + \" > \" + os.devnull + \" 2>&1\"\n retcode = os.system(cmd)\n \n if(retcode):\n quickLogger.critical(\"unix command \"+command+\" not found.\")\n raise CommandNotFound", "def executeSAGATool(self, tool_cmd, f_out, desc=''):\n\n if self.runSep(self.p_uid):\n\n self.logger.debug('[SAGA cmd]: %s', tool_cmd)\n subprocess.call(self.saga_cmd + tool_cmd)\n\n # if output file exist log workflow step\n if os.path.isfile(f_out):\n if not desc:\n desc = '{0}: Processing step {1}'.format(self.wf_name, self.p_uid)\n\n self.logWorkflowStep(self.p_uid, desc)\n else:\n raise workflowException('SAGA process \\'{0}\\' output file is missing: {1}'.format(desc, f_out))\n else:\n self.logger.info(desc + ': done.')\n\n # increment p_uid\n self.p_uid += 10\n\n return", "def get_output(command):\n if is_debug_environment():\n dir_name = get_test_dir_argument()\n filename = '{}.txt'.format(command)\n full_path = os.path.join(dir_name, filename)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n return f.read()\n raise IOError(\"File {} is not found\".format(full_path))\n else:\n import cli\n result = cli.cli(command)\n i = 0\n # Occasionally, cli library returns an empty output, in this case we try multiple time\n while result.count('\\n') <= 2 and i < 3:\n result = cli.cli(command)\n i += 1\n return result", "def execute_task(self, filename):\n stdin, stdout, stderr = self.ssh.exec_command(open(filename).read())\n if stdout.channel.recv_exit_status() == 0:\n return True, stdout.read().strip(), stderr.read().strip()\n else:\n return False, stdout.read().strip(), stderr.read().strip()", "def test_missing_file():\n\n rv, out = getstatusoutput(f'{prg} -o {outfile}')\n assert rv != 0\n assert re.search('the following arguments are required: -f/--file', out)", "def command_exe(cmd, file_name, folder=None):\r\n if folder:\r\n file_path = os.path.join(folder, file_name)\r\n subprocess.call('%s > %s' % (cmd, file_path), shell=True)\r\n logging.info(file_path)\r\n else:\r\n file_path = os.path.join(os.getcwd(), file_name)\r\n subprocess.call('%s > %s' % (cmd, file_path), shell=True)\r\n logging.info(file_path)", "def get_prog_file():\n get_file()\n ## Executa\n file = ARGS.output\n os.system(\"chmod +x \" + file)\n 
subprocess.call([file])", "def execute(self, *args, **options):\n self.output_file = options.get('output_file')\n try:\n super(CsvMixin, self).execute(*args, **options)\n finally:\n self.close_file_handle()", "def test_call_output_to_file(self):\r\n\r\n fd, tmp_result_filepath = mkstemp(\r\n prefix='CdHitOtuPickerTest.test_call_output_to_file_',\r\n suffix='.txt')\r\n close(fd)\r\n\r\n app = CdHitOtuPicker(params={'Similarity': 0.90})\r\n obs = app(self.tmp_seq_filepath1, result_path=tmp_result_filepath)\r\n\r\n result_file = open(tmp_result_filepath)\r\n result_file_str = result_file.read()\r\n result_file.close()\r\n # remove the result file before running the test, so in\r\n # case it fails the temp file is still cleaned up\r\n remove(tmp_result_filepath)\r\n\r\n # compare data in result file to fake expected file\r\n self.assertEqual(result_file_str, dna_seqs_result_file_90_exp)\r\n # confirm that nothing is returned when result_path is specified\r\n self.assertEqual(obs, None)", "def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd)#, output=output)\n return output", "def check_for(command):\n if shutil.which(command) is None:\n print(colored(\"{} not available on system\".format(command),\"red\"))\n sys.exit(1)", "def check_output(*popenargs, **kwargs):\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n logging.debug(' '.join(popenargs[0]))\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd)\n return output", "def execute(cmd) :\n return os.system( cmd )", "def command(self, command, out=False, err=False):\n \n if out:\n std_out = subprocess.PIPE\n else:\n std_out = None\n \n if not err:\n std_err = subprocess.PIPE\n else:\n std_err = None\n \n \n proc = subprocess.Popen(command, stdout = std_out, stderr=std_err)#std_out)\n out, err = proc.communicate()\n \n return out, err", "def testGetOutput(self):\n #f = open(\"src_output.root\", 'w')\n #f.close()\n\n #1) missing required -d option (the other required option, -r, is ignored)\n go = getoutput(self.logger, self.maplistopt)\n res = go()\n expRes = CommandResult(2001, 'ERROR: Task option is required')\n self.assertEquals(expRes, res)\n\n #2) -d option is present but -r is missing\n analysisDir = self.reqarea\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir])\n res = go()\n expRes = CommandResult(2002, 'ERROR: Range option is required')\n self.assertEquals(expRes, res)\n\n #3) request passed with the -d option does not exist\n #res = go([\"-d\", analysisDir + \"asdf\"])\n #TODO we expect an appropriate answer from the server.\n #By now, the server just answer an empty list\n\n #4) check correct behaviour without specifying output directory\n #N.B.: -p options is required for tests to skip proxy creation and delegation\n destDir = os.path.join(analysisDir, 'results')\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-p\"])\n res = go()\n expRes = 
CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir(destDir))\n self.assertTrue(os.path.isfile(os.path.join(destDir, '1.root')))\n #Remove the directory\n shutil.rmtree(destDir)\n self.assertFalse(os.path.isdir(destDir))\n self.assertEquals(expRes, res)\n\n #5) correct behavior and output directory specified which exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp'))\n destFile = os.path.join('/tmp', '1.root')\n self.assertTrue(os.path.isfile(destFile))\n os.remove(destFile)\n self.assertFalse(os.path.isfile(destFile))\n self.assertEquals(expRes, res)\n\n #6) correct behavior and output directory specified which does not exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp/asdf/qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp/asdf/qwerty'))\n #Remove the directory\n shutil.rmtree('/tmp/asdf/qwerty')\n self.assertEquals(expRes, res)\n\n #7) correct behavior and output directory specified which does not exists (relative path)\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('qwerty'))\n #Remove the directory\n shutil.rmtree('qwerty')\n self.assertEquals(expRes, res)", "def check_output_file(output_dir,\n identifier,\n shuffle_labels,\n model_options,\n predictor='classify',\n fold_no=None,\n titration_ratio=None):\n\n signal = 'shuffled' if shuffle_labels else 'signal'\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n if isinstance(model_options.n_dim, list):\n n_dim = '.'.join(map(str, model_options.n_dim))\n else:\n n_dim = model_options.n_dim\n\n check_file = construct_filename(output_dir,\n 'coefficients',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n if check_file.is_file():\n raise ResultsFileExistsError(\n 'Results file already exists for identifier: {}\\n'.format(\n identifier)\n )\n return check_file", "def fetch_output(self, path, name, working_directory, action_type, output_type):\n if output_type in ['output_workdir', 'output_metadata']:\n self._populate_output_path(name, path, action_type, output_type)\n elif output_type == 'output':\n self._fetch_output(path=path, name=name, action_type=action_type)\n else:\n raise Exception(\"Unknown output_type %s\" % output_type)", "def assertExecutedOnce(self, component: Text) -> None:\n component_path = os.path.join(self._pipeline_root, component)\n self.assertTrue(fileio.exists(component_path))\n outputs = fileio.listdir(component_path)\n\n self.assertIn(\".system\", outputs)\n outputs.remove(\".system\")\n system_paths = [\n os.path.join(\".system\", path)\n for path in fileio.listdir(os.path.join(component_path, \".system\"))\n ]\n self.assertNotEmpty(system_paths)\n self.assertIn(\".system/executor_execution\", system_paths)\n outputs.extend(system_paths)\n self.assertNotEmpty(outputs)\n for output in 
outputs:\n execution = fileio.listdir(os.path.join(component_path, output))\n self.assertLen(execution, 1)", "def get_output(self, output, download_dir, overwrite=False, callback=None, block=4096):\n download = self._get_file(output, download_dir, overwrite, callback=callback, block=block)\n if download.success:\n return os.path.join(download_dir, output.get('name', ''))\n else:\n raise download.result", "def __fetch_output_task(\n self, task, download_dir, overwrite, changed_only, **extra_args):\n return task.fetch_output(\n download_dir, overwrite, changed_only, **extra_args)", "def check_output(*popenargs, **kwargs):\n process = subprocess.Popen(stdout=subprocess.PIPE,\n universal_newlines=True,\n *popenargs,\n **kwargs)\n output, _ = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n error = subprocess.CalledProcessError(retcode, cmd)\n error.output = output\n raise error\n return output.strip()", "def _execute_run(prefix, osiris_path, run_dir):\n # Cf. run_config\n p = subprocess.Popen(prefix + osiris_path + \" > out.txt 2> err.txt\", shell=True, cwd=path.abspath(run_dir))\n p.wait()", "def run_command(self,command):\n from subprocess import Popen, PIPE, STDOUT\n if command == '':\n raise RuntimeError('no command for run_command :(')\n # print 'Running: ', command #debug\n proc = Popen([command], shell=True, stderr=PIPE)\n proc.wait()\n exitcode = proc.returncode\n if exitcode != 0:\n # print exitcode,'label:', self.calc_dir\n error='%s exited with error code %i in %s' % (\n command,exitcode,self.calc_dir)\n stdout,stderr = proc.communicate()\n print 'shell output: ',stdout,stderr\n raise RuntimeError(error)\n return 0", "def run_file(self, value=None):\n self.save_file()\n self.p = Popen(\"./Project/myfile.py\", stdout=PIPE, stderr=PIPE)\n output, errors = self.p.communicate()\n self.my_output.delete(\"1.0\", END)\n self.my_output.insert(\"1.0\", output)\n if errors != \"\":\n print_to_log(errors)\n self.my_output.configure(fg=\"red\")\n else:\n self.my_output.configure(fg=\"white\")\n self.my_output.insert(\"1.0\", errors)", "def _check_output(*args, **kwargs):\n kwargs['stdout'] = subprocess.PIPE\n p = subprocess.Popen(*args, **kwargs)\n stdout, stderr = p.communicate()\n if p.returncode != 0:\n raise ValueError(\n 'subprocess exited with return code %s' % p.returncode\n )\n return stdout", "def subprocess_check_output(*popenargs, **kwargs):\r\n if 'stdout' in kwargs:\r\n raise ValueError('stdout argument not allowed, it will be overridden.')\r\n if 'stderr' in kwargs:\r\n raise ValueError('stderr argument not allowed, it will be overridden.')\r\n\r\n #executable_exists(popenargs[0][0])\r\n\r\n # NOTE: it is very, very important that we use temporary files for\r\n # collecting stdout and stderr here. There is a nasty bug in python\r\n # subprocess; if your process produces more than 64k of data on an fd that\r\n # is using subprocess.PIPE, the whole thing will hang. 
To avoid this, we\r\n # use temporary fds to capture the data\r\n stdouttmp = TemporaryFile()\r\n stderrtmp = TemporaryFile()\r\n\r\n process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp, *popenargs,\r\n **kwargs)\r\n process.communicate()\r\n retcode = process.poll()\r\n\r\n stdouttmp.seek(0, 0)\r\n stdout = stdouttmp.read()\r\n stdouttmp.close()\r\n\r\n stderrtmp.seek(0, 0)\r\n stderr = stderrtmp.read()\r\n stderrtmp.close()\r\n\r\n if retcode:\r\n cmd = ' '.join(*popenargs)\r\n raise Exception(\"'%s' failed(%d): %s\" % (cmd, retcode, stderr), retcode)\r\n return (stdout, stderr, retcode)", "def assertOutput(self, toExec, argList, expectedStdout=None, \n\t\t\texpectedStderr=\"\", expectedRetcode=0, input=None,\n\t\t\tstdoutStrings=None):\n\t\tfor name in [\"output.stderr\", \"output.stdout\"]:\n\t\t\ttry:\n\t\t\t\tos.unlink(name)\n\t\t\texcept os.error:\n\t\t\t\tpass\n\n\t\tif isinstance(toExec, basestring):\n\t\t\tp = subprocess.Popen([toExec]+argList, executable=toExec, \n\t\t\t\tstdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\telse:\n\t\t\tp = ForkingSubprocess([\"test harness\"]+argList, executable=toExec, \n\t\t\t\tstdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tout, err = p.communicate(input=input)\n\t\tretcode = p.wait()\n\n\t\ttry:\n\t\t\tself.assertEqual(expectedRetcode, retcode)\n\n\t\t\tif isinstance(expectedStderr, basestring):\n\t\t\t\tself.assertEqual(err, expectedStderr)\n\t\t\telse:\n\t\t\t\tself.failUnless(expectedStderr(err))\n\t\texcept AssertionError:\n\t\t\twith open(\"output.stdout\", \"w\") as f:\n\t\t\t\tf.write(out)\n\t\t\twith open(\"output.stderr\", \"w\") as f:\n\t\t\t\tf.write(err)\n\t\t\traise\n\n\t\ttry:\n\t\t\tif isinstance(expectedStdout, basestring):\n\t\t\t\tself.assertEqual(out, expectedStdout)\n\t\t\telif expectedStdout is not None:\n\t\t\t\tself.failUnless(expectedStdout(out))\n\t\t\tif stdoutStrings:\n\t\t\t\tfor s in stdoutStrings:\n\t\t\t\t\tself.failIf(s not in out, \"%s missing\"%s)\n\t\texcept AssertionError:\n\t\t\twith open(\"output.stdout\", \"w\") as f:\n\t\t\t\tf.write(out)\n\t\t\traise", "def get_output(self, download_dir, output=None, overwrite=False, callback=None, block=4096):\n if output:\n name = output.get('name', \"\")\n download = self._get_intermediate_output(output,\n download_dir,\n overwrite,\n callback=callback,\n block=block)\n\n elif self.output_url and self.output_filename:\n name = self.output_filename\n download = self._get_final_output(download_dir, overwrite,\n callback=callback, block=block)\n\n else:\n raise FileDownloadException(\n \"Job has no reference to an output file, \"\n \"please update to check if the output is ready\")\n\n if download.success:\n return os.path.join(download_dir, name)\n\n else:\n raise download.result", "def _output_result(result: str, arguments: Dict[str, str]) -> None:\n if (\"ciphered_file\" in arguments) or (\"deciphered_file\" in arguments):\n output_filename = arguments[\"ciphered_file\"] if \"ciphered_file\" in arguments else arguments[\"deciphered_file\"]\n with open(output_filename, mode=\"w\") as output_file:\n output_file.write(result)\n output_file.flush()\n else:\n print(result)", "def command_output(cmd):\n import subprocess\n return subprocess.Popen(\n cmd.split(\";\"), stdout=subprocess.PIPE).communicate()[0]", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def eichertest():\n try:\n eicher = ('echo ZQZXJVBVT >> mcafee1.txt', 'echo ZQZXJVBVT >> mcafee2.txt','echo ZQZXJVBVT >> mcafee2.txt')\n for 
test in eicher:\n executeCommand(test)\n except Exception as er:\n print \"Not able to do install check\"\n return False", "def __subprocess_out(self):\n file_out = open(os.path.join(self.report_path, \"cyclomatic-complexity.csv\"), \"w\")\n status = subprocess.call(r'%s' % self.cmd, stdout=file_out)\n if status:\n print(\"There was error while processing the sub process command\") # pragma: no mutate\n file_out.close()\n return status", "def write_actual_output(self, output):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n with open(actual_output_file, \"w\") as f:\n f.write(output)", "def ExecuteScript(script):\n os.system(\"%s > /dev/null 2>&1\" % script)", "def test_DDSim_runIt_success_OutputFile_1(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.SteeringFile = \"mySteering.py\"\n self.ddsim.OutputFile = \"grailDiary.root\"\n ## side effect for Steering1a, Steering1b, Steering2, Script, userlibs, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, True, True, False, False, False, True] ) ):\n res = self.ddsim.runIt()\n assertDiracSucceeds( res, self )\n self.assertIn( \" --outputFile grailDiary.root \", self.ddsim.extraCLIarguments )", "def execute(container, output_strategy, *args, **kwargs):\n env = os.environ.copy()\n\n if kwargs.get(\"env\"):\n env.update(kwargs[\"env\"])\n\n try:\n cmd = list(process_shebang(args))\n\n # On Windows, we need to explicitly specify the\n # executable, since PATH is only read in its\n # state at the time this process started and\n # not at the time Popen is called.\n if not os.path.exists(cmd[0]):\n cmd[0] = which(cmd[0])\n assert cmd[0] is not None\n\n # Sanity-check for non-unicode environment\n # variables and print the name of the\n # variable and its value/type on error.\n for key, value in env.items():\n if not isinstance(value, str):\n raise RuntimeError(\"\"\"{}'s value {} is not a \"\"\"\n \"\"\"str but a {}\"\"\".format(key,\n repr(value),\n type(value)))\n\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env)\n except OSError as error:\n raise Exception(u\"\"\"Failed to execute \"\"\"\n u\"\"\"{0} - {1}\"\"\".format(\" \".join(cmd), str(error)))\n\n with close_file_pair((process.stdout, process.stderr)) as outputs:\n status = output_strategy(process, outputs)\n\n instant_fail = kwargs.get(\"instant_fail\") or False\n\n if status != 0:\n IndentedLogger.message(u\"\"\"!!! Process {0}\\n\"\"\".format(cmd[0]))\n for arg in cmd[1:]:\n IndentedLogger.message(u\"\"\"!!! {0}\\n\"\"\".format(arg))\n IndentedLogger.message(u\"\"\"!!! 
failed with {0}\\n\"\"\".format(status))\n if not kwargs.get(\"allow_failure\", None):\n container.note_failure(instant_fail)\n\n return status", "def get_output(self, cmd, *args, **kwargs):\n return self.exec(cmd, *args, **kwargs, capture_output=True)", "def _call(self, filename, *args, **kwargs):\n #hard code tmp_output as the output name since we don't save it anyway\n #self.cli('{} -log {log_output} {seqfile} > {tmp_path}'.format(self.command(), tmp_path=os.path.join(tmpd,'tmp_output'), log_output=logfile, seqfile=filename), wait=True)\n self.cli('{} {seq_file}'.format(self.command(), seq_file=filename), wait=True)\n\n return (self.cli.get_stdout(), self.cli.get_stderr())", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def test_outfile():\n\n out_file = random_filename()\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n try:\n cmd = f'{prg} --cdhit {cdhit} --proteins {proteins} -o {out_file}'\n rv, out = getstatusoutput(cmd)\n assert rv == 0\n\n assert out == ('Wrote 309 of 220,520 unclustered '\n f'proteins to \"{out_file}\"')\n\n assert os.path.isfile(out_file)\n\n seqs = list(SeqIO.parse(out_file, 'fasta'))\n assert len(seqs) == 309\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def get_output_file_name(argn=2, std_name='output.txt'):\n try:\n name = sys.argv[argn]\n except IndexError:\n name = std_name\n print(\"Warning: no output file name received. 
Output will be\"\n \" written to '%s'.\" % name)\n return name", "def execute_tool(description, *args):\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)", "def test_empty_file(self):\n input_file = \"does_not_exist.fasta\"\n self.assertFalse(os.path.isfile(input_file))\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")", "def execute(self, log_out, log_err):\n EventGenerator.execute(self, log_out, log_err)\n if 'moller' not in self.name:\n src = os.path.join(self.rundir, 'brems.stdhep')\n dest = os.path.join(self.rundir, self.output_files()[0])\n logger.debug(\"Copying '%s' to '%s'\" % (src, dest))\n shutil.copy(src, dest)", "def execute_command(self, command):\n\n if os.system(command) != 0:\n self.log('ERROR: Command \"' + command + '\" returned with a non-zero exit code')\n raise NonZeroExitCodeException('Non-zero exit code')", "def has_fileout(self):\n return self.fileout is not None", "async def collect_final_outputs(self) -> None: # pylint: disable=too-many-branches\n self._become_current()\n\n missing_outputs = False\n assert self.step is not None\n\n did_sleep = False\n\n for pattern in sorted(self.step.output): # pylint: disable=too-many-nested-blocks\n formatted_pattern = fmt_capture(self.kwargs, pattern)\n if is_phony(pattern):\n Invocation.up_to_date[formatted_pattern] = UpToDate(self.name, self.newest_input_mtime_ns + 1)\n continue\n\n try:\n paths = glob_paths(formatted_pattern)\n if not paths:\n Logger.debug(f\"Did not make the optional output(s): {pattern}\")\n else:\n for path in paths:\n self.built_outputs.append(path)\n\n global touch_success_outputs # pylint: disable=invalid-name\n if touch_success_outputs.value:\n if not did_sleep:\n await self.done(asyncio.sleep(1.0))\n did_sleep = True\n Logger.file(f\"Touch the output: {path}\")\n Stat.touch(path)\n\n mtime_ns = Stat.stat(path).st_mtime_ns\n Invocation.up_to_date[path] = UpToDate(self.name, mtime_ns)\n\n if Logger.isEnabledFor(logging.DEBUG):\n if path == formatted_pattern:\n Logger.debug(f\"Has the output: {path} \" f\"time: {_datetime_from_nanoseconds(mtime_ns)}\")\n else:\n Logger.debug(\n f\"Has the output: {pattern} -> {path} \"\n f\"time: {_datetime_from_nanoseconds(mtime_ns)}\"\n )\n\n except NonOptionalException:\n self._become_current()\n Logger.error(f\"Missing the output(s): {pattern}\")\n missing_outputs = True\n break\n\n if missing_outputs:\n self.abort(\"Missing some output(s)\")", "def _write_output(output: List[str], output_file: Optional[str]) -> None:\n if output_file:\n with open(output_file, 'w+') as file:\n file.write('\\n'.join(output))\n else:\n for line in output:\n print(line)", "def command_exists(name, path=None):\n if path is None:\n path = sys.path\n\n for prefix in path:\n filename = os.path.join(prefix, name)\n is_executable = os.access(filename, os.X_OK)\n is_file = os.path.isfile(filename)\n if is_executable and is_file:\n return True\n\n return False", "def _exec_command(command):\n\n log(\"Run command for '%s'\" % command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n p_status = p.wait()\n return p_status, output", "def has_output(self, output_ref):\n outputs = self.get_recipe_outputs()\n for 
(output_role_name, output_role) in outputs.items():\n for item in output_role.get(\"items\", []):\n if item.get(\"ref\", None) == output_ref:\n return True\n return False", "def _get_sys_cmd(self, file_path_input, fold_coverage, file_path_output_prefix):\n assert self.validate_file(file_path_input)\n assert isinstance(fold_coverage, (int, float))\n assert self.validate_dir(file_path_output_prefix, only_parent=True)\n\n # TODO: mask 'N' default: '-nf 1'\n read_length = self._art_read_length[self._profile]\n error_profile = os.path.join(self._directory_error_profiles, self._art_error_profiles[self._profile])\n arguments = [\n \"-sam\", \"-na\",\n \"-i '{}'\".format(file_path_input),\n \"-l\", str(read_length),\n \"-m\", str(self._fragment_size_mean),\n \"-s\", str(self._fragment_size_standard_deviation),\n \"-f\", str(fold_coverage),\n \"-o '{}'\".format(file_path_output_prefix),\n \"-1 '{}'\".format(error_profile+'1.txt'),\n \"-2 '{}'\".format(error_profile+'2.txt'),\n ]\n\n if self._logfile:\n arguments.append(\">> '{}'\".format(self._logfile))\n\n # art illumina only accepts integer as seed!\n arguments.append(\"-rs '{}'\".format(self._get_seed()))\n\n cmd = \"{exe} {args}\".format(exe=self._file_path_executable, args=\" \".join(arguments))\n return cmd", "def execute(self, parameters, messages):\n #3.4\n try:\n importlib.reload (arcsdm.combine_outputnnfiles)\n except :\n reload(arcsdm.combine_outputnnfiles);\n arcsdm.combine_outputnnfiles.execute(self, parameters, messages)\n return" ]
[ "0.6835208", "0.6785431", "0.667483", "0.6603357", "0.6349663", "0.606884", "0.60539275", "0.5998481", "0.59256774", "0.58965987", "0.5828717", "0.57747966", "0.5732897", "0.56906545", "0.56636125", "0.5657635", "0.56451756", "0.5630182", "0.56291837", "0.5592528", "0.55353385", "0.54916877", "0.54818755", "0.5480598", "0.54655564", "0.54590255", "0.5442302", "0.5441071", "0.5426283", "0.5395376", "0.53868616", "0.53861314", "0.5381742", "0.53598833", "0.5359872", "0.5340553", "0.533075", "0.5317391", "0.5317391", "0.53045946", "0.53038675", "0.5284631", "0.5283594", "0.5282211", "0.5238213", "0.5235603", "0.52337116", "0.52323663", "0.52290547", "0.52142435", "0.52139276", "0.5202516", "0.52010083", "0.51969004", "0.5188644", "0.5181365", "0.5178959", "0.51771265", "0.51748395", "0.5161045", "0.5153142", "0.51437736", "0.5129286", "0.51269037", "0.5122244", "0.51185095", "0.5118305", "0.51145816", "0.510891", "0.51055145", "0.50926816", "0.5077752", "0.5075787", "0.5075702", "0.5073708", "0.5063155", "0.5058177", "0.5046708", "0.50432163", "0.50414026", "0.50273204", "0.5024844", "0.5019808", "0.50060666", "0.5005501", "0.5004443", "0.5002541", "0.5000222", "0.49910882", "0.4990591", "0.49896017", "0.49840474", "0.4977657", "0.49773848", "0.49745914", "0.49669817", "0.4966776", "0.49649832", "0.49636388", "0.49552718", "0.4950346" ]
0.0
-1
Compute the temporal SNR for each epi, save in a nifti file, and store a summary in a png file.
def ComputeSNR(self): for epi in self.entry_map['epi']: epifile = self.info[epi]['imgfile_final'] + self.info[epi]['suffix'] prefix = self.info[epi]['imgfile_final'] + '_snr' if not os.path.exists('%s_snr.png' % prefix): if self.verbose: print 'TemporalSnr(epifile=%s, prefix=%s)' % \ (epifile, prefix) try: TemporalSnr(epifile=epifile, prefix=prefix)() except: print("Error computing temporal SNR")
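A minimal illustrative sketch (not the TemporalSnr implementation the record above delegates to) of how voxelwise temporal SNR for a 4D EPI could be computed, written to NIfTI, and summarised as a PNG; it assumes nibabel, numpy and matplotlib are available, and the file names are hypothetical:

    # Sketch only: tSNR = temporal mean / temporal std, voxelwise.
    import nibabel as nib
    import numpy as np
    import matplotlib.pyplot as plt

    def temporal_snr_sketch(epifile, prefix):
        img = nib.load(epifile)                        # 4D EPI: x, y, z, time
        data = img.get_fdata()
        mean_t = data.mean(axis=-1)                    # voxelwise mean over time
        std_t = data.std(axis=-1)                      # voxelwise std over time
        snr = np.where(std_t > 0, mean_t / std_t, 0)   # guard against divide-by-zero
        nib.save(nib.Nifti1Image(snr, img.affine), prefix + '.nii.gz')
        mid = snr.shape[2] // 2                        # middle axial slice as the summary image
        plt.imshow(snr[:, :, mid].T, origin='lower', cmap='gray')
        plt.colorbar(label='tSNR')
        plt.savefig(prefix + '.png')
        plt.close()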
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_SNR(snid: int, photo_data: pd.DataFrame, \n head_data: pd.DataFrame, code_zenodo: int, \n snana_file_index: int, code_snana: int):\n \n types_names = {90: 'Ia', 62: 'Ibc', 42: 'II', 67: '91bg', 52: 'Iax',\n 64:'KN', 95: 'SLSN', 994: 'PISN', 992: 'ILOT', \n 993: 'CaRT', 15: 'TDE', 88: 'AGN', 92: 'RRL', \n 65: 'M-dw', 16: 'EB', 53: 'Mira', 991: 'BMicroL',\n 6: 'MicroL'}\n \n # LSST filters\n filters = [b'u ', b'g ', b'r ', b'i ', b'z ', b'Y ']\n\n flag_id_photo = photo_data['SNID'] == snid\n\n flux = photo_data[flag_id_photo]['FLUXCAL'].values\n fluxerr = photo_data[flag_id_photo]['FLUXCALERR'].values\n\n SNR_all = flux/fluxerr\n \n indx = np.random.choice(range(flux.shape[0]))\n\n flag_id_head = head_data['SNID'].values == snid\n redshift = head_data['SIM_REDSHIFT_CMB'].values[flag_id_head][0]\n \n # store values\n line = [snid, snana_file_index, code_zenodo, code_snana, \n types_names[code_zenodo], redshift]\n \n for fil in filters: \n line.append(head_data['SIM_PEAKMAG_' + str(fil)[2]].values[flag_id_head][0])\n \n # calculate SNR statistics \n for f in [np.mean, max, np.std]: \n for fil in filters: \n \n flag_fil = photo_data[flag_id_photo]['FLT'] == fil\n neg_flag = flux > -100\n flag2 = np.logical_and(flag_fil, neg_flag)\n \n if sum(flag2) > 0:\n SNR_fil = SNR_all[flag2] \n line.append(f(SNR_fil))\n \n if len(line) == 30:\n return line\n else:\n return []", "def save(self, compute_snrs=True):\n if not self.save_mode:\n raise RuntimeError('Need to enable save mode to save')\n\n fn = os.path.join(self.output_dir,\n 'data_' + time_string() + '.h5')\n save_dict(fn=fn, d=self.data)\n if compute_snrs:\n from src.analyzer import DataAnalyzer\n da = DataAnalyzer.fromfilename(fn)\n da.snr_list()\n return fn", "def do_SEIR(self, t_max=200, dt=1.):\n dt = float(dt)\n g = Graph()\n\n for node in ['S', 'E', 'I', 'R']:\n g.add_node(node, 0)\n\n g.set_node('S', self.population)\n g.set_node('E', 0)\n g.set_node('I', self.N_init)\n g.set_node('R', 0)\n\n # cumulative time series\n S = [g.get_node_value('S')] # Susceptible\n E = [g.get_node_value('E')] # Exposed\n I = [g.get_node_value('I')] # noqa Infected\n R = [g.get_node_value('R')] # Recovered\n\n ts = [0.] 
# time series\n nms = ['prob', 'lag']\n\n g.add_edge('S', 'S', nms, [0.1, 2])\n g.add_edge('E', 'E', nms, [0.4, 21])\n g.add_edge('I', 'I', nms, [0.1, 2])\n\n g.add_edge('S', 'E', nms, [1.2, 1])\n g.add_edge('E', 'I', nms, [0.1, 14]) # [, tiempo de incubacion]\n g.add_edge('I', 'R', nms, [0.7, 2]) # [, tiempo de recuperacion]\n\n t, time_steps = 0., 0\n while t < t_max:\n\n time_steps = time_steps + 1\n\n t = t + dt\n ts.append(t)\n\n # (( S ))\n prob_SS = g.get_edge('S', 'S', 'prob') # beta\n\n dS = - S[-1] * (I[-1] / self.population) * prob_SS\n\n # n_S = min(S[-1] + min(dS * dt, 0), self.population)\n n_S = S[-1] + dS * dt\n\n # (( E ))\n prob_EE = g.get_edge('E', 'E', 'prob')\n dE = - dS - prob_EE * E[-1]\n\n # n_E = min(E[-1] + max(dE * dt, 0), self.population)\n n_E = E[-1] + dE * dt\n\n # (( I ))\n prob_EI = g.get_edge('E', 'I', 'prob')\n lag_EI = g.get_edge('E', 'I', 'lag')\n update_EI = E[-lag_EI] if lag_EI < len(E) else 0.\n\n prob_IR = g.get_edge('I', 'R', 'prob')\n lag_IR = g.get_edge('I', 'R', 'lag')\n update_IR = I[-lag_IR] if lag_IR < len(I) else 0.\n\n prob_II = g.get_edge('I', 'I', 'prob')\n\n dI = prob_EI * update_EI - prob_IR * update_IR\n dI = -dI # porque ????\n n_I = min(I[-1] + dI * dt, self.population)\n\n # (( R ))\n prob_II = g.get_edge('I', 'I', 'prob')\n dR = prob_II * I[-1]\n n_R = min(R[-1] + max(dR * dt, 0), self.population)\n\n S.append(n_S)\n E.append(n_E)\n I.append(n_I)\n R.append(n_R)\n\n df = pd.DataFrame(\n {'ts': ts, 'S': S, 'E': E, 'I': I, 'R': R}).set_index(\"ts\")\n\n extra = attr.asdict(self)\n extra[\"model_name\"] = \"SEIR\"\n return ModelResultFrame(df=df, extra=extra)", "def run(self):\r\n #print 'WriteFITS_IDI.run'\r\n\r\n # construct the name of the file\r\n readfits = self.previous_results['readfits']\r\n obs_date = readfits['obs date']\r\n idifitsfile = '%s.idi.fits' % obs_date\r\n\r\n configxml = 'firi.xml'\r\n\r\n # midnight on date to Julian day\r\n obs_date_midnight = astro_time.Time('%s-%s-%sT00:00:00' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:8]), format='isot')\r\n obs_date_midnight = obs_date_midnight.jd\r\n\r\n rdate = astro_time.Time(obs_date_midnight, format='jd',\r\n out_subfmt='date')\r\n rdate = rdate.iso\r\n\r\n # number of days after midnight at obs start\r\n obs_date_time = astro_time.Time('%s-%s-%s:%s:%s' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:11], obs_date[11:13],\r\n obs_date[13:]), format='isot')\r\n obs_date_time = obs_date_time.jd - obs_date_midnight\r\n\r\n # get specific items from the results that will be need in\r\n # the reduction\r\n reduce_interferogram = self.previous_results['reduceinterferogram']\r\n data_quality = reduce_interferogram['data_quality']\r\n scan_uvspectra = reduce_interferogram['scan_uvspectra']\r\n\r\n wavenumber = scan_uvspectra[0].wavenumber\r\n\r\n # construct lists of the values to be stored in each Table column\r\n n_uvspectra = max(scan_uvspectra.keys()) + 1\r\n mcomplex = 3\r\n mstokes = 1\r\n mfreq = len(wavenumber)\r\n mra = 1\r\n mdec = 1\r\n\r\n uv_data = np.zeros([n_uvspectra, mdec, mra, mfreq, mstokes, mcomplex])\r\n u = np.zeros([n_uvspectra])\r\n v = np.zeros([n_uvspectra])\r\n w = np.zeros([n_uvspectra])\r\n dates = np.zeros([n_uvspectra])\r\n times = np.zeros([n_uvspectra])\r\n baselines = np.zeros([n_uvspectra], dtype=np.int)\r\n freqid = np.ones([n_uvspectra], dtype=np.int)\r\n\r\n for k,val in scan_uvspectra.items():\r\n uv_data[k,0,0,:,0,0] = val.spectrum.real\r\n uv_data[k,0,0,:,0,1] = val.spectrum.imag\r\n uv_data[k,0,0,:,0,2] = 
np.ones(val.spectrum.real.shape)\r\n u[k] = np.mean(val.baseline_x)\r\n v[k] = np.mean(val.baseline_y)\r\n w[k] = np.mean(val.baseline_z)\r\n dates[k] = obs_date_midnight\r\n times[k] = obs_date_time + (np.mean(val.time) / (3600 * 24))\r\n baselines[k] = 258\r\n\r\n # external_params is referred to inside config.xml and can be\r\n # used to set parameters there\r\n light_speed = constants.c.to('m/s').value\r\n external_params = {'NCHAN':len(wavenumber),\r\n 'RDATE':rdate,\r\n 'REF_FREQ':0.0 * 100 * light_speed,\r\n 'CHAN_BW':np.abs(wavenumber[1] - wavenumber[0]) * \\\r\n 100 * light_speed}\r\n\r\n print \"Out: %s\\nConfig: %s\"%(idifitsfile, configxml)\r\n\r\n print('\\nConfiguring Array geography')\r\n print('--------------------------')\r\n # Meaningless numbers, hopefully not needed by any CASA method \r\n # that we want to use\r\n (latitude, longitude, elevation) = ('00:00:00.00', '00:00:00.00', 0)\r\n now = datetime.datetime.now()\r\n\r\n # Make ourselves an Array (pyEphem observer)\r\n array_geometry_m = np.array([\r\n [0.0, 0.0, 0.0],\r\n [0.0, 80.0, 0.0]], dtype = 'float32')\r\n beach = Array(lat=latitude, long=longitude, elev=elevation, date=now,\r\n antennas=array_geometry_m)\r\n\r\n print('\\nConfiguring phase source')\r\n print('--------------------------')\r\n # The source is our phase centre for UVW coordinates\r\n line = \"%s,f,%s,%s,%s,%d\" % ('Deep Space', '00:00:00',\r\n '00:00:00', '1', 2000)\r\n source = ephem.readdb(line)\r\n source.compute(beach)\r\n print \"Name: %s \\nRA: %s \\nDEC: %s\"%(source.name, source.ra, source.dec)\r\n\r\n # Make a new blank FITS HDU\r\n print('\\nCreating PRIMARY HDU')\r\n print('------------------------------------')\r\n hdu = make_primary(config=configxml, external_params=external_params)\r\n print repr(hdu.header)\r\n\r\n # Go through and generate required tables\r\n print('\\nCreating ARRAY_GEOMETRY')\r\n print('------------------------------------')\r\n tbl_array_geometry = make_array_geometry(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_array_geometry = config_array_geometry(tbl_array_geometry,\r\n array_geometry_m)\r\n print repr(tbl_array_geometry.header)\r\n\r\n print('\\nCreating FREQUENCY')\r\n print('------------------------------------')\r\n tbl_frequency = make_frequency(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_frequency = config_frequency(tbl_frequency,\r\n external_params=external_params)\r\n print repr(tbl_frequency.header)\r\n\r\n print('\\nCreating SOURCE')\r\n print('------------------------------------')\r\n tbl_source = make_source(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_source = config_source(tbl_source, source)\r\n print repr(tbl_source.header)\r\n\r\n print('\\nCreating ANTENNA')\r\n print('------------------------------------')\r\n tbl_antenna = make_antenna(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_antenna = config_antenna(tbl_antenna)\r\n print repr(tbl_antenna.header)\r\n\r\n print('\\nCreating UV_DATA')\r\n print('------------------------------------')\r\n\r\n print 'Data dimensions: %i dumps, %i chans, %i pols, %i data' % (\r\n n_uvspectra, mfreq, mstokes, mcomplex)\r\n\r\n print('Generating blank UV_DATA rows...')\r\n tbl_uv_data = make_uv_data(config=configxml, num_rows=n_uvspectra,\r\n external_params=external_params)\r\n\r\n timesorted = np.argsort(times)\r\n\r\n for k in timesorted:\r\n tbl_uv_data.data[k]['FLUX'] = uv_data[k,0,0,:,0,:].ravel()\r\n tbl_uv_data.data[k]['UU'] 
= u[k] / light_speed\r\n tbl_uv_data.data[k]['VV'] = v[k] / light_speed\r\n tbl_uv_data.data[k]['WW'] = w[k] / light_speed\r\n tbl_uv_data.data[k]['BASELINE'] = baselines[k]\r\n tbl_uv_data.data[k]['DATE'] = dates[k]\r\n tbl_uv_data.data[k]['TIME'] = times[k]\r\n tbl_uv_data.data[k]['SOURCE'] = 1\r\n tbl_uv_data.data[k]['FREQID'] = 1\r\n tbl_uv_data.data[k]['INTTIM'] = 3\r\n\r\n print repr(tbl_uv_data.header)\r\n \r\n hdulist = pyfits.HDUList(hdus=\r\n [hdu,\r\n tbl_array_geometry,\r\n tbl_source, \r\n tbl_frequency,\r\n tbl_antenna,\r\n tbl_uv_data])\r\n\r\n print('Verifying integrity...') \r\n hdulist.verify()\r\n \r\n if(os.path.isfile(idifitsfile)):\r\n print('Removing existing file...')\r\n os.remove(idifitsfile)\r\n print('Writing to file...')\r\n hdulist.writeto(idifitsfile)\r\n\r\n print('Done.')\r\n\r\n self.result['idifitsfile'] = idifitsfile\r\n\r\n return self.result", "def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)", "def write_seisan(filename, args):\n bf = BaikalFile(filename)\n if not bf.valid:\n print(\"Invalid file {}\".format(filename))\n return\n header = bf.MainHeader\n # datetime\n date = datetime.datetime(header[\"year\"], header[\"month\"], header[\"day\"])\n delta = datetime.timedelta(seconds=header[\"to\"])\n dt = date + delta\n _time = dt.time() # time\n # make utc datetime\n utcdatetime = UTCDateTime(date.year, date.month, date.day,\n _time.hour, _time.minute, _time.second, _time.microsecond, precision=3)\n bf.traces = bf.traces.astype(np.int32)\n bf.traces = bf.traces[:3]\n traces = []\n for channel, data in zip(CHANNELS, bf.traces):\n stats = DEFAULT_STATS.copy()\n stats.update({\n \"station\": header['station'].upper()[:3],\n 'channel': channel,\n 'sampling_rate': int( 1./header[\"dt\"] ),\n \"delta\": header[\"dt\"],\n \"npts\": data.size,#shape[0]\n 'starttime': utcdatetime,\n })\n # save coordinates\n stats['gse2'][\"lat\"] = header['latitude']\n stats['gse2'][\"lon\"] = header[\"longitude\"]\n trace = Trace(data=data, header=stats)\n traces.append(trace)\n # create Stream\n stream = Stream(traces)\n #== write seisan\n # date\n name = \"{year:04}-{month:02}-{day:02}\".format(**header)\n # time\n name += \"-{t.hour:02}-{t.minute:02}\".format(t=stats['starttime'])\n # + station name + Day_of_Year\n name += \"{0}__{1:03}\".format(stats[\"station\"], stats['starttime'].timetuple().tm_yday)\n print('Writing GSE2 file %s.' 
% name)\n writeGSE2(stream, os.path.join(args.outdir, name))", "def in_situ_tair_snd(sno0, year0=2016, npr_date=-1, ascat_date=-1):\n if npr_date < 0:\n npr_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n if ascat_date < 0:\n ascat_date = 100*24*3600 + bxy.get_total_sec('%d0101' % year0)\n snd_name = \"snow\"\n print 'the %d was processing' % sno0\n sno = str(sno0)\n tair_name = \"Air Temperature Observed (degC)\"\n if sno0 in [2065, 2081]:\n if year0 == 2016:\n tair_name = \"Air Temperature Average (degC)\"\n # read measurements\n hr_list = [5, 7, 9, 14, 18, 21]\n t_air_one_year = read_site.in_situ_series(sno, y=year0, hr=hr_list) # [:, :, 0] temperature at 7:00 (local)\n # time_above_zero_0 = data_process.zero_find(t_air_one_year[:, :, 0], w=10, th=-0.1) #\n # time_above_zero_1 = data_process.zero_find(t_air_one_year[:, :, 1], w=10, th=-0.1)\n # time_above_zero_2 = data_process.zero_find(t_air_one_year[:, :, 3], w=10, th=-0.1)\n time_above_zero_list = [data_process.zero_find(t_air_one_year[:, :, i], w=10, th=-0.1)\n for i in range(0, len(hr_list))]\n date_tuple = bxy.time_getlocaltime(time_above_zero_list, ref_time=[2000, 1, 1, 0], t_source='US/Alaska')\n t_value, t_date = read_site.read_measurements\\\n (sno, tair_name, np.arange(1, 365), year0=year0, hr=18, t_unit='sec')\n\n\n tair_zero_day2 = data_process.zero_find(np.array([t_date, -t_value]), w=7, th=0) # in unit of sec\n tair_zero_day1 = data_process.zero_find_gt(np.array([t_date, t_value]), w=7, th=1)\n air_win = 7 # check days during window shown air temperature gt 0 degC\n w, w_valid = data_process.n_convolve3(t_value, air_win)\n air0_index0 = np.where(w>5)\n for ind0 in air0_index0[0]:\n if t_date[ind0] > bxy.get_total_sec('%d0307' % year0):\n tair_zero_day = t_date[ind0] - air_win*24*3600\n break\n # check\n zero_date = bxy.time_getlocaltime([tair_zero_day,tair_zero_day2, npr_date[0], ascat_date[0]],\n ref_time=[2000, 1, 1, 0], t_source=\"US/Alaska\")[-2]\n i_zero = np.where(bxy.time_getlocaltime(t_date, ref_time=[2000, 1, 1, 0],\n t_source=\"US/Alaska\")[-2] == zero_date[0])[0][0]\n t_check = t_value[i_zero - 3: i_zero + 4]\n air_0, air00 = read_site.read_measurements(sno, tair_name, 366+np.arange(50, 70), hr=18)\n a_extend = np.array([-3600*24, 3600*24])\n period0, period1 = np.array(sorted([tair_zero_day, npr_date])) + a_extend, \\\n np.array(sorted([tair_zero_day, ascat_date])) + a_extend\n snow_value, snow_date = read_site.read_measurements\\\n (sno, snd_name, np.arange(1, 365), year0=year0, hr=0, t_unit='sec')\n # get the in situ measurements during a period\n snow2date0 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period0)\n snow2date1 = data_process.measurements_slice(np.array([snow_date, snow_value]),\n peroid=period1)\n air2date0, air2date1 = data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period0),\\\n data_process.measurements_slice(np.array([t_date, t_value]),\n peroid=period1)\n return tair_zero_day, snow2date0, snow2date1, air2date0, air2date1", "def plot_seaice_trend(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/trends/SIC/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += 
temporal_resolution\n title += ' SIE trends'\n\n\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n\n if seaice_source == 'nsidc':\n seaice = seaice * area /250\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = xr.apply_ufunc(scipy.stats.linregress, seaice[seaicename].time.values.astype(float), seaice[seaicename], input_core_dims=[['time'],['time']], vectorize=True, dask='parallelized', output_dtypes=[float]*5, output_core_dims=[[]]*5)\n if seaice_source =='ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename])\n \n seaice_m = seaice_m * 1e9 * 60 * 60 * 24 * 365\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n # seaice_m = seaice_m*area\n seaice_m = seaice_m.where(seaice_m != 0)\n # seaice_m = seaice_m.where(seaice_p_value <= 0.05)\n max_ = seaice_m.max()\n min_ = seaice_m.min() \n # max_ = 1\n divnorm = TwoSlopeNorm(vmin=min_, vcenter=0, vmax=max_)\n fig = plt.figure(figsize = (5,5))\n ax = fig.add_subplot(111, projection = ccrs.SouthPolarStereo())\n # Plotting\n contor = ax.contourf(seaice_m.x, seaice_m.y, seaice_m, cmap = 'RdBu', levels = 11, norm = divnorm, transform=ccrs.SouthPolarStereo())\n ax.coastlines()\n ax.set_axis_off()\n cbar = plt.colorbar(contor)\n cbar.set_label('Trend in SIE (km$^2$ yr$^{-1}$)')\n plt.title(title)\n plt.savefig(imagefolder + seaicename + '.pdf')\n plt.show()", "def plot_seaice_predict(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/prediction/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += temporal_resolution\n title += ' SIE prediction'\n\n\n# Loading Seaice Trends\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n seaice = seaice\n\n\n# Index contributions\n filename = f'processed_data/regressions/spatial_multiple/regr_{temp_decomp}_{temporal_resolution}_{dt}_{spatial_resolution}'\n dataset = xr.open_dataset(filename + '.nc')\n indicies = np.array([i for i in dataset])\n values = np.array([dataset[i].values for i in dataset])\n index_data = {}\n for indexname in indicies[:-1]:\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n index_data[indexname] = xr.open_dataset('processed_data/INDICIES/' + filename +'.nc')[indexname]\n index_data[indexname] = (index_data[indexname] - index_data[indexname].mean()) \n index_data[indexname] = index_data[indexname] / index_data[indexname].std()\n\n times = list(set.intersection(set(seaice.time.values), *(set(index_data[i].time.values)for i in indicies[:-1])))\n\n prediction = seaice.copy() * 0\n for indexname in indicies:\n if indexname in index_data.keys():\n prediction += index_data[indexname] * dataset[indexname]\n else:\n prediction += dataset[indexname]\n\n seaice = seaice.sortby('time').sel(time=times).sortby('time')\n prediction = 
prediction.sortby('time').sel(time=times).sortby('time')\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # ax2 = ax.twinx()\n # ax2.plot([],[])\n ln1 = ax.plot(seaice.time, (seaice[seaicename]*area/250).sum(dim = ('x', 'y')), label = 'SIE')\n ln2 = ax.plot(seaice.time, (prediction[seaicename]*area/250).sum(dim = ('x', 'y')), label = 'Prediction')\n # ax.set_xlim([min(times),max(times)])\n\n lines = ln1 + ln2\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 2, loc = 'upper right')\n\n\n data = (prediction[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10')\n data = (seaice[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#177E89')\n plt.title(title)\n\n plt.savefig(imagefolder + seaicename + '.pdf')\n plt.show()", "def plot_index_sic_timeseries(anomlous = False, temporal_resolution = 'monthly', detrend = False, imagefolder = 'images/timeseries/SIC_INDICIES', indexname = 'SAM', n = 5, seaice_source = 'nsidc'):\n output_folder = 'processed_data/'\n\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n if detrend:\n dt = 'detrended'\n else:\n dt = 'raw'\n\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n indicies = xr.open_dataset(output_folder + 'INDICIES/' + filename +'.nc')[indexname]\n data = indicies.copy()\n data = data.loc[data.time.dt.year >= 1979]\n seaicename = f'{temp_decomp}_{temporal_resolution}_{n}_{dt}'\n\n seaice = xr.open_dataset(output_folder + 'SIC/' + seaicename +'.nc')\n\n\n times = list(set.intersection(set(seaice.time.values), set(data.time.values)))\n\n seaice = seaice_area_mean(seaice.sel(time=times).sortby('time'), 1)\n data = data.sel(time=times).sortby('time')\n\n\n if seaice_source == 'ecmwf':\n seaice = xr.open_dataset(output_folder + 'ERA5/SIC/' + seaicename +'.nc')\n if seaice_source == 'ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename].mean(dim = ('longitude', 'latitude')))\n if seaice_source == 'nsidc':\n mean_seaice = seaice_area_mean(seaice[seaicename],1)\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(mean_seaice.time.values.astype(float), mean_seaice)\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n title += dt + ' '\n\n title += temporal_resolution\n title += f' mean {indexname} and SIC'\n fig, ax = plt.subplots()\n ax2 = plt.twinx(ax)\n ax2.plot([],[])\n\n if anomlous or detrend: ax.axhline(0, alpha = 0.5)\n\n ln1 = ax.plot(data.time, data, label = f'{indexname}', color = '#EA1B10')\n ax.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10')\n if seaice_source == 'ecmwf':\n ln2 = ax2.plot(seaice.time, seaice[seaicename].mean(dim = ('longitude', 'latitude')), label = 'SIC', color = '#177E89')\n if seaice_source == 'nsidc':\n ln2 = ax2.plot(mean_seaice.time, mean_seaice, label = 'SIC', color = '#177E89')\n ax2.plot(seaice.time, 
seaice_m * seaice.time.values.astype(float) + seaice_b, color = '#177E89')\n\n yabs_max = abs(max(ax.get_ylim(), key=abs))\n ax.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n if anomlous or detrend:\n yabs_max = abs(max(ax2.get_ylim(), key=abs))\n ax2.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n\n # ylabels\n ax.set_ylabel(f'{indexname}')\n ax2.set_ylabel(f'Mean SIC')\n\n # legend\n lines = ln1 + ln2\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 2, loc = 'upper right')\n\n plt.title(title)\n plt.savefig(imagefolder + f'/SIC_{indexname}_{filename}_{seaice_source}' + '.pdf')\n plt.show()", "def snr(mag=20, itime=1., read=24.5, sky=8.43, npix=24., zero=26.44, dark=0.0):\n # 2009-02-20 14:40 IJC: Initiated\n \n star = itime * 10**(0.4*(zero-mag))\n noise = npix * (itime*(sky+dark)+read**2)\n\n return star * (star+noise)**-0.5", "def writeNoise(self):\n\n if (self.noise_file == None or self.noise_file == \"\"):\n return\n ofname = self.noise_file\n ofh = open(ofname,'w')\n\n # these have to be there as long as we've read the FAST file already\n ## not true: we don't store these in the dict.\n have_data = False\n if (\"TipRad\" in self.fstDict and 'TowerHt' in self.fstDict and 'Twr2Shft' in self.fstDict):\n tiprad = self.fstDict['TipRad']\n towerht = self.fstDict['TowerHt']\n twr2shft = self.fstDict['Twr2Shft']\n have_data = True\n\n for line in self.lines_noise:\n if (have_data and line.find('Observer location') >= 0):\n xdist = -1.0 * (tiprad + (towerht + twr2shft))\n ofh.write('{:.1f} 0.0 0.0'.format(xdist))\n ofh.write(' (x,y,z) Observer location in tower-base coordinate system. Use -(RotRad+HubHt)\\n')\n else:\n ofh.write(line)\n ofh.close()", "def generate_nrrd(file):\n for root, dirs, files in os.walk(file):\n path = root.split(file)\n if path[1] != \"\":\n patient_id = int(path[1].split('/')[1][3:])\n path = file + \"_nrrd\" + path[1]\n # if path.find(\"frisk\")==-1 & path.find(\"M+\")==-1 & path.find('T2M')==-1:\n if (path.find('T2M') != -1 & path.find('frisk') ==\n -1 & path.find('+') == -1\n ) or path.find('masks') != -1: # Only T2M or mask can be found\n os.makedirs(path, exist_ok=True)\n print(path)\n Nrrd = ImageCollection(os.path.join(root, \"*.tiff\"),\n plugin='tifffile',\n load_func=convert_to_gray)\n Nrrd = np.asarray(Nrrd)\n\n if get_image_info(patient_id):\n print(patient_id)\n (spacings, thickness) = get_image_info(patient_id)\n thicknesses = [float('nan'), float('nan'), thickness]\n spacing_direction = np.eye(3)\n # Note: All header fields are specified in Fortran order,\n # per the NRRD specification, regardless of the index order. 
For example,\n # a C-ordered array with shape (60, 800, 600) would have a sizes field of (600, 800, 60).\n if len(Nrrd) > 0:\n header = {\n 'spacings': spacings,\n 'thicknesses': thicknesses\n \n }\n nrrd.write(os.path.join(path,\n str(patient_id) + '.nrrd'),\n Nrrd,\n header,\n index_order='C')", "def plot_snr(tseries, lb=0, ub=None, fig=None):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n ax_spectra = fig.add_subplot(1, 2, 1)\r\n ax_snr_info = fig.add_subplot(1, 2, 2)\r\n\r\n A = []\r\n info = []\r\n s_n_r = []\r\n coh = []\r\n noise_spectra = []\r\n signal_spectra = []\r\n #If you only have one channel, make sure that everything still works by\r\n #adding an axis\r\n if len(tseries.data.shape) < 3:\r\n this = tseries.data[np.newaxis, :, :]\r\n else:\r\n this = tseries.data\r\n\r\n for i in range(this.shape[0]):\r\n A.append(nta.SNRAnalyzer(ts.TimeSeries(this[i],\r\n sampling_rate=tseries.sampling_rate)))\r\n info.append(A[-1].mt_information)\r\n s_n_r.append(A[-1].mt_snr)\r\n coh.append(A[-1].mt_coherence)\r\n noise_spectra.append(A[-1].mt_noise_psd)\r\n signal_spectra.append(A[-1].mt_signal_psd)\r\n\r\n freqs = A[-1].mt_frequencies\r\n\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, lb, ub)\r\n freqs = freqs[lb_idx:ub_idx]\r\n\r\n coh_mean = np.mean(coh, 0)\r\n snr_mean = np.mean(s_n_r, 0)\r\n info_mean = np.mean(info, 0)\r\n n_spec_mean = np.mean(noise_spectra, 0)\r\n s_spec_mean = np.mean(signal_spectra, 0)\r\n\r\n ax_spectra.plot(freqs, np.log(s_spec_mean[lb_idx:ub_idx]), label='Signal')\r\n ax_spectra.plot(freqs, np.log(n_spec_mean[lb_idx:ub_idx]), label='Noise')\r\n ax_spectra.set_xlabel('Frequency (Hz)')\r\n ax_spectra.set_ylabel('Spectral power (dB)')\r\n\r\n ax_snr_info.plot(freqs, snr_mean[lb_idx:ub_idx], label='SNR')\r\n ax_snr_info.plot(np.nan, np.nan, 'r', label='Info')\r\n ax_snr_info.set_ylabel('SNR')\r\n ax_snr_info.set_xlabel('Frequency (Hz)')\r\n ax_info = ax_snr_info.twinx()\r\n ax_info.plot(freqs, np.cumsum(info_mean[lb_idx:ub_idx]), 'r')\r\n ax_info.set_ylabel('Cumulative information rate (bits/sec)')\r\n return fig", "def noe_analysis():\n\n files = ['f34k_7p2_noe_heights.txt', 'dphs_7p4_noe_heights.txt']\n proteins = ['f34k','dphs']\n\n heights = [nmrfn.parse_noe(f) for f in files]\n heights = pd.concat(heights, axis=1, keys=proteins)\n\n results = [noe_calcs(heights[i]) for i in proteins]\n results = pd.concat(results, axis=1, keys=proteins)\n\n fig = plt.figure(figsize=(10,6))\n val = plt.subplot(211)\n dif = plt.subplot(212)\n \n seqmin = 6\n seqmax = 144\n noemin = 0.5\n noemax = 0.95\n\n dy = results.dphs.noe - results.f34k.noe\n dif_plot(val, dy.index, dy.ix[8:141], seqmin, seqmax)\n noe_plot(proteins, results, seqmin, seqmax, noemin, noemax, xax=False)\n\n s1 = plt.Rectangle((0, 0), 1, 1, fc=proteins['f34k'])\n s2 = plt.Rectangle((0, 0), 1, 1, fc=proteins['dphs'])\n val.legend([s1, s2], ['F34K', '∆+PHS'], loc=8)\n\n title = 'HN NOE: F34K pH 7.2 compared with ∆+PHS pH 7.4'\n savefile = 'f34k_7p2_noe_analysis.pdf'\n save_plot(title, savefile)", "def plot_seaice_timeseries(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/SIC/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += 
temporal_resolution\n title += ' mean SIC in Antarctica'\n\n\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n\n if seaice_source == 'nsidc':\n seaice = seaice\n mean_seaice = seaice_area_mean(seaice[seaicename],1)\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(mean_seaice.time.values.astype(float), mean_seaice)\n if seaice_source =='ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename].sum(dim = ('longitude', 'latitude')))\n ax = plt.gca()\n if anomlous or detrend: ax.axhline(0, alpha = 0.5)\n if seaice_source == 'nsidc':\n mean_seaice = seaice_area_mean(seaice[seaicename],1)\n plt.plot(seaice.time, mean_seaice)\n\n if seaice_source == 'ecmwf':\n plt.plot(seaice.time, seaice[seaicename].mean(dim = ('longitude', 'latitude')))\n plt.plot(seaice.time, (seaice_m * seaice.time.values.astype(float) + seaice_b), color = '#177E89')\n plt.title(title)\n plt.savefig(imagefolder + seaicename+f'_{seaice_source}.pdf')\n plt.show()", "def SaveNIFTI(data, file_path):\n if(np.iscomplex(data).any()):\n data = abs(data)\n nii = nib.Nifti1Image(data, np.eye(4)) \n nib.save(nii, file_path)", "def plotSate(s,i,seed):\r\n fig, ax = plt.subplots()\r\n\r\n im = ax.imshow(s)\r\n\r\n plt.xticks([i for i in range(dim)], \"\")\r\n plt.yticks([i for i in range(dim)], \"\")\r\n\r\n fig.tight_layout()\r\n plt.savefig(\"Systems/\" + str(dim) + \"_\" + str(seed) + \"/Images/\" + str(i) +\r\n \".jpeg\",quality=80,optimize=True,\r\n dpi=80,progressive=True,transparent=True)\r\n fig.clear()\r\n plt.close(fig)", "def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):\n self.barrier() # wait for other nodes\n \n filename = str(self.pickle_prefix) + \"_results_pop_cnoise.p\"\n filepath = self.data_dir + \"/\" + filename\n \n if self.id == 0: print \"- filepath:\", filepath \n \n if self.do_run or (os.path.isfile(filepath) is False):\n\n tstart = 0; \n fs = 1 / self.dt # sampling rate \n fmax = fs / 2 # maximum frequency (nyquist)\n \n t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!\n\n #print self.syn_ex_dist\n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_ex_dist == []):\n for nt in range(self.n_celltypes): # loop over all cells\n #print \"nt\", nt\n if hasattr(self.cells[nt][0], 'input_vec'):\n self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!\n else: \n self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!\n \n #print self.syn_ex_dist\n \n if (self.syn_ex_dist[0] == []):\n nemax = 1\n else:\n nemax = max([item for sublist in self.syn_ex_dist for item in sublist])\n \n if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)\n for nt in range(self.n_celltypes): # loop over all cells\n self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!\n \n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_inh_dist[0] == []):\n nimax = 0\n else:\n nimax = max([item for sublist in self.syn_inh_dist for item in sublist]) \n \n #print \"self.syn_inh_dist, self.syn_ex_dist\", self.syn_inh_dist, self.syn_ex_dist\n \n n_noise = max([nemax,nimax]) # number of noise sources\n #print 
n_noise,nemax,nimax\n # create reproduceable input\n noise_data = []\n\n for nj in range(n_noise):\n \n if self.id == 0: # make sure all have the same signal !!!\n if len(freq_used) == 0: \n noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)\n else:\n noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal\n else:\n noise_data0 = np.empty(len(t_noise), dtype=np.float64)\n\n noise_data0 = self.broadcast(noise_data0, fast = True) \n \n noise_data.append(noise_data0)\n noise_data0 = [] \n \n noise_data_points = len(noise_data[0]) \n\n # Create signal weight vector inh_factor if it is not fully given\n if len(noise_data) > len(inh_factor):\n inh_factor = [inh_factor[0]] * len(noise_data) \n print \"inh_factor:\", inh_factor\n\n #if equi:\n #pass\n # tstop = t_stim\n \n if max(self.n_syn_ex) == 0: # this means current input\n \n self.set_IStim() # sets amp\n \n if self.fluct_s != []:\n if self.fluct_s[self.a_celltype[0]] > 0:\n if self.id == 0: print \"- adding i fluct\"\n self.connect_fluct()\n \n for i, m in enumerate(self.method_interpol):\n if \"syn\" in m: self.method_interpol[i] = \"syn \" + str(self.syn_tau1/ms) + \"/\" + str(self.syn_tau2/ms) + \"ms\"\n if \"bin\" in m: self.method_interpol[i] = \"bin \" + str(self.bin_width/ms) + \"ms\"\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0\n stimulus.append(stimulus0)\n tstop = t[-1]\n \n self.set_IPlay2(stimulus, t)\n if self.id == 0: print \"- starting colored noise transfer function estimation! with amp = \" + str(np.round(self.amp[self.a_celltype[0]],4)) + \", ihold = \" + str(np.round(self.ihold[self.a_celltype[0]],4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n else:\n\n self.give_freq = False\n ihold = self.set_i(self.ihold) # just sets amp, ihold should not change! \n\n if 'gsyn_in' not in self.method_interpol: \n pass\n else:\n self.g_syn_ex = [1]*len(self.N)\n \n \n if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):\n if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):\n if self.id == 0: print \"- adding g fluct\"\n self.connect_gfluct(E_i=-65)\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp\n stimulus.append(stimulus0)\n \n noise_data = [] \n tstop = t[-1]\n \n if self.N[self.a_celltype[0]] > 1:\n self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)\n if self.id == 0: print \"- add random start\"\n \n #print \"Enter Synplay()\"\n self.set_SynPlay(stimulus, t, t_startstop = t_startstop) \n #print \"Exit Synplay()\"\n\n if self.id == 0: print \"- starting colored noise transfer function estimation with synaptic input! 
with amp = \" + str(np.round(self.amp,4)) + \", ihold = \" + str(np.round(self.ihold,4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n amp_vec = []\n mag_vec = [] \n pha_vec = []\n freq_used = []\n ca = []\n SNR_mat = []\n VAFf_mat = []\n Qual_mat = []\n CF_mat = [] \n VAF_mat = []\n stim = []\n stim_re_mat = []\n resp_mat = []\n current_re = []\n ihold1 = []\n tk = []\n K_mat = []\n gsyn_in = []\n fmean = []\n fmax = [] \n fmstd = [] \n fcvm = [] \n fmeanA = []\n fmaxA = [] \n fmstdA = [] \n fcvmA = [] \n t_all_vec_input_sorted = []\n id_all_vec_input_sorted = []\n \n if (self.id == 0) and (max(self.n_syn_ex) > 0):\n print range(self.n_celltypes), np.shape(self.t_all_vec_input)\n for l in range(self.n_celltypes): \n ie = argsort(self.t_all_vec_input[l]) \n t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )\n id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )\n \n #if (self.id == 0): \n # print self.g_syn_ex\n # print np.array(self.g_syn_ex)>= 0\n \n #print \"g_syn_ex:\",self.g_syn_ex\n if np.array(np.array(self.g_syn_ex)>= 0).any():\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:\n print \"- Equilibrate!\"\n self.run(tstop, do_loadstate = False)\n m = md5.new()\n cell_exe_new = self.cell_exe[0]\n m.update(cell_exe_new)\n filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'\n self.cells[self.a_celltype[0]][0].get_states(filename)\n else:\n self.run(tstop, do_loadstate = False)\n \n i_startstop = []\n \n results = self.get(t_startstop, i_startstop) \n time = results.get('time')\n current = results.get('current') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n gsyn = results.get('gsyn') \n freq_times = results.get('freq_times')\n spike_freq = results.get('spike_freq')\n t_all_vec_vec = results.get('t_all_vec_vec')\n id_all_vec_vec = results.get('id_all_vec_vec')\n gsyns = results.get('gsyns')\n gsyn_in = results.get('gsyn_in')\n \n fmax = results.get('fmax')\n fmstd = results.get('fmstd')\n fcvm = results.get('fcvm')\n \n fmeanA = results.get('fmeanA') \n fmaxA = results.get('fmaxA')\n fmstdA = results.get('fmstdA')\n fcvmA = results.get('fcvmA')\n \n fbaseA = results.get('fbaseA') \n fbase = results.get('fbase')\n fbstdA = results.get('fbstdA')\n \n \n else: # do not run, analyse input!!!\n \n time = t\n voltage = []\n for l in range(self.n_celltypes): \n voltage.append(np.zeros(len(t)))\n current = []\n \n freq_times = []\n spike_freq = []\n gsyn = []\n gsyn_in = []\n \n t_all_vec_vec = []\n id_all_vec_vec = []\n \n fmean = []\n fmax = []\n fmstd = []\n fcvm = []\n fstdm = []\n \n fmeanA = []\n fmaxA = []\n fmstdA = []\n fcvmA = []\n fbaseA = []\n fbase = []\n fbstdA = []\n \n if self.id == 0:\n \n current = self.n_train_ex\n \n #t_all_vec = self.t_all_vec_input\n #id_all_vec = self.id_all_vec_input\n\n #ie = argsort(t_all_vec) \n #t_all_vec_vec.append( t_all_vec[ie] )\n #id_all_vec_vec.append( id_all_vec[ie].astype(int) )\n \n t_all_vec_vec = t_all_vec_input_sorted\n id_all_vec_vec = id_all_vec_input_sorted\n \n freq_times = arange(0, tstop, self.bin_width)\n spike_freq = np.zeros(len(freq_times))\n \n for j in self.a_celltype:\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)\n\n if self.tau2_ex[0] > 0:\n spike_freq = np.concatenate((zeros(1),num_spikes)) \n print \"NOSYN TEST: start convolution with Ksyn\"\n Ksyn = 
syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0]) \n Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))\n spike_freq = np.convolve(Ksyn, spike_freq, mode='same')\n print \"NOSYN TEST: convolution finished\"\n else:\n\n if isinstance(self.factor_celltype[j], ( int, long ) ):\n f = self.factor_celltype[j] \n else:\n f = self.factor_celltype[j][0] \n \n spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width\n\n fmean.append(self.fmean_input)\n fmax.append(self.fmax_input) \n fmstd.append(self.fmstd_input) \n fcvm.append(self.fcvm_input) \n fstdm.append(self.fstdm_input)\n\n if self.no_fmean == True:\n fmean.append(ihold)\n \n #plt.figure('spike_freq') \n #plt.plot(freq_times, spike_freq)\n #plt.savefig(\"./figs/Pub/Spike_freq_\" + str(self.pickle_prefix) + \".pdf\", dpi = 300, transparent=True) # save it \n #plt.clf()\n \n fmeanA = fmean[0]\n fmaxA = fmax[0]\n fmstdA = fmstd [0] \n fcvmA = fcvm[0]\n fstdmA = fstdm[0]\n \n \n if self.id == 0: \n \n if any([i<0 for i in inh_factor]):\n \n p0 = []\n inhf_idx = []\n for i, inhf in enumerate(inh_factor):\n if inhf < 0: \n p0.append(0) \n inhf_idx.append(i)\n \n plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))\n p = plsq\n \n ip = 0\n for i in inhf_idx:\n inh_factor[i] = p[ip]\n ip += 1\n \n\n print \"Final inh_factor: \", inh_factor\n \n \n results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times, \n t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, \n do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)\n \n mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean') \n SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat') \n stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat') \n \n \n self.barrier() # wait for other nodes\n \n \n if self.id == 0:\n \n if t_qual > 0:\n #print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt\n current_re = current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]\n current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]\n \n if len(self.i_holdrs) > 0:\n ihold1 = self.i_holdrs[self.a_celltype[0]][0]\n else:\n ihold1 = []\n \n for l in range(len(self.method_interpol)): # unwrap \n pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase\n \n # only return fraction of actual signal, it is too long!!! 
\n if time[-1] > self.tmax: \n imax = -1*int(self.tmax/self.dt)\n time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]\n for n in range(self.n_celltypes): \n voltage[n] = voltage[n][imax:]\n \n if freq_times != []: \n if freq_times[-1] > self.tmax:\n imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency \n freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2] \n \n bvec = [\"_syn\" in st for st in self.method_interpol]\n if np.any(bvec):\n # normalize synaptic integration with others \n mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0] \n \n if self.id == 0: print \"start pickle\"\n \n results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,\n 'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,\n 'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec} \n \n if self.id == 0:\n if self.dumpsave == 1:\n pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n print \"pickle done\" \n \n \n if self.plot_train:\n \n for a in self.a_celltype:\n\n #i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]\n #i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]\n \n #t_all_cut = t_all_vec_vec[a][i_start:i_stop]\n #id_all_cut = id_all_vec_vec[a][i_start:i_stop]\n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(use_spikes,use_id,'|', ms=2)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n \n plt.clf()\n \n if len(t_all_cut) > 0:\n \n tbin = 100*ms\n tb = np.arange(0,t[-1],tbin)\n [all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)\n all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin\n \n plt.figure('results_train2') \n plt.plot(tb,all_rate)\n plt.savefig(\"./figs/Pub/PSTH_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n plt.figure('results_noise') \n plt.plot(time,current)\n plt.savefig(\"./figs/Pub/Noise_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n \n if self.plot_input:\n \n if len(t_all_vec_input_sorted[0]) > 0:\n \n i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]\n i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]\n \n t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]\n id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]\n 
\n plt.figure('results_input') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(t_all_cut,id_all_cut,'|', ms=2)\n plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Input_\" + str(self.pickle_prefix) + \"_N\" + str(self.N[self.a_celltype[0]]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n\n else:\n \n if self.id == 0:\n results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) )\n \n #print results\n #print {key:np.shape(value) for key,value in results.iteritems()}\n \n if self.minimal_dir: # save only info needed for plot\n \n print {key:np.shape(value) for key,value in results.iteritems()}\n \n if \"Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p\" in filename:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = [] \n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p\" in filename) \\\n :\n\n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in 
filename) \\\n or (\"Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig2_pop_transfer_\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n \n else:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n\n print {key:np.shape(value) for key,value in results.iteritems()}\n\n pickle.dump( results, gzip.GzipFile( self.minimal_dir + \"/\" + filename, \"wb\" ) ) \n \n else:\n results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],\n 'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],\n 'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]} \n \n if self.id == 0: \n\n if self.plot_train: \n\n for a in self.a_celltype:\n \n t1 = results.get('t1') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n fmax = results.get('fmax') \n fmstd = results.get('fmstd') \n \n \n if results.has_key('t_all_vec_vec'):\n \n if len(results['t_all_vec_vec']) > 0: \n t_all_vec_vec = results.get('t_all_vec_vec') \n id_all_vec_vec = results.get('id_all_vec_vec') \n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax97 = plt.subplot(1,1,1)\n ax97.plot(use_spikes,use_id,'|', ms=6)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, 
transparent=True) # save it \n\n \n plt.figure('results_voltage') \n ax99 = plt.subplot(2,1,1)\n ax99.plot(t1,voltage[a])\n \n t_noise = arange(0, t_stim, self.dt)\n noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)\n stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) \n ax98 = plt.subplot(2,1,2)\n ax98.plot(t[0:10/self.dt],stimulus[0:10/self.dt],color='k')\n \n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Voltage_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.show()\n plt.clf()\n \n if (self.id == 0) and (do_csd == 1):\n Qual = results.get('Qual') \n for i, ii in enumerate(self.method_interpol):\n print \"\\n[QUAL:] Interpol:\", ii, \"SNR0:\", Qual[i,0,0], \"SNR_cutff:\", Qual[i,0,1], \"SNR_mean:\", Qual[i,0,2], \"\\n VAF0:\", Qual[i,1,0], \"VAF_cutff:\", Qual[i,1,1], \"VAF_mean:\", Qual[i,1,2], \"\\n CF(subtracted):\", Qual[i,2,0], \"VAF(subtracted):\", Qual[i,2,1] \n \n VAF = results.get('VAF')\n freq_used = results.get('freq_used') \n iend = mlab.find(freq_used >= self.xmax)[0] \n print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend])) \n \n self.barrier() # wait for other nodes\n \n return results", "def plot_sic_sic_timeseries(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/SIC/'):\n output_folder = 'processed_data/'\n\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n if detrend:\n dt = 'detrended'\n else:\n dt = 'raw'\n\n filename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n indicies = xr.open_dataset(output_folder + 'ERA5/SIC/' + filename +'.nc')[filename].mean(dim = ('longitude', 'latitude'))\n data = indicies.copy()\n data = data.loc[data.time.dt.year >= 1979]\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + 'SIC/' + seaicename +'.nc')[seaicename]\n\n times = list(set.intersection(set(seaice.time.values), set(data.time.values)))\n\n seaice = seaice_area_mean(seaice.sel(time=times).sortby('time'), 1)\n data = data.sel(time=times).sortby('time')\n\n seaice = seaice_area_mean(seaice,1)\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice.time.values.astype(float), seaice)\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n title += dt + ' '\n\n title += temporal_resolution\n title += f' mean ERA5 and SIC'\n fig, ax = plt.subplots()\n ax2 = plt.twinx(ax)\n ax2.plot([],[])\n\n if anomlous or detrend: ax.axhline(0, alpha = 0.5)\n\n ln1 = ax.plot(data.time, data, label = f'ERA5', color = '#EA1B10')\n ax.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10')\n ln2 = ax2.plot(seaice.time, seaice, label = 'SIC', color = '#177E89')\n ax2.plot(seaice.time, seaice_m * seaice.time.values.astype(float) + seaice_b, color = '#177E89')\n\n if anomlous or detrend:\n yabs_max = abs(max(ax2.get_ylim(), key=abs))\n ax2.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n\n yabs_max = abs(max(ax.get_ylim(), key=abs))\n ax.set_ylim(ymin=-yabs_max, ymax=yabs_max)\n\n # 
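Note: the routine above turns discrete spike times into a population rate by binning (num_spikes / bin_width / N, weighted by factor_celltype) and, in its "NOSYN TEST" branch, smoothing that rate with a left-zero-padded synaptic kernel via np.convolve(..., mode='same') before the transfer-function fit. Below is a minimal standalone sketch of that binning-plus-kernel-smoothing step, assuming a double-exponential kernel and illustrative tau/bin_width defaults; the function names and parameters here are assumptions for illustration, not the actual syn_kernel or class attributes used above.

import numpy as np

def double_exp_kernel(t, tau1, tau2):
    # double-exponential synaptic kernel (a stand-in for the syn_kernel above), normalized to unit sum
    k = np.exp(-t / tau2) - np.exp(-t / tau1)
    return k / k.sum()

def population_rate(spike_times, n_cells, bin_width=1e-3, tau1=1e-3, tau2=10e-3):
    # population PSTH: spikes per bin / (cells * bin width) gives the rate in Hz
    edges = np.arange(0.0, spike_times.max() + bin_width, bin_width)
    counts, _ = np.histogram(spike_times, bins=edges)
    rate = counts / (n_cells * bin_width)
    # left-pad the kernel with zeros so mode='same' keeps the filter causal,
    # mirroring the zeros(len(Ksyn)-1) padding used in the code above
    k = double_exp_kernel(np.arange(0.0, 10 * tau2, bin_width), tau1, tau2)
    k = np.concatenate((np.zeros(len(k) - 1), k))
    return edges[:-1], np.convolve(k, rate, mode='same')

# usage sketch: 100 cells firing at roughly 2 Hz for 2 s
# t_spk = np.sort(np.random.uniform(0.0, 2.0, size=400))
# t, r = population_rate(t_spk, n_cells=100)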
ylabels\n ax.set_ylabel(f'ECMWF')\n ax2.set_ylabel(f'Mean SIC')\n\n # legend\n lines = ln1 + ln2\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 2, loc = 'upper right')\n\n plt.title(title)\n plt.savefig(imagefolder + f'/SIC_ERA5_{filename}' + '.pdf')\n plt.show()", "def TICwriter(TIC, dataFile, saveDirectory):\n #Create savename from data file name:\n savefile = dataFile.split('/')[-1].split('.')[0] + '_TIC.png'\n #Create ouput directory:\n saveDirectory = os.path.join(saveDirectory, 'output/')\n os.makedirs(os.path.dirname(saveDirectory), exist_ok=True)\n #Plot figure:\n Plot = pl.figure()\n TICplot = Plot.add_subplot(111)\n TICplot.plot([d[0] for d in TIC], [d[1] for d in TIC])\n \n #Save and close plot:\n pl.savefig(saveDirectory + savefile)\n pl.close(Plot)", "def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())", "def plot_TSNE(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n utils.construct_set(dir, pattern, pattern_valid)\n X, y = utils.load_all()\n utils.plot_TSNE(X, y)\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')", "def process_nitrate(store, site):\n constituent = 'NitrateSurr'\n db_path = '/said/{}/'.format(site['id'])\n iv_path = db_path + 'iv'\n df = store.get(iv_path)\n\n n_error = np.maximum(0.5, df[constituent]*.1)\n df[constituent+'_U90.0'] = df[constituent] + n_error\n df['NitrateSurr_L90.0'] = df.NitrateSurr - n_error\n #clip values below 0\n df['NitrateSurr_L90.0'] = np.maximum(0, df['NitrateSurr_L90.0'])\n\n update_table(store, iv_path, df)", "def snr_stats(\r\n t,\r\n y,\r\n period,\r\n duration,\r\n T0,\r\n transit_times,\r\n transit_duration_in_days,\r\n per_transit_count,\r\n):\r\n\r\n snr_per_transit = numpy.zeros([len(transit_times)])\r\n snr_pink_per_transit = numpy.zeros([len(transit_times)])\r\n intransit = transit_mask(t, period, 2 * duration, T0)\r\n flux_ootr = y[~intransit]\r\n\r\n try:\r\n pinknoise = pink_noise(flux_ootr, int(numpy.mean(per_transit_count)))\r\n except:\r\n pinknoise = numpy.nan\r\n\r\n # Estimate SNR and pink SNR\r\n # Second run because now the out of transit points are known\r\n if len(flux_ootr) > 0:\r\n std = numpy.std(flux_ootr)\r\n else:\r\n std = numpy.nan\r\n for i in range(len(transit_times)):\r\n mid_transit = transit_times[i]\r\n tmin = mid_transit - 0.5 * transit_duration_in_days\r\n tmax = mid_transit + 0.5 * transit_duration_in_days\r\n if numpy.isnan(tmin) or numpy.isnan(tmax):\r\n idx_intransit = []\r\n 
mean_flux = numpy.nan\r\n else:\r\n idx_intransit = numpy.where(numpy.logical_and(t > tmin, t < tmax))\r\n if len(y[idx_intransit]) > 0:\r\n mean_flux = numpy.mean(y[idx_intransit])\r\n else:\r\n mean_flux = numpy.nan\r\n\r\n intransit_points = numpy.size(y[idx_intransit])\r\n try:\r\n snr_pink_per_transit[i] = (1 - mean_flux) / pinknoise\r\n if intransit_points > 0 and not numpy.isnan(std):\r\n std_binned = std / intransit_points ** 0.5\r\n snr_per_transit[i] = (1 - mean_flux) / std_binned\r\n else:\r\n snr_per_transit[i] = 0\r\n snr_pink_per_transit[i] = 0\r\n except:\r\n snr_per_transit[i] = 0\r\n snr_pink_per_transit[i] = 0\r\n\r\n return snr_per_transit, snr_pink_per_transit", "def writetipsy(self, outfile=None, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = self._base()\n if outfile is None: outfile = s.filename+'.gtp'\n print \"write tipsy file to \", outfile\n sout = new(star=self._nhalos) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None: hubble = s.properties['h']\n sout.properties['h'] = hubble\n ### ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc*sout.properties['h']\n lboxmpch = lboxkpc*sout.properties['h']/1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8./3.)**.5\n tipsymunitmsun = rhocrithhco * lboxmpch**3 / sout.properties['h']\n\n print \"transforming \", self._nhalos, \" halos into tipsy star particles\"\n for ii in xrange(self._nhalos):\n h = self[ii+1].properties\n sout.star[ii]['mass'] = h['m']/hubble / tipsymunitmsun\n ## tipsy units: box centered at 0. 
(assume 0<=x<=1)\n sout.star[ii]['x'] = h['pos'][0][0]/lboxmpch - 0.5\n sout.star[ii]['y'] = h['pos'][0][1]/lboxmpch - 0.5\n sout.star[ii]['z'] = h['pos'][0][2]/lboxmpch - 0.5\n sout.star[ii]['vx'] = h['vel'][0][0]/tipsyvunitkms\n sout.star[ii]['vy'] = h['vel'][0][1]/tipsyvunitkms\n sout.star[ii]['vz'] = h['vel'][0][2]/tipsyvunitkms\n sout.star[ii]['eps'] = h['r']/lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n print \"writing tipsy outfile %s\"%outfile\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout", "def summarize_series(fglob, outfile):\n with open(outfile, mode='w') as of:\n #Iterate over files\n flist = glob(fglob)\n flist = sorted(flist)\n lgrho = [] #list of log(rho) values, parallel to the list of rxnmap maps\n rxns = [] #list of maps of form 'rxn_name' --> energy release [erg/g/s]\n for f in flist:\n rxnmap = {}\n currxn = ''\n eps_nuc = ''\n for line in open(f,mode='r'):\n if not currxn and line.count('reaction name') == 1:\n i1 = line.index('<') + 1\n i2 = line.index('>')\n currxn = line[i1:i2]\n elif currxn and line.count('eps_nuc') == 1:\n eps_nuc = float(line.partition(':')[2].strip())\n rxnmap[currxn] = eps_nuc\n currxn = ''\n elif line.count('log rho') == 1:\n lgrho.append(line.partition('rho')[2].strip())\n srtmap = sorted(rxnmap.items(), key=operator.itemgetter(1), reverse=True) #sort on values\n rxns.append(srtmap)\n\n #Write header\n of.write('log(rho): ' + (' {:3.3s} |'*len(lgrho)).format(*lgrho) + '\\n')\n\n #Write rows of data for each logrho, include top ten rxns\n start = ' '\n for i in range(10):\n of.write(start)\n for tup in rxns:\n of.write('{:23s}'.format(tup[i][0]))\n of.write('\\n')\n of.write(start)\n for tup in rxns:\n of.write('{:<23.8e}'.format(tup[i][1]))\n of.write('\\n\\n')", "def runErdosRenyi(n,p):\n s = z.Optimize()\n g = ig.Graph.Erdos_Renyi(n, p, directed=True, loops=True)\n while g.is_dag():\n g = ig.Graph.Erdos_Renyi(n, p, directed=True, loops=True)\n\n return MFAS_set_cover(s,g), u.get_feedback_arc_set(g)", "def simul_and_export(file, config, i):\n\n simulate_UVSPEC(file, config)\n\n load_skymap(config)\n\n sim = files_sim(config)[i]\n export_sim_rad(sim, config)", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = \\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. 
\\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def AssignEpiNames(self):\n# Sort each run in the series by its acquisition time.\n epi_sort = self.epi_times.keys()\n epi_sort.sort()\n# Rewrite pfiles as an ordered list of p-files to be reconstructed.\n for idx in xrange(len(epi_sort)):\n entry = self.epi_times[epi_sort[idx]]\n info = self.info[entry]\n if info['data_filetype'] == 'ge_data':\n self.pfiles_recon.append(entry)\n info['run'] = '%0d' % (self.n_epi)\n self.n_epi = self.n_epi + 1\n plane = info['plane']\n if not self.epinames.has_key(plane):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n if n_epi > len(self.epinames[plane]['names'])-1:\n if self.epinames.has_key('any') and \\\n n_epi < len(self.epinames['any']):\n plane = 'any'\n n_epi = self.epinames[plane]['n_epi']\n else:\n self.DumpInfo()\n errstr = 'Not enough EPI names in template file'\n raise RuntimeError(errstr)\n# epiname = self.epinames[plane]['names'][n_epi]\n\n filebase = os.path.basename(self.epinames[plane]['names'][n_epi])\n epi_mf_outdir = os.path.dirname(\\\n self.epinames[plane]['names'][n_epi])\n\n epi_base = self.epinames[plane]['subdir'][n_epi]\n tmp_outdir = '%s/%s' % (self.tmpdir, epi_base)\n# Get output directory for raw epis.\n if self.no_motcorr:\n epi_r_outdir = epi_mf_outdir\n elif self.keep_epi_raw:\n epi_r_outdir = self.epi_scratch_space\n else:\n epi_r_outdir = tmp_outdir\n\n# Get output directory for motion-corrected epis.\n if self.keep_epi_mot:\n epi_m_outdir = self.epi_scratch_space\n else:\n epi_m_outdir = tmp_outdir\n info['outdir'] = epi_mf_outdir\n if n_epi < len(self.epinames[plane]['names']):\n epiname = self.epinames[plane]['names'][n_epi]\n info['imgfile'] = '%s/%s' % (epi_r_outdir, filebase)\n else:\n info['imgfile'] = '%s/s%0d_epi_run%0d' % \\\n 
(epi_r_outdir, n_epi, idx+1)\n self.epinames[plane]['n_epi'] += 1\n\n info['mot_file'] = '%s/%s_mtn.txt' % (epi_mf_outdir, filebase)\n info['censor_prefix'] = '%s/%s' % (epi_mf_outdir, filebase)\n info['imgfile_t'] = '%s/%s_t' % (epi_m_outdir, filebase)\n if self.no_motcorr:\n info['imgfile_m'] = None\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n if self.no_fmapcorr or info['fmap_entry'] is None:\n info['imgfile_m'] = '%s/%s_m' % (epi_mf_outdir, filebase)\n info['imgfile_mf'] = None\n info['imgfile_final'] = info['imgfile_m']\n else:\n info['imgfile_m'] = '%s/%s_m' % (epi_m_outdir, filebase)\n info['imgfile_mf'] = '%s/%s_mf' % (epi_mf_outdir, filebase)\n info['imgfile_final'] = info['imgfile_mf']\n info['skip'] = self.skip\n info['motion_ref_frame'] = self.tmplt['motion_ref_frame']\n\n info['motion_interp'] = self.tmplt['epi_motion_interp']\n if not info['motion_interp'].startswith('-'):\n info['motion_interp'] = '-%s' % info['motion_interp']\n\n info['filetype'] = self.tmplt['epi_file_format']\n info['valid'] = True\n self.info[entry] = info\n\n if not self.no_motcorr:\n epi_base = os.path.basename(info['imgfile_m'])\n info['matfile_m'] = '%s/%s.aff12.1D' % (info['outdir'], epi_base)\n info['matfile_mcat'] = '%s/%scat.aff12.1D' % (info['outdir'], epi_base)", "def writeRotNSeosfile(self, filename, tempPrescription, ye=None, addNeutrinoTerms=False):\n assert not(ye == 'NuFull' and 'quantity' not in tempPrescription), \\\n \"NuFull Beta-Eq not supported for fixedQuantityPrescription\"\n\n tempOfLog10Rhob = self.tempOfLog10RhobFuncFromPrescription(tempPrescription, ye)\n\n log10numberdensityMin = 2.67801536139756E+01\n log10numberdensityMax = 3.97601536139756E+01 # = 1e16 g/cm^3\n\n npoints = 600\n\n #dlogn = (log10numberdensityMax - log10numberdensityMin) / (npoints - 1.0)\n\n logns = numpy.linspace(log10numberdensityMin, log10numberdensityMax, npoints)\n\n outfile = open(filename, 'w')\n\n for logn in logns:\n numberdensityCGS = numpy.power(10.0, logn)\n rho_b_CGS = numberdensityCGS * consts.CGS_AMU\n logrho_b_CGS = numpy.log10(rho_b_CGS)\n\n temp = tempOfLog10Rhob(logrho_b_CGS)\n\n if ye is None and temp is not None:\n self.setBetaEqState({'rho':rho_b_CGS, 'temp': temp})\n elif ye is None and temp is None:\n quantity = tempPrescription['quantity']\n target = tempPrescription['target']\n self.setConstQuantityAndBetaEqState({'rho': rho_b_CGS},\n quantity,\n target)\n elif ye == 'NuFull':\n self.setNuFullBetaEqState({'rho': rho_b_CGS, 'temp': temp})\n else:\n self.setState({'rho': rho_b_CGS, 'temp': temp, 'ye': ye})\n\n logpress, logeps, munu = self.query(['logpress', 'logenergy', 'munu'])\n #print logpress, rho_b_CGS\n P_nu = 0.0\n if addNeutrinoTerms:\n P_nu = P_nu_of(rho_b_CGS, temp, munu)\n logpress = numpy.log10(numpy.power(10.0, logpress) + P_nu)\n eps_nu = 3.0 * P_nu / rho_b_CGS\n\n #Total energy density (1.0 + eps) is in AGEO units\n eps = (numpy.power(10.0, logeps) - self.energy_shift + eps_nu)\\\n * consts.AGEO_ERG / consts.AGEO_GRAM\n #print logpress\n #print\n totalEnergyDensity = rho_b_CGS * (1.0 + eps)\n logTotalEnergyDensity = numpy.log10(totalEnergyDensity)\n #print logrho_b_CGS ,logeps, numpy.power(10.0, logeps), eps, self.energy_shift\n #print logrho_b_CGS, eps, logpress\n outfile.write(\"{:24.14e}{:24.14e}{:24.14e}\\n\".format(logn,\n logTotalEnergyDensity,\n logpress))", "def TNG(self):\n \n import h5py as h5\n filename = localpath+'input/yields/TNG/SNIa.hdf5'\n # Read H5 file\n f 
= h5.File(filename, \"r\")\n \n indexing = {}\n indexing['H'] = 'Hydrogen'\n indexing['He'] = 'Helium'\n indexing['Li'] = 'Lithium'\n indexing['Be'] = 'Beryllium'\n indexing['B'] = 'Boron'\n indexing['C'] = 'Carbon'\n indexing['N'] = 'Nitrogen'\n indexing['O'] = 'Oxygen'\n indexing['F'] = 'Fluorine'\n indexing['Ne'] = 'Neon'\n indexing['Na'] = 'Sodium'\n indexing['Mg'] = 'Magnesium'\n indexing['Al'] = 'Aluminum'\n indexing['Si'] = 'Silicon'\n indexing['P'] = 'Phosphorus'\n indexing['S'] = 'Sulphur'\n indexing['Cl'] = 'Chlorine'\n indexing['Ar'] = 'Argon'\n indexing['K'] = 'Potassium'\n indexing['Ca'] = 'Calcium'\n indexing['Sc'] = 'Scandium'\n indexing['Ti'] = 'Titanium'\n indexing['V'] = 'Vanadium'\n indexing['Cr'] = 'Chromium'\n indexing['Mn'] = 'Manganese'\n indexing['Fe'] = 'Iron'\n indexing['Co'] = 'Cobalt'\n indexing['Ni'] = 'Nickel'\n indexing['Cu'] = 'Copper'\n indexing['Zn'] = 'Zinc'\n indexing['Ga'] = 'Gallium'\n indexing['Ge'] = 'Germanium'\n indexing['As'] = 'Arsenic'\n indexing['Se'] = 'Selenium'\n indexing['Br'] = 'Bromine'\n indexing['Kr'] = 'Krypton'\n indexing['Rb'] = 'Rubidium'\n indexing['Sr'] = 'Strontium'\n indexing['Y'] = 'Yttrium'\n indexing['Zr'] = 'Zirconium'\n indexing['Nb'] = 'Niobium'\n indexing['Mo'] = 'Molybdenum'\n \n \n self.elements = list(indexing.keys())\n \n self.table = {}\n \n self.metallicities = list([0.02]) # arbitrary since only one value\n self.masses = list([np.sum(f['Yield'].value)]) # sum of all yields\n \n names = ['Mass','mass_in_remnants']+self.elements\n \n yield_subtable = {}\n \n base = np.zeros(len(self.masses))\n list_of_arrays = []\n for i in range(len(names)):\n list_of_arrays.append(base)\n \n yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)\n \n yield_subtable['Mass'] = self.masses\n yield_subtable['mass_in_remnants'] = np.asarray([-1*m for m in self.masses])\n \n for el_index,el in enumerate(self.elements):\n yield_subtable[el] = np.divide(f['Yield'][el_index],self.masses)\n \n self.table[self.metallicities[0]] = yield_subtable", "def plot_seaice_predict_components(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/prediction/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += temporal_resolution\n title += ' SIE prediction'\n\n\n# Loading Seaice Trends\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n seaice = seaice\n\n\n# Index contributions\n filename = f'processed_data/regressions/spatial_multiple/regr_{temp_decomp}_{temporal_resolution}_{dt}_{spatial_resolution}'\n dataset = xr.open_dataset(filename + '.nc')\n indicies = np.array([i for i in dataset])\n values = np.array([dataset[i].values for i in dataset])\n index_data = {}\n for indexname in indicies[:-1]:\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n index_data[indexname] = xr.open_dataset('processed_data/INDICIES/' + filename +'.nc')[indexname]\n index_data[indexname] = (index_data[indexname] - index_data[indexname].mean()) \n index_data[indexname] = index_data[indexname] / 
index_data[indexname].std()\n\n times = list(set.intersection(set(seaice.time.values), *(set(index_data[i].time.values)for i in indicies[:-1])))\n\n prediction = seaice.copy() * 0\n predictions = []\n for indexname in indicies:\n if indexname in index_data.keys():\n prediction += index_data[indexname].sel(time=times).sortby('time') * dataset[indexname]\n predictions += [index_data[indexname].sel(time=times).sortby('time') * dataset[indexname]]\n else:\n prediction += dataset[indexname]\n predictions += [dataset[indexname]]\n\n\n seaice = seaice.sortby('time').sel(time=times).sortby('time')\n prediction = prediction.sortby('time').sel(time=times).sortby('time')\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.axhline(y=0, alpha = 0.5)\n data = (prediction[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10', alpha = 0.5)\n data = (seaice[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#177E89', alpha = 0.5)\n\n\n ln1 = ax.plot(seaice.time, (seaice[seaicename]*area/250).sum(dim = ('x', 'y')), label = 'SIE', alpha = 0.5)\n ln2 = ax.plot(seaice.time, (prediction[seaicename]*area/250).sum(dim = ('x', 'y')), label = 'Prediction', alpha = 0.5)\n\n lines = ln1 + ln2\n i = 0\n for indexname in indicies[:-1]:\n predict = index_data[indexname] * dataset[indexname]\n lines += ax.plot(predict.time, (predict*area/250).sum(dim = ('x', 'y')), '--', label = f'{indexname}', linewidth=1)\n i += 1\n\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 3, loc = 'upper right')\n plt.title(title)\n\n plt.savefig(imagefolder + seaicename + '_components.pdf')\n plt.show()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.axhline(y=0, alpha = 0.5)\n data = (prediction[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n # plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#EA1B10', alpha = 0.5)\n data = (seaice[seaicename]*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(data.time.values.astype(float), data)\n # plt.plot(data.time, data_m * data.time.values.astype(float) + data_b, color = '#177E89', alpha = 0.5)\n\n\n norm = (seaice[seaicename]*area/250).sum(dim = ('x', 'y')) / 100\n ln1 = ax.plot(seaice.time, (seaice[seaicename]*area/250).sum(dim = ('x', 'y')).sortby('time')/norm, label = 'SIE', alpha = 0.5)\n ln2 = ax.plot(seaice.time, (prediction[seaicename]*area/250).sum(dim = ('x', 'y')).sortby('time')/norm, label = 'Prediction', alpha = 0.5)\n\n lines = ln1 + ln2\n i = 0\n for indexname in indicies[:-1]:\n predict = index_data[indexname].sel(time=times).sortby('time') * dataset[indexname]\n lines += ax.plot(predict.time, (predict*area/250).sum(dim = ('x', 'y'))/norm, '--', label = f'{indexname}', linewidth=1)\n i += 1\n\n labels = [line.get_label() for line in lines]\n plt.legend(lines,labels,bbox_to_anchor=(0.99, -0.15), ncol = 3, loc = 'upper right')\n plt.title(title)\n\n plt.savefig(imagefolder + seaicename + 
'_components_normalised.pdf')\n plt.show()", "def s(t,n, T=2*np.pi):\n k = np.arange(1, n)\n s = ((1/(2*k-1))*np.sin((2*(2*k-1)*np.pi*t)/ T))\n return ((4/np.pi)*np.sum(s))", "def process_nitrate(self):\n constituent = 'NitrateSurr'\n db_path = '/said/{}/'.format(self.site['id'])\n iv_path = db_path + 'iv'\n df = self.store.get(iv_path)\n\n n_error = np.maximum(0.5, df[constituent]*.1)\n df[constituent+'_U90.0'] = df[constituent] + n_error\n df['NitrateSurr_L90.0'] = df.NitrateSurr - n_error\n #clip values below 0\n df['NitrateSurr_L90.0'] = np.maximum(0, df['NitrateSurr_L90.0'])\n\n update_table(self.store, iv_path, df)", "def test(nifti_region_to_save, path_where_store_out=\"pet_regions_segmented\"):\n regions_used = \"three\"\n list_regions = session.select_regions_to_evaluate(regions_used)\n dic_regions_segmented = load_pet_regions_segmented(list_regions)\n\n region_container_3d = dic_regions_segmented[\n nifti_region_to_save] # [patients x heigh, width, depth]\n\n for patient in range(0, region_container_3d.shape[0], 1):\n img = nib.Nifti1Image(region_container_3d[patient, :, :, :], np.eye(4))\n img.to_filename(os.path.join(path_where_store_out,\n \"region_{0},patient_{1}.nii\".format(\n regions_used, patient)))", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # 
check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = 
{rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n filter='F150W', grism='GR150R'): \n naxis = 2048, 2048\n crpix = 1024, 1024\n \n cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.\n cd_rot = rotate_CD_matrix(cd, pa_aper)\n \n h = pyfits.Header()\n \n h['CRVAL1'] = ra\n h['CRVAL2'] = dec\n \n h['WCSAXES'] = 2\n h['CTYPE1'] = 'RA---TAN'\n h['CTYPE2'] = 'DEC--TAN'\n \n for i in range(2):\n h['NAXIS%d' %(i+1)] = naxis[i]\n h['CRPIX%d' %(i+1)] = crpix[i]\n h['CDELT%d' %(i+1)] = 1.0\n for j in range(2):\n h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]\n \n ### Backgrounds\n # http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf\n bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}\n \n h['BACKGR'] = bg[filter], 'Total, e/s'\n h['FILTER'] = filter\n h['INSTRUME'] = 'NIRISS'\n h['READN'] = 6 , 'Rough, per pixel per 1 ks exposure' # e/pix/per\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n if grism == 'GR150R':\n h['GRISM'] = 'GR150R', 'Spectral trace along X'\n else:\n h['GRISM'] = 'GR150C', 'Spectral trace along Y'\n \n wcs = 
pywcs.WCS(h)\n h['EXTVER'] = 1\n \n return h, wcs", "def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... 
done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def calculateSNR(self):\n pass", "def save2nifti(self, file_path):\n #Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 #signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 #128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n #Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n data = np.rot90(self._data, 3)\n if data_type.has_key(data.dtype.type):\n self._header['datatype'] = data_type[data.dtype.type]\n self._header['cal_max'] = data.max()\n self._header['cal_min'] = 0\n image = nib.nifti1.Nifti1Image(data, None, self._header)\n nib.nifti1.save(image, file_path)", "def add_noise(self, snr, unit=None):\n return self.from_time(self.fs, noisify(self.in_time, snr, unit=unit))", "def _make_s2n(self, ffile, hdul):\n bname = os.path.basename(ffile)\n phu = hdul[0].copy()\n phu.data = None\n if 'S/N' in hdul:\n hdu = hdul['S/N']\n else:\n if 'FLUX' in hdul and 'ERROR' in hdul:\n log.debug(f'Making S/N image from FLUX and ERROR '\n f'extensions for {bname}.')\n hdu = fits.ImageHDU(hdul['FLUX'].data,\n hdul['FLUX'].header)\n s = hdul['FLUX'].data\n n = hdul['ERROR'].data\n elif 'FLUX' in hdul and 'STDDEV' in hdul:\n log.debug(f'Making S/N image from FLUX and STDDEV '\n f'extensions for {bname}.')\n hdu = fits.ImageHDU(hdul['FLUX'].data,\n hdul['FLUX'].header)\n s = hdul['FLUX'].data\n n = hdul['STDDEV'].data\n elif 'STOKES I' in hdul and 'ERROR I' in hdul:\n log.debug(f'Making S/N image from STOKES I and '\n f'ERROR I extensions for {bname}.')\n hdu = fits.ImageHDU(hdul['STOKES I'].data,\n hdul['STOKES I'].header)\n s = hdul['STOKES I'].data\n n = hdul['ERROR I'].data\n else:\n raise ValueError(f'Cannot determine S/N from extensions '\n f'in file {bname}')\n\n hdu.data = s / n\n hdu.header['EXTNAME'] = 'S/N'\n hdu.header['BUNIT'] = ''\n\n # blank out data outside of range\n try:\n low, high = 
self.disp_parameters['s2n_range']\n hdu.data[hdu.data < float(low)] = np.nan\n hdu.data[hdu.data > float(high)] = np.nan\n except (ValueError, AttributeError, IndexError, TypeError):\n pass\n\n s2n = fits.HDUList([phu, hdu])\n return s2n", "def save_to_nii(im, filename, outdir=\"\", mode=\"image\", system=\"sitk\"):\n if system == \"sitk\":\n if mode == 'label':\n img = sitk.GetImageFromArray(im.astype(np.uint8))\n else:\n img = sitk.GetImageFromArray(im.astype(np.float32))\n if not os.path.exists(\"./{}\".format(outdir)):\n os.mkdir(\"./{}\".format(outdir))\n sitk.WriteImage(img, \"./{}/{}.nii.gz\".format(outdir, filename))\n else:\n img = np.rot90(im, k=2, axes= (1,2))\n OUTPUT_AFFINE = np.array(\n [[0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0],\n [0, 0, 0, 1]])\n if mode == 'label':\n img = nibabel.Nifti1Image(img.astype(np.uint8), OUTPUT_AFFINE)\n else:\n img = nibabel.Nifti1Image(img.astype(np.float32), OUTPUT_AFFINE)\n if not os.path.exists(\"./{}\".format(outdir)):\n os.mkdir(\"./{}\".format(outdir))\n nibabel.save(img, \"./{}/{}.nii.gz\".format(outdir, filename))", "def Writefile(self, outfile, verbose=True):\n \n self.outfile = outfile\n \n # Write SUNTANS grid to file\n nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')\n nc.Description = 'SUNTANS subsetted history file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n nc.type = 'SUNTANS HIS file'\n #pdb.set_trace()\n nc.createDimension('Nc', self.Nc)\n nc.createDimension('Np', self.Np)\n nc.createDimension('Ne', self.Ne)\n nc.createDimension('Nk', self.Nk)\n nc.createDimension('numsides', self.numsides)\n \n nc.createDimension('time', None)\n \n def write_nc_var(var, name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n nc.variables[name][:] = var\n if verbose:\n print ' ... wrote ', name\n \n def create_nc_var(name, dimensions, units=None):\n nc.createVariable(name, 'f8', dimensions)\n if units is not None:\n nc.variables[name].units = units\n if verbose:\n print ' ... 
wrote ', name\n \n # Grid variables\n write_nc_var(self.xv, 'xv', ('Nc'))\n write_nc_var(self.yv, 'yv', ('Nc'))\n write_nc_var(self.xp, 'xp', ('Np'))\n write_nc_var(self.yp, 'yp', ('Np'))\n write_nc_var(self.xe, 'xe', ('Ne'))\n write_nc_var(self.ye, 'ye', ('Ne'))\n write_nc_var(self.dz, 'dz', ('Nk'))\n write_nc_var(self.dv, 'dv', ('Nc'))\n write_nc_var(self.Ac, 'Ac', ('Nc'))\n write_nc_var(self.Nk, 'Nk', ('Nc'))\n write_nc_var(self.face, 'face', ('Nc','numsides'))\n write_nc_var(self.mark, 'mark', ('Ne'))\n write_nc_var(self.cells, 'cells', ('Nc','numsides'))\n \n \n # Create the data variables\n create_nc_var('time',('time'),'seconds since 1990-01-01 00:00:00')\n create_nc_var('salt',('time','Nk','Nc'),'psu')\n create_nc_var('temp',('time','Nk','Nc'),'degrees C')\n create_nc_var('uc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('vc',('time','Nk','Nc'),'meter second-1')\n create_nc_var('nu_v',('time','Nk','Nc'),'m2 s-1')\n create_nc_var('rho',('time','Nk','Nc'),'kg m-3')\n create_nc_var('tau_x',('time','Nc'),'N m-2')\n create_nc_var('tau_y',('time','Nc'),'N m-2')\n create_nc_var('eta',('time','Nc'),'m')\n \n nc.close()", "def fractalise(fractals,n,fpath):\n # Use different fractals for MP\n n = nnn-n\n # Load numpy array\n for f in fractals:\n if f.endswith('{:02d}.npy'.format(n)):\n frac_arr = N.load(f)\n f_out = f\n \n # Normalise to +/- 1%\n frac_arr_pc = normalise(frac_arr)\n\n # Load virgin data\n nc = Dataset(fpath,'r+')\n virgindata = nc.variables['SMOIS'][:,:,:,:]\n newdata = N.zeros_like(virgindata)\n shp = virgindata[0,0,:,:].shape\n\n # Interpolate fractal to wrfinput grid\n frac_arr_interp = interpolate_to_wrfgrid(frac_arr_pc,shp)\n \n # Perturb each soil level (broadcasting is scary)\n for lv in N.arange(virgindata.shape[1]):\n newdata[0,lv,:,:] = virgindata[0,lv,:,:] + (virgindata[0,lv,:,:]*frac_arr_interp)\n\n # Write back\n assert N.all(newdata > 0.0)\n # pdb.set_trace()\n nc.variables['SMOIS'][:,:,:,:] = newdata\n nc.close()\n return f_out", "def SNR(self, flux_sky, n_pix_star, flux_star, gain, ron):\n SNR = (gain*flux_star/sqrt(gain*flux_star + n_pix_star*gain*flux_sky + n_pix_star*ron**2)) \n return SNR", "def plot_subplot_trend(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/subplots/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += temporal_resolution\n title += ' SIE trends'\n\n\n# Loading Seaice Trends\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n if seaice_source == 'nsidc':\n seaice = seaice/250\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = xr.apply_ufunc(scipy.stats.linregress, seaice[seaicename].time.values.astype(float), seaice[seaicename], input_core_dims=[['time'],['time']], vectorize=True, dask='parallelized', output_dtypes=[float]*5, output_core_dims=[[]]*5)\n if seaice_source =='ecmwf':\n seaice_m, seaice_b, seaice_r_value, seaice_p_value, seaice_std_err = scipy.stats.linregress(seaice[seaicename].time.values.astype(float), seaice[seaicename])\n seaice_m = seaice_m * 1e9 * 60 * 60 * 24 * 365\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n seaice_m 
= seaice_m*area\n seaice_m = seaice_m.where(seaice_m != 0)\n seaice_m = seaice_m.where(seaice_p_value <= 0.05)\n\n\n# Index xontributions\n filename = f'processed_data/regressions/spatial_multiple/regr_{temp_decomp}_{temporal_resolution}_{dt}_{spatial_resolution}'\n dataset = xr.open_dataset(filename + '.nc')\n indicies = np.array([i for i in dataset])\n values = np.array([dataset[i].values for i in dataset])\n index_data = {}\n for indexname in indicies[:-1]:\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n index_data[indexname] = xr.open_dataset('processed_data/INDICIES/' + filename +'.nc')[indexname]\n index_data[indexname] = (index_data[indexname] - index_data[indexname].mean()) \n index_data[indexname] = index_data[indexname] / index_data[indexname].std()\n newdata = {} \n for indexname in indicies[:-1]:\n a = scipy.stats.linregress(index_data[indexname].time.values.astype(float), index_data[indexname])\n newdata[indexname] = a[0] * dataset[indexname] * 24*60*60*365e9\n title = temp_decomp.capitalize() + ' '\n if detrend == 'detrended':\n title += detrend + ' '\n title += temporal_resolution\n title += f' SIC trend contributions'\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n # Plotting\n for i in range(len(indicies)-1):\n indexname = indicies[i]\n newdata[indexname] = newdata[indexname] * area / 250\n newdata[indexname] = newdata[indexname].where(newdata[indexname] !=0)\n\n\n\n fig = plt.figure(figsize = (15,5))\n\n # seaice_m = log_data(seaice_m)\n max_ = min(seaice_m.max(),-seaice_m.min())\n # max_ = 1\n divnorm = TwoSlopeNorm(vmin=-max_, vcenter=0, vmax=max_)\n ax = fig.add_subplot(131, projection = ccrs.SouthPolarStereo())\n # Plotting\n contor = ax.contourf(seaice_m.x, seaice_m.y, seaice_m, cmap = 'RdBu', levels = 100, norm = divnorm, transform=ccrs.SouthPolarStereo())\n ax.coastlines()\n ax.set_axis_off()\n # cbar = plt.colorbar(contor)\n # cbar.set_label('Trend in SIE (km$^2$ yr$^{-1}$)')\n # plt.title(title)\n ax = [fig.add_subplot(2,6,3, projection = ccrs.SouthPolarStereo()),fig.add_subplot(2,6,4, projection = ccrs.SouthPolarStereo()),fig.add_subplot(2,6,9, projection = ccrs.SouthPolarStereo()),fig.add_subplot(2,6,10, projection = ccrs.SouthPolarStereo())]\n for i in range(len(indicies)-1):\n indexname = indicies[i]\n # newdata[indexname] = log_data(newdata[indexname])\n newdata[indexname] = newdata[indexname].where(newdata[indexname] !=0)\n contor = ax[i].contourf(dataset.x, dataset.y, newdata[indexname], cmap = 'RdBu', norm = divnorm, transform=ccrs.SouthPolarStereo(), levels = 100)\n ax[i].coastlines()\n ax[i].set_axis_off()\n ax[i].set_title(indicies[i])\n\n ax = fig.add_subplot(1,3,3, projection = ccrs.SouthPolarStereo())\n data = seaice_m\n for i in range(len(indicies)-1):\n indexname = indicies[i]\n data = data - newdata[indexname]\n ax.contourf(dataset.x, dataset.y, data, cmap = 'RdBu', norm = divnorm, levels = 100, transform=ccrs.SouthPolarStereo())\n ax.coastlines()\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.95, 0.15, 0.05, 0.7])\n cbar = fig.colorbar(cm.ScalarMappable(norm=divnorm, cmap='RdBu'), cax=cbar_ax, shrink=0.88)\n\n plt.savefig(imagefolder + seaicename + '.pdf')\n plt.show()", "def calculate_piN_piS(codonseqs, method, codon_table, het=False):\n analysis = {\"seqname\": \"\", \"piN\": -1, \"piS\": -1, \"piNpiS\": -1, \"pi\": -1, \"method\":method}\n x = seqfreqs(codonseqs)\n #if 'piNpiS' in options.debug:\n # print(\"freqs are: {}\".format(x))\n # print(\"len codonseqs is: \", len(codonseqs))\n 
piN = 0\n piS = 0\n for i in range(len(codonseqs)):\n for j in range(i+1, len(codonseqs)):\n #print(codonseqs[i], codonseqs[j])\n if not het:\n dN, dS = cal_dn_ds(codonseqs[i], codonseqs[j], codon_table=codon_table, method=method)\n piN = piN + (x[i] * x[j] * dN)\n piS = piS + (x[i] * x[j] * dS)\n #if 'piNpiS' in options.debug:\n # print(\"{0} dN{1}{2}={3} dS{1}{2}={4}\".format(method, i, j, dN, dS))\n else:\n try:\n dN, dS = cal_dn_ds(codonseqs[i], codonseqs[j], codon_table=codon_table, method=method)\n piN = piN + (x[i] * x[j] * dN)\n piS = piS + (x[i] * x[j] * dS)\n except:\n pass\n\n analysis['piN'] = piN\n analysis['piS'] = piS\n try:\n analysis['piNpiS'] = piN/piS\n except:\n analysis['piNpiS'] = 0\n #if 'piNpiS' in options.debug:\n # print (\"{0} dN={1:.3f} dS={2:.3f} piN/piS = {3:.3f}\".format(\n # method, analysis['piN'], analysis['piS'], analysis['piNpiS']))\n\n return analysis", "def save():\n pl.savefig('/home/filippini/Documents/plot/RUN55/compa'+INFO_RUN+'.png')", "def visualize(self, save: bool = False) -> None:\n import matplotlib.pyplot as plt\n import inspect\n\n plt.style.use('seaborn-whitegrid')\n plt.rcParams['figure.figsize'] = [10, 5]\n if not self.inverse_transformation:\n grid = np.linspace(0, 1, 10000)\n func = self.pdf(np.linspace(0, 1, 10000))\n try:\n plt.plot(grid, func)\n except:\n plt.plot(grid, np.repeat(func, 10000))\n plt.title('Intensity function')\n plt.xlabel('time')\n plt.ylabel('value')\n if save:\n try:\n plt.savefig('intensity_function_' + inspect.getsource(self.pdf).split('return')[\n 1].strip() + '.png')\n print('Saved as ' + 'intensity_function_' + inspect.getsource(self.pdf).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving intensity function failed!\")\n plt.show()\n plt.clf()\n\n t = self.generate()\n plt.step(t, list(range(0, len(t))))\n plt.title('Simulated trajectory')\n plt.xlabel('time')\n plt.ylabel('value')\n if save:\n try:\n plt.savefig(\n 'trajectory_' + inspect.getsource(self.pdf).split('return')[1].strip() + '.png')\n print('Saved as ' + 'trajectory_' + inspect.getsource(self.pdf).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving trajectory failed!\")\n plt.show()\n plt.clf()\n\n plt.plot(t, list(np.repeat(0, len(t))), '.')\n plt.title('Simulated points')\n plt.xlabel('time')\n if save:\n try:\n plt.savefig('points_' + inspect.getsource(self.pdf).split('return')[1].strip() + '.png')\n print('Saved as ' + 'points_' + inspect.getsource(self.pdf).split('return')[\n 1].strip() + '.png')\n except:\n warnings.warn(\"Saving points failed!\")\n plt.show()\n plt.clf()", "def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... 
\\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... 
\\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return", "def get_sinogram( self, N = 128, theta_vec = None ):\n\n if theta_vec is None:\n theta_vec = np.deg2rad(np.linspace(0, 359, 360))\n if self.phantom_type in ['ellipses', 'shepp_logan', 'modified_shepp_logan']:\n analytical_sinogram = radon_ellipses(N, theta_vec, np.copy(self.matrix), circle=self.circle);\n elif self.phantom_type == 'squares':\n analytical_sinogram = radon_squares(N, theta_vec, np.copy(self.matrix), circle = self.circle);\n elif self.phantom_type == 'rectangles':\n analytical_sinogram = radon_rectangles(N, theta_vec, np.copy(self.matrix), circle=self.circle);\n else:\n print('error on the choice of phantom type')\n return analytical_sinogram", "def intensityRatioSave(self,outFile=0):\n if not outFile:\n outFile = self.IntensityRatio['filename']\n print(' saving ratio to filename = %s'%(outFile))\n if hasattr(self, 'IntensityRatio'):\n temperature=self.IntensityRatio['temperature']\n eDensity=self.IntensityRatio['eDensity']\n ratio=self.IntensityRatio['ratio']\n out = open(outFile,'w')\n nvalues=len(ratio)\n # need to add 7 lines to maintain IDL like files\n out.write(outFile+'\\n') #1\n out.write(self.IntensityRatio['desc']+'\\n') #2\n out.write(' created with ChiantiPy using CHIANTI version '+ chdata.ChiantiVersion +'\\n') #3\n out.write(' columns are temperature, eDensity, ratio'+'\\n') #5\n tunit = 'K'\n out.write(' temperature in '+tunit+', electron eDensity in cm^(-3)'+'\\n') #6\n out.write(' ratio given in '+self.Defaults['flux']+'\\n') #4\n out.write(' '+'\\n') #7\n for ivalue in range(nvalues):\n s='%12.3e %12.3e %12.3e \\n' % (temperature[ivalue],eDensity[ivalue],ratio[ivalue])\n out.write(s)\n out.close()\n else:\n print(' in .intensityRatioSave(), no IntensityRatio is found')", "def analyseSNRfluctuations(self, fsu='FSUA', snr='PD', plot=True,\n xlims=None, title='', normalized=False):\n t = self.raw['IMAGING_DATA_'+fsu].data.field('TIME')\n\n if (fsu=='FSUB' and self.insmode=='NORMAL') or \\\n (fsu=='FSUA' and self.insmode=='SWAPPED'):\n wno = np.where((np.interp(t, self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'))<3))\n else:\n wno = np.where(np.interp(t, self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'))<3)\n if (fsu=='FSUB' and self.insmode=='NORMAL') or \\\n (fsu=='FSUA' and self.insmode=='SWAPPED'):\n wftk = np.where((np.interp(t, self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'))>=7))\n else:\n wftk = np.where(np.interp(t, self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'))>=7)\n\n snrNo = self.raw['IMAGING_DATA_'+fsu].data.field(snr+'SNR')[wno[0]]\n snrFtk = self.raw['IMAGING_DATA_'+fsu].data.field(snr+'SNR')[wftk[0]]\n\n if plot:\n fig = plt.figure(1, figsize=(8,4))\n plt.subplots_adjust(left=0.08, right=.98)\n fig.clf()\n if normalized:\n norma = np.median(snrNo)\n plt.xlabel('normalized SNR to out-of-fringes')\n else:\n norma = 1\n plt.xlabel('SNR')\n\n plt.hist(snrNo/norma, bins=50, normed=True, alpha=0.5,\n color='r', label='NOT FTK')\n hno = np.histogram(snrNo, bins=50, normed=True)\n plt.hist(snrFtk/norma, bins=50, normed=True, alpha=0.5,\n color='g', label='FTK')\n hftk = np.histogram(snrFtk, bins=50, normed=True)\n\n if not xlims is None:\n 
plt.xlim(xlims[0], xlims[1])\n plt.title(title)\n poissonDist = lambda x,p:\\\n poisson(p['m']*p['p']).pmf(np.int_(np.floor(x*p['p'])))*p['p'] +\\\n (x*p['p']-np.floor(x*p['p']))/\\\n (np.ceil(x*p['p'])-np.floor(x*p['p']))*\\\n (poisson(p['m']*p['p']).pmf(np.int_(np.ceil(x*p['p'])))*p['p'] -\n poisson(p['m']*p['p']).pmf(np.int_(np.floor(x*p['p'])))*p['p'])\n\n guess = {'m':np.median(snrNo), 'p':1}\n X = 0.5*(hno[1][:-1]+hno[1][1:])\n fit = dpfit.leastsqFit(poissonDist, X, guess, hno[0])\n guessNo = fit['best']\n uncer = fit['uncer']\n chi2 = fit['chi2']\n model = fit['model']\n print guessNo\n print 'NOFTK; POISSON: LAMBDA', guessNo['p']*guessNo['m']\n print 'NOFTK; POISSON: STD/MEAN', 1/np.sqrt(guessNo['p']*guessNo['m'])\n plt.plot(X/norma, poissonDist(X, guessNo)*norma, '-r',\n linewidth=3, alpha=0.8, linestyle='dashed')\n\n guess = {'m':np.median(snrNo), 'p':1/10.}\n X = 0.5*(hftk[1][:-1]+hftk[1][1:]) \n fit = dpfit.leastsqFit(poissonDist, X, guess, hftk[0])\n guess = fit['best']\n uncer = fit['uncer']\n chi2 = fit['chi2']\n model = fit['model']\n print guess\n print ' FTK; POISSON: LAMBDA', guess['p']*guess['m']\n print ' FTK; POISSON: STD/MEAN', 1/np.sqrt(guess['p']*guess['m'])\n plt.plot(X/norma, poissonDist(X, guess)*norma, '-g',\n linewidth=3, alpha=0.8, linestyle='dashed')\n plt.legend( loc='upper left')\n #plt.xscale('log')\n\n print 'DIFFERENCIATION',\\\n np.abs(guess['m']-guessNo['m'])/\\\n (np.sqrt(guessNo['m']/guessNo['p']) +\n np.sqrt(guess['m']/guess['p']))\n return", "def generate_siaf_pre_flight_reference_files_nircam():\n\n instrument = 'NIRCam'\n overwrite_wedge_file = False\n overwrite_grism_file = False\n\n\n # wedge definitions\n wedge_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_wedge_offsets.txt'.format(instrument.lower()))\n\n if (not os.path.isfile(wedge_file) or (overwrite_wedge_file)):\n\n wedge_offsets = Table.read(os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'wedge_offsets.txt'), format='ascii.basic', delimiter=' ', guess=False)\n\n comments = []\n comments.append('{} detector parameter definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the wedge offsets.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n wedge_offsets.meta['comments'] = comments\n wedge_offsets.write(wedge_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)\n\n # grism definitions\n grism_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_grism_parameters.txt'.format(instrument.lower()))\n\n if (not os.path.isfile(wedge_file) or (overwrite_grism_file)):\n # grism parameters, see WFSS worksheet in EXCEL SIAF\n grism_parameters = Table.read(grism_file, format='ascii.basic', delimiter=',', guess=False)\n\n # Save a backup copy of the grism file\n cmd = 'cp {} {}'.format(grism_file,os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'nircam_siaf_grism_parameters_backup.txt'))\n os.system(cmd)\n\n # different sign in Y for NRCB apertures\n factor = np.array(\n [1. if 'NRCA' in grism_parameters['aperture_name'][i] else -1. 
for i in range(len(grism_parameters))])\n\n for col in grism_parameters.colnames[1:]:\n # these are Sci coordinates\n if col[0] != 'D':\n if 'X' in col:\n grism_parameters['D{}'.format(col)] = grism_parameters[col].data - 1024.5\n elif 'Y' in col:\n grism_parameters['D{}'.format(col)] = factor * (grism_parameters[col].data - 1024.5)\n\n\n\n comments = []\n comments.append('{} grism parameter definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the grism parameters.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n grism_parameters.meta['comments'] = comments\n grism_parameters.write(grism_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)\n\n # Transformation parameters, mapping used to select rows in cold_fit_[] file\n coldfit_name_mapping = {\n 'NRCA1_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA2_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA3_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA4_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA5_FULL' :{'degrees_to_mm':'OTESKYToNIRCAMALW_RT_20170307121022',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALWToOTESKY_RT_20170307121022',\n },\n 'NRCB1_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_1_20161025081604',\n 'pixels_to_mm':'NIRCAMBSW_1ToNIRCAMBSW_20161025081604',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB2_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_2_20161025081912',\n 'pixels_to_mm':'NIRCAMBSW_2ToNIRCAMBSW_20161025081912',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB3_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_3_20161025082300',\n 'pixels_to_mm':'NIRCAMBSW_3ToNIRCAMBSW_20161025082300',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB4_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_4_20161025082647',\n 'pixels_to_mm':'NIRCAMBSW_4ToNIRCAMBSW_20161025082647',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB5_FULL' :{'degrees_to_mm':'OTESKYToNIRCAMBLW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBLWToNIRCAMBLW_1_20161227162336',\n 'pixels_to_mm':'NIRCAMBLW_1ToNIRCAMBLW_20161227162336',\n 'mm_to_degrees':'NIRCAMBLWToOTESKY_RT_20170307121023',\n },\n 'NRCA1_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RNDNC_202110261138',\n 
'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASW_RNDNCToOTESKY_202110261138',\n },\n 'NRCA2_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASW_RNDToOTESKY_202005150434',\n },\n 'NRCA3_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RNDNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASW_RNDNCToOTESKY_202110261138',\n },\n 'NRCA4_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASW_RNDToOTESKY_202005150434',\n },\n 'NRCA1_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BARNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASW_BARNCToOTESKY_202110261138',\n },\n 'NRCA2_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASW_BARToOTESKY_202005150434',\n },\n 'NRCA3_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BARNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASW_BARNCToOTESKY_202110261138',\n },\n 'NRCA4_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASW_BARToOTESKY_202005150434',\n },\n 'NRCA5_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMALW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALW_RNDToOTESKY_202005150434',\n },\n 'NRCA5_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMALW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALW_BARToOTESKY_202005150434',\n }\n }\n\n # coldfit_source_data_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}'.format('cold_fit_201703071210.csv'))\n coldfit_source_data_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}'.format('nircam_cold_fit.txt'))\n print('NIRCam coldfit data from', coldfit_source_data_file)\n t = open(coldfit_source_data_file)\n coldfit_source_data = t.readlines()\n t.close()\n # remove comments from read content\n coldfit_source_data = [line for line in coldfit_source_data if line[0] != '#']\n\n siaf_detector_layout = iando.read.read_siaf_detector_layout()\n # siaf_alignment_parameters = iando.read.read_siaf_alignment_parameters(instrument)\n siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument)\n # aperture_dict = {}\n aperture_name_list = siaf_aperture_definitions['AperName'].tolist()\n\n # generate alignment reference file, one file for all master apertures\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_alignment.txt'.format(instrument.lower()))\n siaf_alignment = 
Table()\n\n for AperName in aperture_name_list:\n\n # process the master apertures of NIRCam\n if AperName in siaf_detector_layout['AperName']:\n (A, B, C, D, betaX, betaY, V2Ref, V3Ref) = nircam_get_polynomial_both(AperName, siaf_aperture_definitions, coldfit_name_mapping, coldfit_source_data)\n\n #generate distortion reference file\n number_of_coefficients = len(A)\n polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)\n siaf_index = []\n exponent_x = []\n exponent_y = []\n for i in range(polynomial_degree + 1):\n for j in np.arange(i + 1):\n siaf_index.append('{:d}{:d}'.format(i, j))\n exponent_x.append(i - j)\n exponent_y.append(j)\n\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, A, B, C, D), names=(\n 'siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX', 'Idl2SciY'))\n distortion_reference_table.add_column(\n Column([AperName] * len(distortion_reference_table), name='AperName'), index=0)\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_distortion_{}.txt'.format(instrument.lower(),\n AperName.lower()))\n # distortion_reference_table.pprint()\n comments = []\n comments.append('{} distortion reference file for SIAF\\n'.format(instrument))\n comments.append('Aperture: {}\\n'.format(AperName))\n comments.append('Based on coefficients given in {},'.format(os.path.basename(coldfit_source_data_file)))\n # comments.append('that were rescaled, shifted for a different reference pixel location, and rotated:')\n # comments.append('Rotation of {:2.3f} deg was removed and is carried separately in V3IdlYangle.'.format(\n # np.rad2deg(V3angle))) # *units.deg.to(units.arcsecond)\n # if 'may_2015' in distortion_file_name:\n # comments.append(\n # 'These parameters are stored in the currently (January 2018) active SIAF (PRDOPSSOC-G-012). 
')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n distortion_reference_table.meta['comments'] = comments\n distortion_reference_table.write(distortion_reference_file_name, format='ascii.fixed_width',\n delimiter=',', delimiter_pad=' ', bookend=False, overwrite=True)\n\n V3SciYAngle = betaY\n V3SciXAngle = betaX\n if np.abs(V3SciYAngle) < 90.:\n V3IdlYAngle = V3SciYAngle\n else:\n V3IdlYAngle = V3SciYAngle - np.sign(V3SciYAngle) * 180.\n\n if len(siaf_alignment) == 0: # first entry\n siaf_alignment['AperName'] = ['{:>30}'.format(AperName)]\n siaf_alignment['V3IdlYAngle'] = [V3IdlYAngle]\n siaf_alignment['V3SciXAngle'] = V3SciXAngle #[np.rad2deg(betaX)]\n siaf_alignment['V3SciYAngle'] = V3SciYAngle #[np.rad2deg(betaY)]\n siaf_alignment['V2Ref'] = [V2Ref]\n siaf_alignment['V3Ref'] = [V3Ref]\n else:\n siaf_alignment.add_row(['{:>30}'.format(AperName), V3IdlYAngle, V3SciXAngle, V3SciYAngle, V2Ref,V3Ref])\n comments = []\n comments.append('{} alignment parameter reference file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the focal plane alignment parameters of master apertures calibrated')\n comments.append('during FGS-SI alignment.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n siaf_alignment.meta['comments'] = comments\n siaf_alignment.write(outfile, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False, overwrite=True)", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, 
gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def save_time_spent(self):\n\n ratings_dir = Path(self.out_dir).resolve() / cfg.suffix_ratings_dir\n if not ratings_dir.exists():\n makedirs(ratings_dir, exist_ok=True)\n\n timer_file = ratings_dir / '{}_{}_{}'.format(\n self.vis_type, self.suffix, cfg.file_name_timer)\n\n lines = '\\n'.join(['{},{}'.format(sid, elapsed_time)\n for sid, elapsed_time in self.timer.items()])\n\n # saving to disk\n try:\n with open(timer_file, 'w') as tf:\n tf.write(lines)\n except:\n print('Unable to save timer info to disk -- printing them to log:')\n print(lines)\n raise IOError('Error in saving timer info to file!')\n\n # printing summary\n times = np.array(list(self.timer.values()))\n if len(times) < 10:\n print('\\n\\ntimes spent per subject in seconds:\\n{}'.format(lines))\n\n print('\\nMedian time per subject : {} seconds'.format(np.median(times)))\n print('\\t5th and 95th percentile of distribution of times spent '\n ': {} seconds'.format(np.nanpercentile(times, [5, 95])))", "def make_sn(sn_thres=0, format=\"png\", snsig=False):\r\n ###############################################\r\n # Read values of S/N\r\n sn = np.loadtxt(outtable, usecols=(14,))\r\n if snsig:\r\n sigma = np.loadtxt(outtable, usecols=(3,))\r\n sn /= sigma / 100.\r\n ###############################################\r\n # Find good (and bad) regions according to S/N\r\n good = np.where(((~np.isnan(sn)) & (sn >= sn_thres)))[0]\r\n bad = np.where((sn < sn_thres))[0]\r\n ###############################################\r\n # Filter S/N\r\n sn = sn[good]\r\n ###############################################\r\n # Colorbar limits\r\n vmin, vmax = 10, 50\r\n # Set limits for the plot\r\n norm = Normalize(vmin, vmax)\r\n ###############################################\r\n # Set colormap\r\n cmap = \"cubelaw_r\"\r\n cmap = \"Spectral\"\r\n # Produces a collection of polygons with colors according to S/N values\r\n coll = PolyCollection(polygons_bins[good], array=sn, cmap=cmap,\r\n edgecolors='w', norm=norm, linewidths=1.)\r\n ###############################################\r\n # Initiate figure and axis for matplotlib\r\n fig, ax = plt.subplots(1, 1, figsize=(6.4, 6), )\r\n fig.subplots_adjust(left=0.09, right=0.985, bottom=0.092, top=0.98,\r\n hspace=0.05, wspace=0.06)\r\n ###############################################\r\n # ax.add_patch(Rectangle((-100, -100), 200, 200, facecolor=\"0.8\", zorder=0,\r\n # alpha=0.5))\r\n ###############################################\r\n # Draw the polygons\r\n draw_map(fig, ax, coll, lims=40)\r\n ###############################################\r\n # Add contours according to V-band image\r\n # draw_contours(\"residual\", fig, ax, c=\"k\")\r\n draw_contours(\"vband\", fig, ax, c=\"k\")\r\n # Draw actual slit positions\r\n # canvas.draw_slits(ax, slit_type=1, fc=\"r\", ec=\"r\", ignore=ignore_slits )\r\n # canvas.draw_slits(ax, slit_type=3, fc=\"r\", ec=\"r\", ignore=ignore_slits )\r\n canvas.draw_slits_ids(ax, slits, fc=\"r\", ec=\"r\")\r\n ###############################################\r\n # Draw white rectangle in the position of the colorbar so background\r\n # stars do not overplot the labels and ticks\r\n plt.gca().add_patch(Rectangle((18, -36), 20, 10, alpha=1, zorder=10000,\r\n color=\"w\"))\r\n ###############################################\r\n # Draw the colorbar\r\n label = r\"100 S/N [pix] / $\\sigma$\" if snsig else r\"S/N\"\r\n draw_colorbar(fig, ax, coll, ticks=np.linspace(vmin, vmax, 5),\r\n cblabel=label, 
cbar_pos=[0.16, 0.15, 0.17, 0.04])\r\n ##############################################\r\n # Write labels\r\n xylabels(ax)\r\n ##############################################\r\n # Draw positions of galaxies\r\n # draw_galaxies(fig, ax)\r\n ##############################################\r\n # Save the figure\r\n plt.savefig(\"figs/sn.{0}\".format(format), dpi=300)\r\n # plt.savefig(\"figs/sn.pdf\", dpi=100)\r\n # plt.savefig(\"figs/sn.eps\", dpi=2500, format=\"eps\")\r\n # plt.savefig(\"figs/sn.png\", dpi=300)\r\n return", "def tiled_writing(red, nir, output):\n \n #open datasets\n src_red = rio.open(red)\n src_nir = rio.open(nir)\n \n #define raster properies and update datatype\n meta = src_red.meta.copy()\n meta.update({'dtype':'float32'}) # meta is a dictionary\n outfile = output\n #open outfile in writing mode with the properties of defined raster band\n with rio.open(outfile, 'w', **meta) as dst:\n #iterate over blocks of the bands, calculate ndvi for each block \n # and put the blocks back together\n for window in calc_tiles(src_red, tile_size_x, tile_size_y):\n red_block = src_red.read(window=window, masked=True)\n nir_block = src_nir.read(window=window, masked=True)\n #cast ndarrays to Float32 type\n red = red_block.astype('f4')\n nir = nir_block.astype('f4')\n #allow division by zero\n np.seterr(divide='ignore', invalid='ignore')\n #calculate ndvi and write raster\n ndvi = (nir - red) / (nir + red)\n dst.write(ndvi, window=window)\n\n #close dataset\n src_red.close()\n src_nir.close()\n return outfile", "def save(self, inst):\n n = inst.dimensions[\"n\"]\n with open(self.location, \"wt\") as f:\n f.write(f\"measurements: {n}\\n\")\n f.write(f\"time temperature\\n\")\n for time, temp in zip(inst.time, inst.temperature):\n f.write(f\"{time:4} {temp:12}\\n\")", "def SNR_vs_NSA():\n NSA = np.array([1, 2, 3, 4])\n noise = np.array([0.7, 0.5, 0.4, 0.3]) # **2\n comp1 = np.array([39.3, 40.8, 41.3, 41.2])\n comp2 = np.array([55.3, 56.5, 56.8, 56.4])\n comp3 = np.array([69.4, 69.2, 69.1, 68.7])\n comp4 = np.array([53.5, 53.3, 53.7, 53.0])\n comp5 = np.array([65.1, 65.4, 65.7, 65.7])\n\n comp = [comp1, comp2, comp3, comp4, comp5]\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n for i, j, f in zip(comp, range(1, 6), colors):\n popt, _ = curve_fit(SNR, (NSA, i / noise), i / noise, p0=np.array([2, 0.5]))\n k, n = popt\n y_new = SNR((NSA, i / noise), k, n)\n plt.plot(NSA, y_new, c=f)\n plt.plot(NSA, i / noise, \"o\", label=\"Compartment %i, n=%.2f\" % (j, n))\n\n plt.grid()\n plt.legend()\n plt.ylabel(\"SNR\")\n plt.xlabel(\"NSA\")\n plt.show()\n\n # curve fit to see if SNR goes as root of NSA", "def run_psavg_sims(bursttimefile):\n\n nfolder = [5,6,8,12]\n datadirs = [\"P20165/20165-01-01-000\", \"P20165/20165-01-01-001\", \"P20165/20165-01-01-002\",\n \"P10223/10223-01-03-01\", \"P10223/10223-01-03-010\" ]\n\n data_all, unbary_all, tstart_all, tend_all, t0_all, pcus_all, std1dir_all = [], [], [], [], [], [], []\n\n for d in datadirs:\n print(\"I am on directory %s\" %d)\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8192*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div-32768s*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8*.asc\")\n #print(\"File to use %s\" %files[0])\n data = rxte.RXTEData(times=None, channels=None, datafile=files[0], npcus=None, ra=None, dec=None, emid=None, emiddir=None, bary=True)\n\n len_datafile = 
len(files[0].split(\"/\")[-1])\n len_processed = len(files[0].split(\"/\")[-2])\n std1dir_all.append(files[0][:-(len_datafile+len_processed+1)])\n\n data_all.append(np.array([p.time for p in data.photons])+data.t0)\n unbary_all.append(np.array([p.unbary for p in data.photons])+data.t0)\n tstart_all.append(data.photons[0].unbary+data.t0)\n tend_all.append(data.photons[-1].unbary+data.t0)\n t0_all.append(data.t0)\n pcus_all.append(data.pcus)\n\n t0_sorted, tstart_sorted, tend_sorted, data_sorted, pcus_sorted, std1dir_sorted, unbary_sorted = \\\n zip(*sorted(zip(t0_all, tstart_all, tend_all, data_all, pcus_all, std1dir_all, unbary_all)))\n t0_sorted = np.array(t0_sorted)\n\n psno = [5,6,8,12]\n m_all = [30, 23, 23, 50]\n\n for n,m in zip(psno, m_all):\n psavg_all = sgr1900_results.make_randomly_sampled_periodograms(datadirs, bursttimefile, m, n=1000,\n save_step=100, fileroot=\"sgr1806_psavg%i\"%n,\n data_sorted=data_sorted, t0_sorted=t0_sorted,\n pcus_sorted=pcus_sorted, tend_sorted=tend_sorted,\n tstart_sorted=tstart_sorted,\n unbary_sorted=unbary_sorted)\n\n return", "def plot_seaice_predict_components_scatter(anomlous = False, temporal_resolution = 'monthly', spatial_resolution = 1, detrend = False, imagefolder = 'images/timeseries/prediction/',seaice_source='nsidc'):\n output_folder = 'processed_data/SIC/'\n if seaice_source == 'ecmwf':\n output_folder = 'processed_data/ERA5/SIC/'\n\n if anomlous:\n temp_decomp = 'anomalous'\n else:\n temp_decomp = 'raw'\n\n\n title = temp_decomp.capitalize() + ' '\n\n if detrend:\n dt = 'detrended'\n title += dt + ' '\n else:\n dt = 'raw'\n\n title += temporal_resolution\n title += ' SIE prediction'\n\n\n# Loading Seaice Trends\n seaicename = f'{temp_decomp}_{temporal_resolution}_{spatial_resolution}_{dt}'\n seaice = xr.open_dataset(output_folder + seaicename +'.nc')\n area = xr.open_dataset('data/area_files/processed_nsidc.nc').area\n seaice = seaice\n\n\n# Index contributions\n filename = f'processed_data/regressions/spatial_multiple/regr_{temp_decomp}_{temporal_resolution}_{dt}_{spatial_resolution}'\n dataset = xr.open_dataset(filename + '.nc')\n indicies = np.array([i for i in dataset])\n values = np.array([dataset[i].values for i in dataset])\n index_data = {}\n for indexname in indicies[:-1]:\n filename = f'{indexname}_{temp_decomp}_{temporal_resolution}_{dt}'\n index_data[indexname] = xr.open_dataset('processed_data/INDICIES/' + filename +'.nc')[indexname]\n index_data[indexname] = (index_data[indexname] - index_data[indexname].mean()) \n index_data[indexname] = index_data[indexname] / index_data[indexname].std()\n\n times = list(set.intersection(set(seaice.time.values), *(set(index_data[i].time.values)for i in indicies[:-1])))\n\n prediction = seaice.copy() * 0\n predictions = []\n for indexname in indicies:\n if indexname in index_data.keys():\n prediction += index_data[indexname].sel(time=times).sortby('time') * dataset[indexname]\n predictions += [index_data[indexname].sel(time=times).sortby('time') * dataset[indexname]]\n else:\n prediction += dataset[indexname]\n predictions += [dataset[indexname]]\n\n\n seaice = seaice.sortby('time').sel(time=times).sortby('time')\n prediction = prediction.sortby('time').sel(time=times).sortby('time')\n\n\n fig = plt.figure(figsize = [7,14])\n\n ax1 = fig.add_subplot(4,2,1)\n ax2 = fig.add_subplot(4,2,2)\n ax3 = fig.add_subplot(4,2,3)\n ax4 = fig.add_subplot(4,2,4)\n smallaxes = [ax1,ax2,ax3,ax4]\n AX = fig.add_subplot(2,1,2)\n x_data = (seaice[seaicename]*area/250).sum(dim = ('x', 'y'))\n for i in 
range(4):\n ax = smallaxes[i]\n indexname = indicies[i]\n predict = index_data[indexname].sel(time=times).sortby('time') * dataset[indexname]\n y_data = (predict*area/250).sum(dim = ('x', 'y'))\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(x_data, y_data)\n r, p = scipy.stats.pearsonr(x_data, y_data)\n ax.scatter(x_data, y_data)\n ax.plot(x_data,data_m*x_data+data_b)\n ax.set_title(indexname)\n lim = max([-min(x_data),-min(y_data),max(x_data),max(y_data)])*1.05\n ax.text(-lim*0.9, lim*0.55, f'gradient = {data_m:.2f}\\ncorrelation = {r:.2f}\\np-value = {p:.2f}')\n ax.set_ylim([-lim, lim])\n ax.set_xlim([-lim, lim])\n ax.axhline(0, alpha = 0.2)\n ax.axvline(0, alpha = 0.2)\n y_data = (prediction[seaicename]*area/250).sum(dim = ('x', 'y'))\n AX.scatter(x_data, y_data)\n data_m, data_b, data_r_value, data_p_value, data_std_err = scipy.stats.linregress(x_data, y_data)\n r, p = scipy.stats.pearsonr(x_data, y_data)\n AX.plot(x_data,data_m*x_data+data_b)\n AX.set_title('Total Prediction')\n lim = max([-min(x_data),-min(y_data),max(x_data),max(y_data)])*1.05\n AX.text(-lim*0.9, lim*0.7, f'gradient = {data_m:.2f}\\ncorrelation = {r:.2f}\\np-value = {p:.2f}')\n AX.set_ylim([-lim, lim])\n AX.set_xlim([-lim, lim])\n AX.axhline(0, alpha = 0.2)\n AX.axvline(0, alpha = 0.2)\n fig.suptitle(title)\n\n plt.savefig(imagefolder + seaicename + 'scatter.pdf')\n plt.show()", "def _write_nemo_hr_file(rpn_hr_ds_path, nemo_hr_ds_path):\n with xarray.open_dataset(rpn_hr_ds_path) as rpn_hr:\n logging.debug(\n f\"calculating specific humidity & incoming longwave radiation from {rpn_hr_ds_path}\"\n )\n qair, ilwr, rh = _calc_qair_ilwr(rpn_hr)\n u_out, v_out = _rotate_winds(rpn_hr)\n data_vars = {\n \"nav_lon\": rpn_hr.nav_lon,\n \"nav_lat\": rpn_hr.nav_lat,\n # [:, 0] drops z dimension that NEMO will not tolerate\n \"qair\": qair[:, 0],\n \"RH_2maboveground\": rh[:, 0],\n \"therm_rad\": ilwr[:, 0],\n \"u_wind\": u_out[:, 0],\n \"v_wind\": v_out[:, 0],\n # \"LHTFL_surface\": ** needs to be calculated**,\n }\n nemo_rpn_vars = (\n (\"atmpres\", \"PN\"),\n (\"percentcloud\", \"NT\"),\n (\"PRATE_surface\", \"RT\"),\n (\"precip\", \"PR\"),\n (\"solar\", \"FB\"),\n (\"tair\", \"TT\"),\n )\n missing_vars = \"\"\n for nemo_var, rpn_var in nemo_rpn_vars:\n try:\n # [:, 0] drops z dimension that NEMO will not tolerate\n data_vars.update({nemo_var: getattr(rpn_hr, rpn_var)[:, 0]})\n except AttributeError:\n # Variable is missing from RPN dataset, so provide a placeholder DataArray\n # full of NaNs that we will deal with later via interpolation\n data_vars.update(\n {nemo_var: xarray.DataArray(numpy.full_like(qair[:, 0], numpy.nan))}\n )\n missing_vars = (\n \", \".join((missing_vars, nemo_var)) if missing_vars else nemo_var\n )\n logging.warning(f\"missing RPN variable {rpn_var} from {rpn_hr_ds_path}\")\n nemo_hr = xarray.Dataset(\n data_vars=data_vars, coords=rpn_hr.coords, attrs=rpn_hr.attrs\n )\n nemo_hr.attrs[\"history\"] += (\n f\"\\n{arrow.now().format('ddd MMM DD HH:mm:ss YYYY')}: \"\n f\"Add specific and relative humidity and incoming longwave radiation variables from \"\n f\"correlations\"\n )\n if missing_vars:\n nemo_hr.attrs[\"missing_variables\"] = missing_vars\n _add_vars_metadata(nemo_hr)\n _write_netcdf_file(nemo_hr, nemo_hr_ds_path)", "def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, 
num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names", "def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})", "def writetipsy(self, snapshot, halos, tipsyoutfile, hubble=None):\n from . import analysis\n from . import tipsy\n from .analysis import cosmology\n from snapshot import _new as new\n import math\n s = snapshot\n outfile = tipsyoutfile\n nhalos = halos._nhalos\n nstar = nhalos\n sout = new(star=nstar) # create new tipsy snapshot written as halos.\n sout.properties['a'] = s.properties['a']\n sout.properties['z'] = s.properties['z']\n sout.properties['boxsize'] = s.properties['boxsize']\n if hubble is None:\n hubble = s.properties['h']\n sout.properties['h'] = hubble\n # ! dangerous -- rho_crit function and unit conversions needs simplifying\n rhocrithhco = cosmology.rho_crit(s, z=0, unit=\"Msol Mpc^-3 h^2\")\n lboxkpc = sout.properties['boxsize'].ratio(\"kpc a\")\n lboxkpch = lboxkpc * sout.properties['h']\n lboxmpch = lboxkpc * sout.properties['h'] / 1000.\n tipsyvunitkms = lboxmpch * 100. / (math.pi * 8. / 3.) ** .5\n tipsymunitmsun = rhocrithhco * lboxmpch ** 3 / sout.properties['h']\n\n for ii in xrange(nhalos):\n h = halos[ii + 1].properties\n sout.star[ii]['mass'] = h['mass'] / hubble / tipsymunitmsun\n # tipsy units: box centered at 0. 
(assume 0<=x<=1)\n sout.star[ii]['x'] = h['Xc'] / lboxkpch - 0.5\n sout.star[ii]['y'] = h['Yc'] / lboxkpch - 0.5\n sout.star[ii]['z'] = h['Zc'] / lboxkpch - 0.5\n sout.star[ii]['vx'] = h['VXc'] / tipsyvunitkms\n sout.star[ii]['vy'] = h['VYc'] / tipsyvunitkms\n sout.star[ii]['vz'] = h['VZc'] / tipsyvunitkms\n sout.star[ii]['eps'] = h['Rvir'] / lboxkpch\n sout.star[ii]['metals'] = 0.\n sout.star[ii]['phi'] = 0.\n sout.star[ii]['tform'] = 0.\n\n sout.write(fmt=tipsy.TipsySnap, filename=outfile)\n return sout", "def save2nifti(self, file_path):\n # Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 # signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 # 128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n # Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n header = nib.Nifti1Header()\n if self.data.shape[1] == 1:\n new_shape = (self.data.shape[0], 1, 1)\n else:\n new_shape = (self.data.shape[0], 1, 1, self.data.shape[1])\n data = self.data.reshape(new_shape)\n\n if data.dtype.type in data_type:\n header['datatype'] = data_type[data.dtype.type]\n header['cal_max'] = data.max()\n header['cal_min'] = data.min()\n image = nib.Nifti1Image(data, None, header)\n nib.nifti1.save(image, file_path)", "def IR():\n s = np.array(\n [2.40774137,2.287696084,2.203613927,2.048710132,1.899829585,1.591776247,\n 2.021218754,2.572949552,3.298381484,3.635993426,3.788266224,3.8307278,3.834208811]\n )\n\n TI = np.array([50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 1500, 2000, 3000])\n\n comp1 = s * np.array([-159.1,-134.2,-109.1,-64.7,25.0,40.1,88.6,126.8,187.6,219.4,245.4,253.6,256.1])\n comp2 = s * np.array([-368.3,-356.9,-343.8,-318.1,-292.0,-242.5,-199.3,-158.4,-68.8,14.2,131.9,219.5,333.5])\n comp3 = s * np.array([-77.5,-51.9,-29.8,9.9,40.2,85.7,115.4,135.1,160.1,167.6,172.3,171.7,171.8])\n comp4 = s * np.array([-265.0,-240.6,-216.7,-170.5,-128.2,-53.5,9.6,62.3,159.7,223.8,296.5,328.3,346.7])\n comp5 = s * np.array([-346.5,-328.9,-312.1,-278.5,-244.4,-182.3,-128.0,-80.0,30.8,109.3,225.1,299.5,372.2])\n\n comp = [comp1, comp2, comp3, comp4, comp5]\n MSE = []\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n x_new = np.linspace(0, 3000, 10000)\n for i, j, k in zip(comp, colors, np.arange(1, 6)):\n plt.scatter(TI, i, c=j)\n # popt, _ = curve_fit(MZ, TI, 
i, p0=np.array([200, 220, 300]))\n popt, _ = curve_fit(MZ, TI, i, p0=np.array([300, 220]))\n # M_z0, T1, M0 = popt\n M0, T1 = popt\n y_new = MZ(x_new, *popt)\n plt.plot(x_new, y_new, \"--\", c=j, label=f\"Fit Comp. {k:d} : $T_1$={T1:3.2f}\")\n MSE.append(mean_squared_error(i,y_new[TI]))\n print(MSE)\n print(np.mean(MSE))\n plt.grid()\n plt.legend(loc=\"best\")\n plt.xlabel(\"TI\")\n plt.ylabel(r\"Singal Intensity $M_z$\")\n plt.show()", "def save_ttest_metrics(self, ttest_metrics, fname, no_genes=20):\n\n top_genes = self.fetch_gene_descriptions(ttest_metrics, nih_fetch_num=no_genes, printme=False)\n eids = [int(i[0]) for i in top_genes]\n myfig = self.effect_size_distr(ttest_metrics, genes_of_interest=eids[0:no_genes], return_fig=True)\n plt.savefig(fname+'.png')\n\n with open(fname+'.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile)\n for i in top_genes:\n writer.writerow([i[0], i[3], i[1], i[2], i[4]])", "def IRIS_network(input):\n\n t_iris_1 = datetime.now()\n \n global events\n \n len_events = len(events)\n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n create_foders_files(events, eventpath)\n\n print 'IRIS-Folders are Created!'\n print \"--------------------\"\n \n Stas_iris = []\n \n for i in range(0, len_events):\n \n target_path = os.path.join(eventpath, events[i]['event_id'])\n Sta_iris = IRIS_available(input, events[i], target_path, event_number = i)\n Stas_iris.append(Sta_iris)\n \n if input['iris_bulk'] != 'Y':\n print 'IRIS-Availability for event: ' + str(i+1) + str('/') + \\\n str(len_events) + ' ---> ' + 'DONE'\n else:\n print 'IRIS-bulkfile for event : ' + str(i+1) + str('/') + \\\n str(len_events) + ' ---> ' + 'DONE'\n \n if input['get_continuous'] == 'Y' and input['iris_bulk'] == 'Y':\n for j in range(1, len_events):\n Stas_iris.append(Sta_iris)\n target_path = os.path.join(eventpath, events[j]['event_id'])\n shutil.copy2(os.path.join(eventpath, events[0]['event_id'], \\\n 'info', 'bulkdata-0.txt'), \\\n os.path.join(target_path, \\\n 'info', 'bulkdata-' + str(j) + '.txt'))\n print 'IRIS-Availability for event: ' + str(j+1) + str('/') + \\\n str(len_events) + ' ---> ' + 'DONE'\n break\n \n t_iris_2 = datetime.now()\n t_iris = t_iris_2 - t_iris_1\n print \"--------------------\"\n print 'IRIS-Time: (Availability)'\n print t_iris \n \n return Stas_iris", "def log(pro, logname, savepng=True):\n\n # Load processed data variables\n rawfiles = pro['rawfiles']\n transect = pro['transect']\n t120 = pro['t120' ]\n r120 = pro['r120' ]\n Sv120 = pro['Sv120' ]\n Sv120sw = pro['Sv120sw' ]\n t120r = pro['t120r' ]\n t120intrvls = pro['t120intervals']\n nm120r = pro['nm120r' ]\n lon120r = pro['lon120r' ]\n lat120r = pro['lat120r' ]\n sbline120r = pro['sbliner' ][0,:] \n NASC120swr = pro['NASC120swr'][0,:]\n pc120swr = pro['pc120swr'][0,:]\n \n # Build summary results\n results = {'Time' : np.array(t120r , dtype=str) ,\n 'Longitude': np.round(lon120r , 5) ,\n 'Latitude' : np.round(lat120r , 5) ,\n 'Transect' : np.ones(len(t120r ), dtype=int)*transect,\n 'Miles' : nm120r ,\n 'Seabed' : np.round(sbline120r , 1) ,\n 'NASC' : np.round(NASC120swr , 2) ,\n '% samples': np.round(pc120swr , 1) }\n results = pd.DataFrame(results, columns= ['Time' , 'Longitude',\n 'Latitude' , 'Transect' ,\n 'Miles' , 'Seabed' ,\n 'NASC' , '% samples'])\n \n # Create new log subdirectory\n path = os.path.join(os.path.dirname(__file__), '..', 'log', 
logname, '')\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Write results in CSV log file\n with open(path+logname+'.csv', 'a') as f:\n results.to_csv(path+logname+'.csv', index=False, mode='a',\n header=f.tell()==0) \n \n # save png image\n if savepng:\n \n # set figure\n plt.close()\n plt.figure(figsize=(8, 8))\n plt.subplots_adjust(left=0.066, right=1.055, bottom=0.065, top=0.985,\n wspace=0, hspace=0.05)\n plt.rcParams.update({'font.size': 9, 'lines.linewidth': 1})\n \n # plot raw echogram\n plt.subplot(211).invert_yaxis()\n im=plt.pcolormesh(t120, r120, Sv120,\n vmin=-80, vmax=-50, cmap=cmaps().ek500)\n plt.colorbar(im).set_label('Sv raw (dB re 1m$^{-1}$)')\n plt.gca().set_ylim(270,0)\n plt.gca().set_ylabel('Depth (m)')\n plt.gca().set_xlim(t120intrvls[0], t120intrvls[-1])\n plt.gca().set_xticks(t120intrvls[[0,-1]])\n plt.tick_params(labelright=False, labelbottom=False)\n \n # plot processed echogram\n ax= plt.subplot(212)\n ax = [ax, ax.twinx()]\n im=ax[0].pcolormesh(t120, r120, Sv120sw,\n vmin=-80,vmax=-50, cmap=cmaps().ek500)\n plt.colorbar(im).set_label('Sv pro (dB re 1m$^{-1}$)')\n ax[0].invert_yaxis()\n ax[0].set_ylim(270,0)\n ax[0].set_ylabel('Depth (m)')\n \n # overlay distance/NASC info\n for t, nm, NASC in zip(t120r, nm120r, NASC120swr):\n ax[1].plot([t, t], [0, 1], color=[0,.8,0], linewidth=2)\n ax[1].text(t, .95, ' ' + str(transect) + ': ' + str(round(nm,2)),\n fontweight='bold', color=[0,.8,0])\n ax[1].text(t, .02, ' ' + str(round(NASC,2)),\n fontweight='bold', color=[1,0,0]) \n ax[1].set_ylim(0, 1)\n ax[1].set_xlim(t120intrvls[0], t120intrvls[-1])\n ax[1].set_xticks(t120intrvls[[0,-1]])\n ax[1].tick_params(labelright=False)\n ax[1].xaxis.set_major_formatter(mdates.DateFormatter('%d%b-%H:%M:%S'))\n \n # save figure\n pf = rawfiles[0].split('-')[0]\n fn = pd.to_datetime(str(t120[0])).strftime(pf + '-D%Y%m%d-T%H%M%S')\n plt.savefig(path+fn+'.png' ,figsize=(8, 8), dpi=100)\n plt.close()", "def make_sim(args):\n flux, nread, iexp = args\n satlevel=65535\n\n #outdir = \"sim_data_flux\" + str(flux)\n #outdir = \"sim_data_flux\" + str(flux) + \"_nonl\"\n outdir = \"sim_\" + \"exposure\" + str(iexp) + \"_flux\" + str(flux)\n pathlib.Path(outdir).mkdir(parents=True, exist_ok=True) \n \n for i in range(1, nread+1):\n logger.info(\"Looping through UTR {}\".format(i))\n #outfileprefix = outdir + \"/exposure\" + str(iexp) + \"_utr\" + str(i)\n outfileprefix = outdir + \"/utr\" + str(i)\n refoutfileprefix = outdir + \"/utr\" + str(i) + \"_ref\"\n\n primary_hdu = fits.PrimaryHDU()\n outhdus = fits.HDUList([primary_hdu])\n outhdus_ref = fits.HDUList([primary_hdu])\n\n ahdu = fits.HDUList([primary_hdu]) #for a coefficients\n bhdu = fits.HDUList([primary_hdu]) #for b coefficients\n\n #loop all 16 detectors\n ndet = 16\n logger.info(\"Looping through all 16 detectors\")\n for idet in range(ndet):\n detid = detids[idet]\n logger.info(\"Looping through the detector {}, {}\".format(idet+1, detid)) \n\n flux_actual = np.zeros((2048, 2048))\n #start with the measured flux, constant spatially but varying temporally\n flux_actual[4:2044,4:2044] = flux * TREAD * i #fill data region with measured flux, (ADU/f) * s = ADU ???\n flux_actual *= Gain #e-??\n logger.info(\"Filled with measured flux\")\n\n #calculate nonlinearity from calibration data, varing spatially but not temporally, unit e-??\n flux_nonlinear = make_nonlinear(flux_actual, outfileprefix, ahdu, bhdu, detid) \n #flux_nonlinear = flux_actual #no non-linearity\n logger.info(\"Made non-linearized flux\")\n\n #add pedestal, 
constant spatially very varying temporally\n pedestal = random.randint(0, 1000) #set the random pedistal (between 0 - 1000) for this exposure, unit e-??\n flux_nonlinear[:, :] += pedestal #add the pedestal to all of the data including the reference pixels.\n logger.info(\"Added pedestal\")\n\n #add noise (e-)\n readnoise_array = np.random.normal(size=[2048,2048]) * ReadNoise #generate the read noise, the reference pixels should also have noise. \n flux_nonlinear += readnoise_array #add the read noise to the data array\n logger.info(\"Added read noise\")\n\n # set any pixel above the saturation limit to the saturation limit.\n # This can set to 65535 for all pixels for now, but should be able to \n # take a map since this will vary pixel to pixel. \n ind = np.where(flux_nonlinear > satlevel)\n flux_nonlinear[ind] = satlevel\n logger.info(\"Checked saturation level\")\n\n #add this dector to the final simulated data\n hdu = fits.ImageHDU(data=flux_nonlinear, name=detid)\n outhdus.append(hdu)\n\n #do reference pixel correction\n updownCorr(flux_nonlinear)\n leftrightCorr(flux_nonlinear)\n\n #add this dector to another output\n hdu1 = fits.ImageHDU(data=flux_nonlinear, name=detid)\n outhdus_ref.append(hdu1)\n \n\n #write the sim data for this UTR\n outfile = outfileprefix + \".fits\"\n #logger.info(\"Writing sim data to: {}\".format(outfile))\n #outhdus.writeto(outfile)\n\n refoutfile = refoutfileprefix + \".fits\"\n logger.info(\"Writing reference pixel corrected sim data to: {}\".format(refoutfile))\n outhdus_ref.writeto(refoutfile)\n\n #ahdu.writeto(outfileprefix + \"_a_coef.fits\")\n #bhdu.writeto(outfileprefix + \"_b_coef.fits\")", "def read_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n fraw = pjoin(folder, 'DSI203.nii.gz')\n fbval = pjoin(folder, 'DSI203.bval')\n fbvec = pjoin(folder, 'DSI203.bvec')\n md5_dict = {'data': '950408c0980a7154cb188666a885a91f',\n 'bval': '602e5cb5fad2e7163e8025011d8a6755',\n 'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',\n 'license': '7fa1d5e272533e832cc7453eeba23f44'}\n\n check_md5(fraw, md5_dict['data'])\n check_md5(fbval, md5_dict['bval'])\n check_md5(fbvec, md5_dict['bvec'])\n check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])\n\n bvals, bvecs = read_bvals_bvecs(fbval, fbvec)\n bvecs[1:] = bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]\n\n gtab = gradient_table(bvals, bvecs)\n img = nib.load(fraw)\n return img, gtab", "def to_nifti(self,folder_path: str):\n data_path = settings.STORAGE_DIR\n path = folder_path \n nifti=series.get_series_object(path) \n nifti_str=str(nifti)\n nifti_str=nifti_str[1:44]\n if nifti_str=='dicom_to_cnn.model.reader.SeriesCT.SeriesCT': \n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')\n if nifti_str=='dicom_to_cnn.model.reader.SeriesPT.SeriesPT':\n nifti.get_instances_ordered() \n nifti.get_numpy_array()\n nifti.set_ExportType('suv')\n image_md5 = hashlib.md5(str(nifti).encode())\n image_id = image_md5.hexdigest()\n img=nifti.export_nifti(data_path+'/image/image_'+image_id+'.nii')", "def store_std_dev_of_noises(src_file: H5File) -> None:\n mean_perp_noise = src_file.attrs['perp_noise_mean']\n mean_par_noise = src_file.attrs['par_noise_mean']\n mean_ref_noise = src_file.attrs['ref_noise_mean']\n perp_sum = 0\n par_sum = 0\n ref_sum = 0\n counts = 0\n for path in 
rawnav.pump_group_paths(src_file):\n perp_path = path + '/perp'\n par_path = path + '/par'\n ref_path = path + '/ref'\n perp_noise = src_file[perp_path].attrs['noise']\n par_noise = src_file[par_path].attrs['noise']\n ref_noise = src_file[ref_path].attrs['noise']\n perp_sum += (perp_noise - mean_perp_noise) ** 2\n par_sum += (par_noise - mean_par_noise) ** 2\n ref_sum += (ref_noise - mean_ref_noise) ** 2\n counts += 1\n src_file.attrs['perp_noise_std_dev'] = np.sqrt(perp_sum / (counts - 1))\n src_file.attrs['par_noise_std_dev'] = np.sqrt(par_sum / (counts - 1))\n src_file.attrs['ref_noise_std_dev'] = np.sqrt(ref_sum / (counts - 1))\n return", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def PCO1S12Noise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/TestOptics_PCO1S12/'\n d1,dx1 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas3.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f13,pow13 = fourier.meanPSD((d1-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f13],[pow12,pow23,pow13])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f13,pow13/f13[0],label='1-3: %.2f' % midfreq[2])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: PCO1S12')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def 
final_states_figure_file(self, it, avg):\n return self.output_file(self.figures_dir(), 'final_states', it, avg, 'png')", "def n27_and_stark():\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4.5, 4))\n # n=27 without static field.\n folder = os.path.join(\"..\", \"..\", \"2018-09-09\")\n fname = \"11_freq_diode.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['afpoly'] = data['fpoly'] + 4511\n ax.axhline(0, color='grey')\n data.plot(x='afpoly', y='sig', c='k', ax=ax)\n # n=27 with static field.\n fname = \"10_freq_diode.txt\"\n fname = os.path.join(folder, fname)\n data = pmu.fscan_import(fname)\n data['afpoly'] = data['fpoly'] + 4511\n data['asig'] = data['sig'] + 0.15\n ax.axhline(0.15, color='grey')\n data.plot(x='afpoly', y='asig', c='k', ax=ax)\n # text\n ax.text(-5.5, 0.2, \"3 V/cm\", horizontalalignment='right')\n ax.text(-5.5, 0.01, \"0 V/cm\", horizontalalignment='right')\n # pretty figure\n ax.legend().remove()\n ax.set_ylabel(r\"$e^-$ Signal\")\n ax.set_yticks([])\n ax.set_xlabel(r\"Frequency (GHz from $3d_{5/2} \\rightarrow 27f$)\")\n ax.set_xlim(-8, 7)\n # save\n fig.tight_layout()\n fig.savefig('n27_and_stark.pdf')\n return", "def plot_seir(sir, T, tmin = 0, tmax = 200, absolute=True, figsize=(10,10), facecolor='LightGrey'):\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, axisbelow=True)\n ax.set_facecolor(facecolor)\n\n if absolute:\n S = sir.S * sir.N\n E = sir.E * sir.N\n I = sir.I * sir.N\n R = sir.R * sir.N\n Y = 1.1 * sir.N\n else:\n S = sir.S\n E = sir.E\n I = sir.I\n R = sir.R\n Y = 1.1\n\n ax.plot(sir.t, S, 'k', alpha=0.5, lw=3, label='Susceptible')\n ax.plot(sir.t, I, 'b', alpha=0.5, lw=3, label='Infected')\n ax.plot(sir.t, E, 'r', alpha=0.5, lw=3, label='Exposed')\n ax.plot(sir.t, R, 'g', alpha=0.5, lw=3, label='Recovered')\n xlabel = 'Time /days'\n if absolute:\n ylabel = 'Total population'\n else:\n ylabel = 'Fraction of population'\n\n set_pretty_ax(ax, facecolor, xlabel, ylabel, tmin, tmax, Y)\n plt.title(T)\n plt.show()", "def tsne_projection(train_file, gen_file, i):\n train_data = pd.read_csv(train_file)\n def get_ori_prom(row):\n if row['gaps'] <= 2 and row['dips'] <=2:\n return 'Model'+str(i)+'_train_promising'\n else:\n return 'Model'+str(i)+'_train'\n train_data['label'] = train_data.apply(get_ori_prom, axis=1)\n train_data = train_data.drop(['id', 'gaps', 'dips'], axis=1)\n gen_prom_smiles = pd.read_csv(gen_file)\n gen_prom_smiles = gen_prom_smiles.drop(['Group', 'Gaps', 'Dips'], axis=1)\n gen_prom_smiles['label'] = 'Generated_promising'\n all_smi = pd.concat([train_data, gen_prom_smiles])\n mols = get_mols(all_smi.SMILES)\n fps, _ = get_fingerprints(mols)\n fp_embeded = TSNE(n_components=2, perplexity=100).fit_transform(fps)\n all_smi['tsne1'] = fp_embeded[:, 0]\n all_smi['tsne2'] = fp_embeded[:, 1]\n return all_smi, len(train_data)", "def plot_stairway_Ne_estimate(infile, outfile):\n nt = pandas.read_csv(infile, sep=\"\\t\", skiprows=5)\n nt = nt[nt['year'] > 10]\n f, ax = plt.subplots(figsize=(7, 7))\n ax.set(xscale=\"log\", yscale=\"log\")\n ax.plot(nt['year'], nt['Ne_median'], c=\"red\")\n ax.plot(nt['year'], nt['Ne_2.5%'], c='grey')\n ax.plot(nt['year'], nt['Ne_97.5%'], c='grey')\n f.savefig(outfile, bbox_inches='tight')", "def make_fakeSN_spectrum(snfile, galfile, params, outfile, err=False, signal_noise=20.0, z=0.0, wave_range=[3842, 8195]):\n snt=loadtext(snfile)\n galt=loadsdss(fits.open(galfile))\n minsalt=list(snt.wavelength).index(3840)\n maxsalt=list(snt.wavelength).index(8200)\n 
snt.wavelength=snt.wavelength[minsalt:maxsalt+1]\n snt.flux=snt.flux[minsalt:maxsalt+1]\n igal=np.interp(snt.wavelength, galt.wavelength, galt.flux)\n \n #foflux=fake observed flux\n foflux=(snt.flux*params[0] + igal*params[1]) \n \n if err:\n foflux = signal_noise**2 * foflux\n foflux = np.random.poisson(foflux)\n fofluxerr=foflux**0.5\n \n# if err:\n# foflux = foflux + np.random.normal(0.0, 0.1, foflux.shape)\n# fofluxerr=0.1*foflux\n \n if not err:\n fofluxerr=0.1*foflux\n \n fout=open(outfile, 'w')\n for i in range(len(snt.wavelength)):\n fout.write('%f %f %f\\n' % (snt.wavelength[i], foflux[i], fofluxerr[i]))\n fout.close()\n \n return fout", "def to_nii(self, outbase, spirec='spirec', saveInOut=False):\n if self.image_data is None:\n self.recon(spirec)\n\n image_tlhc = np.array([self.header.image.tlhc_R, self.header.image.tlhc_A, self.header.image.tlhc_S])\n image_trhc = np.array([self.header.image.trhc_R, self.header.image.trhc_A, self.header.image.trhc_S])\n image_brhc = np.array([self.header.image.brhc_R, self.header.image.brhc_A, self.header.image.brhc_S])\n #image_cent = np.array([self.header.image.ctr_R, self.header.image.ctr_A, self.header.image.ctr_S])\n\n row_vec = (image_trhc-image_tlhc)/np.sqrt(np.dot(image_trhc-image_tlhc, image_trhc-image_tlhc))\n col_vec = -(image_trhc-image_brhc)/np.sqrt(np.dot(image_trhc-image_brhc, image_trhc-image_brhc))\n # The DICOM standard defines these two unit vectors in an LPS coordinate frame, but we'll\n # need RAS (+x is right, +y is anterior, +z is superior) for NIFTI. So, we compute them\n # such that row_vec points to the right and col_vec points up.\n # Not sure if we need to negate the slice_norm. From the NIFTI-1 header:\n # The third column of R will be either the cross-product of the first 2 columns or\n # its negative. It is possible to infer the sign of the 3rd column by examining\n # the coordinates in DICOM attribute (0020,0032) \"Image Position (Patient)\" for\n # successive slices. However, this method occasionally fails for reasons that I\n # (RW Cox) do not understand.\n\n # can also get slice_norm from: slice_norm = np.cross(row_vec, col_vec)\n slice_norm = np.array([self.header.image.norm_R, self.header.image.norm_A, self.header.image.norm_S])\n slice_fov = np.abs(self.header.series.start_loc - self.header.series.end_loc)\n\n # This is either the first slice tlhc (image_tlhc) or the last slice tlhc. How to decide?\n # And is it related to wheather I have to negate the slice_norm?\n # Tuned this empirically by comparing spiral and EPI data with the sam Rx.\n # Everything seems reasonable, except the test for axial orientation (start_ras==S|I).\n # I have no idea why I need that! But the flipping only seems necessary for axials, not\n # coronals or the few obliques I've tested.\n # FIXME: haven't tested sagittals! 
(to test for spiral: 'sprt' in self.psd_name.lower())\n if (self.header.series.start_ras=='S' or self.header.series.start_ras=='I') and self.header.series.start_loc > self.header.series.end_loc:\n pos = image_tlhc - slice_norm*slice_fov\n # FIXME: since we are reversing the slice order here, should we change the slice_order field below?\n self.image_data = self.image_data[:,:,::-1,]\n if self.fm_data is not None:\n self.fm_data = self.fm_data[:,:,::-1,]\n else:\n pos = image_tlhc\n\n if self.num_bands > 1:\n pos = pos - slice_norm * self.band_spacing_mm * (self.num_bands - 1.0) / 2.0\n\n qto_xyz = np.zeros((4,4))\n qto_xyz[0,0] = row_vec[0]\n qto_xyz[0,1] = col_vec[0]\n qto_xyz[0,2] = slice_norm[0]\n\n qto_xyz[1,0] = row_vec[1]\n qto_xyz[1,1] = col_vec[1]\n qto_xyz[1,2] = slice_norm[1]\n\n qto_xyz[2,0] = row_vec[2]\n qto_xyz[2,1] = col_vec[2]\n qto_xyz[2,2] = slice_norm[2]\n\n qto_xyz[:,3] = np.append(pos, 1).T\n qto_xyz[0:3,0:3] = np.dot(qto_xyz[0:3,0:3], np.diag(self.mm_per_vox))\n\n nii_header = nibabel.Nifti1Header()\n nii_header.set_xyzt_units('mm', 'sec')\n nii_header.set_qform(qto_xyz, 'scanner')\n nii_header.set_sform(qto_xyz, 'scanner')\n\n nii_header['slice_start'] = 0\n nii_header['slice_end'] = self.num_slices - 1\n # nifti slice order codes: 0 = unknown, 1 = sequential incrementing, 2 = seq. dec., 3 = alternating inc., 4 = alt. dec.\n slice_order = 0\n nii_header['slice_duration'] = self.tr * 1000 / self.num_slices\n # FIXME: check that this is correct.\n if self.header.series.se_sortorder == 0:\n slice_order = 1 # or 2?\n elif self.header.series.se_sortorder == 1:\n slice_order = 3 # or 4?\n nii_header['slice_code'] = slice_order\n\n # Note: the freq/phase dir isn't meaningful for spiral trajectories.\n if self.header.image.freq_dir==1:\n nii_header.set_dim_info(freq=1, phase=0, slice=2)\n else:\n nii_header.set_dim_info(freq=0, phase=1, slice=2)\n\n # FIXME: There must be a cleaner way to set the TR! 
Maybe bug Matthew about it.\n nii_header.structarr['pixdim'][4] = self.tr\n nii_header.set_slice_duration(nii_header.structarr['pixdim'][4] / self.num_slices)\n nii_header.structarr['cal_max'] = self.image_data.max()\n nii_header.structarr['cal_min'] = self.image_data.min()\n\n if self.num_echoes == 1:\n nifti = nibabel.Nifti1Image(self.image_data, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n elif self.num_echoes == 2:\n if saveInOut:\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,0], None, nii_header)\n nibabel.save(nifti, outbase + '_in.nii.gz')\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,1], None, nii_header)\n nibabel.save(nifti, outbase + '_out.nii.gz')\n # FIXME: Do a more robust test for spiralio!\n # Assume spiralio, so do a weighted average of the two echos.\n # FIXME: should do a quick motion correction here\n w_in = np.mean(self.image_data[:,:,:,:,0], 3)\n w_out = np.mean(self.image_data[:,:,:,:,1], 3)\n inout_sum = w_in + w_out\n w_in = w_in / inout_sum\n w_out = w_out / inout_sum\n avg = np.zeros(self.image_data.shape[0:4])\n for tp in range(self.image_data.shape[3]):\n avg[:,:,:,tp] = w_in*self.image_data[:,:,:,tp,0] + w_out*self.image_data[:,:,:,tp,1]\n nifti = nibabel.Nifti1Image(avg, None, nii_header)\n nibabel.save(nifti, outbase + '.nii.gz')\n else:\n for echo in range(self.num_echoes):\n nifti = nibabel.Nifti1Image(self.image_data[:,:,:,:,echo], None, nii_header)\n nibabel.save(nifti, outbase + '_echo%02d.nii.gz' % echo)\n\n if self.fm_data is not None:\n nii_header.structarr['cal_max'] = self.fm_data.max()\n nii_header.structarr['cal_min'] = self.fm_data.min()\n nifti = nibabel.Nifti1Image(self.fm_data, None, nii_header)\n nibabel.save(nifti, outbase + '_B0.nii.gz')", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the 
TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def do(sts):\n sts = sts.replace(tzinfo=pytz.timezone(\"UTC\"))\n\n ets = sts + datetime.timedelta(days=1)\n\n interval = datetime.timedelta(hours=3)\n\n now = sts\n while now < ets:\n archivefn = now.strftime((\"/mesonet/ARCHIVE/data/%Y/%m/%d/model/\"\n \"NARR/rad_%Y%m%d%H%M.nc\"))\n if os.path.isfile(archivefn):\n now += interval\n continue\n uri = now.strftime((\"http://nomads.ncdc.noaa.gov/thredds/ncss/\"\n \"grid/narr-a/%Y%m/%Y%m%d/\"\n \"narr-a_221_%Y%m%d_%H00_000.grb?\"\n \"var=Downward_shortwave_radiation_flux&spatial=all\"\n \"&temporal=all\"))\n\n try:\n req = urllib2.Request(uri)\n data = urllib2.urlopen(req).read()\n except:\n print 'NARR Download failed for: %s' % (uri,)\n sys.exit()\n\n tmpfn = tempfile.mktemp()\n o = open(tmpfn, 'w')\n o.write(data)\n o.close()\n\n cmd = (\"/home/ldm/bin/pqinsert -p 'data a %s bogus \"\n \"model/NARR/rad_%s.nc nc' %s\"\n ) % (now.strftime(\"%Y%m%d%H%M\"),\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(cmd, shell=True)\n\n os.remove(tmpfn)\n now += interval", "def save_grtrans_image(grt_obj):\n I_im = grt_obj.ivals[:,0,0].reshape(npix,npix).flatten()\n Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix).flatten()\n U_im = 
grt_obj.ivals[:,2,0].reshape(npix,npix).flatten()\n V_im = grt_obj.ivals[:,3,0].reshape(npix,npix).flatten()\n\n # convert to Tb\n factor = 3.254e13/(RF**2 * psize_rad**2)\n I_im *= factor\n Q_im *= factor\n U_im *= factor\n V_im *= factor\n\n x = np.array([[i for i in range(npix)] for j in range(npix)]).flatten()\n y = np.array([[j for i in range(npix)] for j in range(npix)]).flatten()\n\n x -= npix/2\n y -= npix/2\n x = x*psize_uas\n y = y*psize_uas\n\n outdat = np.vstack((x.T,y.T,I_im.T,Q_im.T,U_im.T,V_im.T)).T\n np.savetxt('../rrjet_and_riaf/'+FNAME,outdat)\n #np.savetxt('../rrjet_and_riaf/grtrans_jet_compare_positron_noconv.txt',outdat)\n return", "def store_individual_noises(src_file: H5File) -> None:\n paths = rawnav.pump_group_paths(src_file)\n for path in paths:\n par_path = path + '/par'\n perp_path = path + '/perp'\n ref_path = path + '/ref'\n points = len(src_file[perp_path])\n for channel in [perp_path, par_path, ref_path]:\n signal_data = np.empty(points, dtype=np.float64)\n src_file[channel].read_direct(signal_data)\n src_file[channel].attrs['noise'] = noise(signal_data)\n return", "def ns_analysis():\r\n\r\n global params\r\n n = params['n']\r\n level = params['level']\r\n nsmin = params['nsmin']\r\n nsmax = params['nsmax']\r\n nsstep = params['nsstep']\r\n bf = params['bf']\r\n po = params['po']\r\n pe = params['pe']\r\n pf = params['pf']\r\n ns_dir = create_dir(TIMES_PATH + 'ns')\r\n filename = datetime.datetime.now().strftime(DATE_FORMAT) + '.csv'\r\n with open(ns_dir + filename, 'wb') as csvfile:\r\n fw = csv.writer(csvfile)\r\n fw.writerow(('lv', 'ns', 'nt', 'no', 'ne', 'nf',\r\n 'method1', 'method2', 'method3_1', 'method3_2'))\r\n ns = nsmin\r\n while ns <= nsmax:\r\n nt = int(round(bf * ns))\r\n no = max(int(round(po * nt)), 1)\r\n ne = max(int(round(pe * no)), 1)\r\n nf = max(int(round(pf * nt)), 1)\r\n times = [0] * 4\r\n for i in range(n):\r\n times = map(sum, zip(times, get_times(level, ns, nt, no, ne, nf)))\r\n show_progress('ns', ns, nsmin, nsmax, nsstep, i, n)\r\n times = map(lambda x: float(x) / n, times)\r\n fw.writerow((level, ns, nt, no, ne, nf) + tuple(times))\r\n ns += nsstep\r\n csvfile.close()", "def do_SIR(self, t_max=200, dt=1.):\n dt = float(dt)\n\n g = Graph()\n\n for node in ['I', 'C', 'R', 'H', 'B', 'U', 'D']:\n g.add_node(node, 0)\n\n g.set_node('I', self.N_init)\n\n # cumulative time series\n I = [g.get_node_value('I')] # noqa Infected\n C = [g.get_node_value('C')] # Confirmed\n R = [g.get_node_value('R')] # Recovered\n\n ts = [0.] 
# time series\n nms = ['prob', 'lag']\n\n # En este modelo todos los infectados se confirman a los 10\n # dias y se curan a los 20 dias de confirmados\n T_IC = int(self.t_incubation / dt)\n T_CR = 20\n f_IC = 1.\n f_CR = 1.\n\n g.add_edge('I', 'I', nms, [self.R, 0])\n g.add_edge('I', 'C', nms, [f_IC, T_IC])\n g.add_edge('C', 'R', nms, [f_CR, T_CR])\n\n t, time_steps = 0., 0\n while t < t_max:\n\n time_steps = time_steps + 1\n\n t = t + dt\n ts.append(t)\n\n # (( I ))\n prob_II = g.get_edge('I', 'I', 'prob')\n\n prob_IC = g.get_edge('I', 'C', 'prob')\n lag_IC = g.get_edge('I', 'C', 'lag')\n update_IC = I[-lag_IC] if lag_IC < len(I) else 0.\n\n n_I = (\n min(I[-1] + I[-1] * prob_II * dt, self.population) - # noqa\n update_IC * prob_IC * dt)\n n_I = max(n_I, 0)\n\n I.append(n_I)\n\n # (( C ))\n prob_CR = g.get_edge('C', 'R', 'prob')\n lag_CR = g.get_edge('C', 'R', 'lag')\n update_CR = C[-lag_CR] if lag_CR < len(C) else 0.\n\n n_C = (\n min(C[-1] + update_IC * prob_IC * dt, self.population) - # noqa\n update_CR * prob_CR * dt)\n n_C = max(n_C, 0)\n C.append(n_C)\n\n # (( R ))\n # recuperados nuevos\n n_R = min(R[-1] + update_CR * prob_CR * dt, self.population)\n n_R = max(n_R, 0)\n R.append(n_R)\n\n df = pd.DataFrame(\n {'ts': ts, 'I': I, 'C': C, 'R': R}).set_index(\"ts\")\n\n extra = attr.asdict(self)\n extra[\"model_name\"] = \"SIR\"\n return ModelResultFrame(df=df, extra=extra)", "def save_nii(img_path, data, affine, header):\n nimg = nib.Nifti1Image(data, affine=affine, header=header)\n nimg.to_filename(img_path)", "def save(self, rs, ths, xs, ys, vals, file=None):\n\n\t\tif file is None:\n\t\t\tfile = 'PR_c%d_r%d_th%d.h5' % (self.cbins, self.rbins, self.thbins)\n\n\t\twith File(file, 'w') as file:\n\t\t\tfile.create_dataset('cbins' , data=self.cbins)\n\t\t\tfile.create_dataset('rbins' , data=self.rbins)\n\t\t\tfile.create_dataset('thbins', data=self.thbins)\n\t\t\tfile.create_dataset('rs' , data=rs)\n\t\t\tfile.create_dataset('ths' , data=ths)\n\t\t\tfile.create_dataset('xs' , data=xs)\n\t\t\tfile.create_dataset('ys' , data=ys)\n\t\t\tfile.create_dataset('vals' , data=vals)", "def astrometryScan(self, plot=False, writeOut=False, overwrite=False,\n store_intermediate=False, max_err_um = .5,\n max_GD_um=None, sigma_clipping=10.0, Nmax=None,\n min_snr=4):\n et = []\n t = [] # time at maximum\n snr = [] # SNR at max\n if Nmax==None:\n Nmax = self.scan_nscans\n for k in range(np.minimum(self.scan_nscans, int(Nmax))):\n tmp = self.scanGetTimeMaxEnv(k)\n # time of maximum:\n t.append( tmp[0])\n # err bar in t:\n et.append(10e-6)\n snr.append(tmp[1])\n\n t = np.array(t)\n et = np.array(et)\n snr = np.array(snr)\n\n # get FTK status around the time of maximum\n ftk_status = []\n for tau in t:\n w = np.where(np.abs(self.raw['OPDC'].data.field('TIME')-tau)<1e5)\n tmp = self.raw['OPDC'].data.field('STATE')[w].mean()\n ftk_status.append(tmp)\n ftk_status = np.array(ftk_status)\n\n w = np.where((ftk_status>=5.0))\n print ' useful data: %d/%d' % ( len(w[0]), len(t))\n t = t[w]\n et = et[w]\n snr = snr[w]\n ftk_status=ftk_status[w]\n\n # FSU which is tracking\n fsu_opdc = self.getKeyword('DEL FT SENSOR')\n\n if self.insmode=='SWAPPED' and \\\n self.getKeyword('DEL FT SENSOR')=='FSUB':\n print \" -> I suspect a wrong FSU config in \"+\\\n \"the header based on the INS.MODE\"\n if fsu_opdc == 'FSUB':\n fsu_dopdc = 'FSUA'\n else:\n fsu_dopdc = 'FSUB'\n\n ### group delay dOPDC\n GDdopdc = lambda x: np.interp(x,\n self.raw['IMAGING_DATA_'+fsu_dopdc].data.field('TIME'),\n 
self.raw['IMAGING_DATA_'+fsu_dopdc].data.field('GD'))\n\n ### group delay OPDC\n GDopdc = lambda x: np.interp(x,\n self.raw['IMAGING_DATA_'+fsu_opdc].data.field('TIME'),\n self.raw['IMAGING_DATA_'+fsu_opdc].data.field('GD'))\n\n ### primet A-B\n deltal = lambda x: np.interp(x,\n self.raw['METROLOGY_DATA'].data.field('TIME'),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n\n ### astrometric observable\n deltal_2 = deltal(t) + GDopdc(t) - GDdopdc(t)\n\n ### error bar = agreement between AC and BD\n err = 0.5*np.abs(deltal(t+et) + GDopdc(t+et) -\n deltal(t-et) - GDopdc(t-et) )\n\n ### select points with good agreement\n if not min_snr is None:\n w = np.where(snr > min_snr)\n print 'error clipping:', len(err), '->', len(w[0])\n else:\n w = range(len(err))\n\n deltal_2 = deltal_2[w]\n t = t[w]\n err = err[w]\n snr = snr[w]\n mjd = t*1e-6/(24*3600.0)+self.mjd_start\n\n if not sigma_clipping is None:\n n_before = len(mjd)\n for k in range(10):\n c = np.polyfit(mjd-mjd.mean(), deltal_2,1)\n # residuals\n res = deltal_2 - np.polyval(c,mjd-mjd.mean())\n s = res.argsort()\n pseudoSTD = 0.5*(res[s[int(.84*len(s))]] -\n res[s[int(.16*len(s))]])\n print k, 'STD=', pseudoSTD\n werr = np.where(np.abs(res)<sigma_clipping*pseudoSTD)\n if len(werr[0]) == len(err):\n break\n deltal_2 = deltal_2[werr]\n t = t[werr]\n err = err[werr]\n mjd = mjd[werr]\n snr = snr[werr]\n err = np.ones(len(err))*pseudoSTD/np.sqrt(snr-min_snr+1)\n print 'sigma clipping:', n_before, '->', len(mjd)\n\n if writeOut:\n hdu = pyfits.PrimaryHDU(None)\n # copy original header\n for i in self.raw[0].header.items():\n if len(i[0])>8:\n hie = 'HIERARCH '\n else:\n hie = ''\n if len(i)==2:\n hdu.header.update(hie+i[0], i[1])\n elif len(i)==3:\n hdu.header.update(hie+i[0], i[1], i[2])\n # prepare data for a binary table\n cols=[]\n cols.append(pyfits.Column(name='MJD', format='F12.5',\n array=mjd))\n cols.append(pyfits.Column(name='D_AL', format='E', unit='m',\n array=deltal_2))\n cols.append(pyfits.Column(name='D_AL_ERR', format='E', unit='m',\n array=err))\n hducols = pyfits.ColDefs(cols)\n hdub = pyfits.new_table(hducols)\n hdub.header.update('EXTNAME', 'ASTROMETRY_BINNED', '')\n\n # combine all HDUs\n thdulist = pyfits.HDUList([hdu, hdub])\n # write file\n outfile = os.path.join(\n self.dirname, self.filename.split('.')[0]+'_RED.fits')\n if overwrite and os.path.exists(outfile):\n os.remove(outfile)\n print 'writting ->', outfile\n thdulist.writeto(outfile)\n thdulist=[]\n return outfile\n\n if plot:\n plt.plot(13)\n plt.clf()\n ax1 = plt.subplot(211)\n plt.plot(mjd, deltal_2, '.b')\n if not sigma_clipping is None:\n plt.plot(mjd, np.polyval(c,mjd-mjd.mean()), 'b-')\n plt.plot(mjd, np.polyval(c,mjd-mjd.mean())+pseudoSTD, 'y-')\n plt.plot(mjd, np.polyval(c,mjd-mjd.mean())-pseudoSTD, 'y-')\n\n plt.ylabel('position of max env')\n plt.subplot(212, sharex=ax1)\n plt.plot(mjd, snr, '.k')\n #plt.yscale('log')\n plt.ylabel('SNR')\n return", "def findROICentres(smt_file): #{\n rois = []\n ary = None\n state = 0\n roi_centres = { \n 'head_ct': None,\n 'neck_cb': None,\n 'shoulder_l': None,\n 'shoulder_r': None,\n 'elbow_l': None,\n 'elbow_r': None,\n 'pelvis_c': None,\n 'wrist_l': None,\n 'wrist_r': None,\n 'hip_l': None,\n 'hip_r': None,\n 'knee_l': None,\n 'knee_r': None,\n 'ankle_l': None,\n 'ankle_r': None,\n 'toes_e': None}\n\n vrbMsg(1, 'findROICentres() smt_file = ' + smt_file)\n # Read the smoothed Woolz object and create a NumPy array from it\n err_num, smt_obj = readWoolzObj(smt_file)\n if(not bool(err_num)): #{\n\t err_num, org, 
ary = wlzObjToNP(smt_obj)\n #}\n sz = np.shape(ary)\n vrbMsg(5, 'findROICentres() object size = ' + str(sz))\n # Work down the scan finding coordinates, ordering has dependency\n # but this is checked in the individual functions\n if((sz[0] >= img_size_min) and (sz[1] >= img_size_min)): #{\n findProfileHeadCT(roi_centres, ary)\n findProfileToes(roi_centres, ary)\n #}\n if(bool(roi_centres['toes_e']) and bool(roi_centres['head_ct'])): #{\n findProfileShoulders(roi_centres, ary)\n findProfilePelvis(roi_centres, ary)\n findProfileHip(roi_centres, ary)\n findProfileElbow(roi_centres, ary)\n findProfileWrist(roi_centres, ary)\n findProfileKnee(roi_centres, ary)\n findProfileAnkle(roi_centres, ary)\n rois = roi_centres\n for cen in rois: #{\n # numpy gives int64 which is not always handled (eg by json) so convert\n pos = rois[cen]\n if(not (pos is None)): #{\n rois[cen] = [int(pos[0]), int(pos[1])]\n #}\n #}\n #}\n vrbMsg(1, 'findROICentres() rois = ' + str(rois))\n return rois", "def _ion_densities_datafiles(self):\n ne = self.ne_in\n nD = self.ni_in[0,:]\n nC = (ne-nD)/6.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def G_SNR(T2s, TEs):\r\n N = len(TEs)\r\n TEs = TEs-TEs[0]\r\n \r\n return N / np.sqrt(np.sum(np.exp(TEs / T2s)**2))", "def write_nifti(self, output_path):\n nib.save(self.niftiImage, output_path)\n print('Image saved at: {}'.format(output_path))" ]
[ "0.5856596", "0.58353704", "0.58029324", "0.5760767", "0.57194346", "0.5685501", "0.565635", "0.5610267", "0.5596072", "0.5580801", "0.5550373", "0.55146116", "0.55122143", "0.54595554", "0.53850317", "0.53831476", "0.53568536", "0.5345135", "0.53432506", "0.5322765", "0.53195083", "0.53150904", "0.5302821", "0.52760804", "0.52736145", "0.5260181", "0.5254817", "0.5254216", "0.52344", "0.5229716", "0.5220142", "0.52192575", "0.5204042", "0.52023816", "0.51953423", "0.5193932", "0.51785934", "0.51547456", "0.5154285", "0.51498765", "0.514857", "0.5143521", "0.51421195", "0.51262033", "0.5123879", "0.5122884", "0.51143014", "0.5112798", "0.5095889", "0.50789094", "0.50750756", "0.506163", "0.5052254", "0.50511503", "0.5049222", "0.5045908", "0.5045428", "0.504462", "0.50369334", "0.5034644", "0.5033192", "0.50259715", "0.50240034", "0.5023302", "0.50165206", "0.50113857", "0.50038594", "0.500213", "0.49938756", "0.49925864", "0.49857765", "0.49842328", "0.4969718", "0.49659893", "0.4964805", "0.49613428", "0.4959475", "0.49505994", "0.49436814", "0.49411607", "0.49392116", "0.49369198", "0.49339637", "0.49323758", "0.49283168", "0.49233967", "0.49222884", "0.4918073", "0.4905633", "0.48999876", "0.4894735", "0.48910412", "0.4890585", "0.48867083", "0.4877011", "0.48760593", "0.4871597", "0.4870998", "0.48682794", "0.48654935" ]
0.78557074
0
Flip axes to orientation fslview expects.
def FSLFlip(self, infile, prefix):
    cmd = '3dresample -orient LPI -prefix %s.nii -inset %s+orig' % \
          (prefix, infile)
    self.CheckExec(cmd, ['%s.nii' % prefix])
    fname = '%s+orig.BRIK' % infile
    if os.path.exists(fname):
        os.remove(fname)
    fname = '%s+orig.HEAD' % infile
    if os.path.exists(fname):
        os.remove(fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flip(self, axes=None):\n axes = self._make_axes_as_num(axes)\n vals = self.values\n if 0 in axes:\n vals = vals[::-1, :]\n if 1 in axes:\n vals = vals[:, ::-1]\n return Signal2D(vals, index=self.index, columns=self.columns)", "def flip_axes(input_file, flipx=True, flipy=True, flipz=False,\n use_matrix=False, use_header=True):\n import os\n import numpy as np\n import nibabel as nb\n\n # Load image volume\n img = nb.load(input_file)\n dat = img.get_data()\n if use_matrix:\n mat = img.get_affine()\n if use_header:\n hdr = img.get_header()\n lenx, leny, lenz = np.shape(dat)\n dat_new = np.zeros((lenx, leny, lenz))\n\n # Flip x\n if flipx:\n for x in range(lenx):\n dat_new[lenx-1-x,:,:] = dat[x,:,:]\n\n # Flip y\n if flipy:\n for y in range(leny):\n dat_new[:,leny-1-y,:] = dat[:,y,:]\n\n # Flip z\n if flipz:\n for z in range(lenz):\n dat_new[:,:,lenz-1-z] = dat[:,:,z]\n\n # Save output\n out_file = 'reorient_' + os.path.basename(input_file)\n if use_matrix:\n if use_header:\n img = nb.Nifti1Image(dat_new, mat, hdr)\n else:\n img = nb.Nifti1Image(dat_new, mat)\n elif use_header:\n img = nb.Nifti1Image(dat_new, np.eye(4,4), hdr)\n else:\n img = nb.Nifti1Image(dat_new, np.eye(4,4))\n\n img.to_filename(out_file)\n\n return out_file", "def flip_plot_axes(self):\n self.plot_inverted = not self.plot_inverted\n\n if not self.plot_inverted:\n self.draw_plot(self.data_x_axis, self.data_y_axis, self.label_x_axis, self.label_y_axis)\n else:\n self.draw_plot(self.data_y_axis, self.data_x_axis, self.label_y_axis, self.label_x_axis)", "def transform(x: np.array, params: TransformParams) -> np.array:\n if params.do_hor_flip:\n x = flip_axis(x, 1)\n\n if params.do_vert_flip:\n x = flip_axis(x, 0)\n\n return x", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def flip(self, axes=None, inplace=False, i=False):\n d = _inplace_enabled_define_and_cleanup(self)\n super(DimensionCoordinate, d).flip(axes=axes, inplace=True)\n\n direction = d._custom.get(\"direction\")\n if direction is not None:\n d._custom[\"direction\"] = not direction\n\n return d", "def flip(self, mode='h'):\n # TODO: Implement the flip function. 
Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n if mode == 'h':\n self.is_horizontal_flip = True\n self.x = np.flipud(self.x)\n elif mode == 'v':\n self.is_vertical_flip = True\n self.x = np.fliplr(self.x)\n else:\n self.is_vertical_flip = True\n self.is_horizontal_flip = True\n self.x = np.fliplr(self.x)\n self.x = np.flipud(self.x)\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def mirror_axis(self, axis_index=0, reverse=False):\n self.smd3.mirror(axis_index=axis_index, reverse=reverse)\n self.logic.mirror(axis_index=axis_index, reverse=reverse)\n min_vector, max_vector = self.smd3.get_min_max_vector()\n self.header.set_box(min_vector, max_vector)\n self.header.update(self.smd3)\n self.logic.update(self.smd3)\n # self.meta.mirror(axis_index=axis_index, reverse=reverse)", "def toggle_axis(self):\n # cycle through three axis states\n states = [False, 'world', 'all']\n # the state after toggling\n index = (states.index(self.view['axis']) + 1) % len(states)\n # update state to next index\n self.view['axis'] = states[index]\n # perform gl actions\n self.update_flags()", "def set_orientation(self, axes):\n if debug:\n logger.debug('set_orientation ...')\n logger.debug('%s -> %s', str(self.axes_names), str(axes))\n\n if set(axes) != set(self.axes_names):\n raise Exception('Required orientation %s does not contain '\n 'all axes %s' % (str(axes), str(self.axes_names)))\n\n if axes == self.axes_names: # already in the asked orientation\n return\n\n for i, axis in enumerate(axes):\n logger.debug('Rolling axis %s, cur pos=%d -> dest pos=%d',\n axis, self.axes_names.index(axis), i)\n logger.debug('Shape: %s', str(self.data.shape))\n cur_i = self.axes_names.index(axis)\n self.data = np.rollaxis(self.data, cur_i, i)\n self.axes_names.pop(cur_i)\n self.axes_names.insert(i, axis)\n logger.debug('After rolling. 
Shape: %s, new axes: %s',\n str(self.data.shape), str(self.axes_names))\n logger.debug('')\n\n self.axes_ids = dict([(a, i) for i, a in enumerate(self.axes_names)])", "def flip(tensor, axis=None):\n raise NotImplementedError", "def flip(a, axis):\n a_ndim = a.ndim\n if a_ndim < 1:\n raise core.core._AxisError('Input must be >= 1-d')\n\n axis = int(axis)\n if not -a_ndim <= axis < a_ndim:\n raise core.core._AxisError(\n 'axis must be >= %d and < %d' % (-a_ndim, a_ndim))\n\n return _flip(a, axis)", "def _update_data_transforms(self, axisOrder='col-major'):\n self._dataTransform = QtGui.QTransform()\n self._inverseDataTransform = QtGui.QTransform()\n if self.axisOrder == 'row-major': # transpose both\n self._dataTransform.scale(1, -1)\n self._dataTransform.rotate(-90)\n self._inverseDataTransform.scale(1, -1)\n self._inverseDataTransform.rotate(-90)", "def convert_flip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n\n for i, ax in enumerate(axis):\n if i == 0:\n out = _op.reverse(x, ax)\n else:\n out = _op.reverse(out, ax)\n\n g.add_node(op.output(\"Out\")[0], out)", "def _apply_swap(self, state, axes, **kwargs):\n all_axes = list(range(len(state.shape)))\n all_axes[axes[0]] = axes[1]\n all_axes[axes[1]] = axes[0]\n return self._transpose(state, all_axes)", "def flip_dir(self, axis):\n res = self.empty_like()\n # Negate directions and quantum numbers in form data.\n res.dirs[axis] *= -1\n res.qhape[axis] = [self._qod_func(-q) for q in res.qhape[axis]]\n # Flip the quantum numbers of the keys of self.sects.\n keys = set(self.sects.keys())\n while keys:\n k = keys.pop()\n kf = list(k)\n kf[axis] = self._qod_func(-kf[axis])\n kf = tuple(kf)\n # Swap self[kf] and self[k], at least if both exist.\n vf = self[k]\n if kf in keys:\n v = self[kf]\n keys.discard(kf)\n res[k] = v\n res[kf] = vf\n return res", "def change_orientation(self):\n self.shape = self.shape.T", "def reorderAxesEvent(self):\n axisB = self.sender().text()\n self.myParent.swapAxes(self.axisName, axisB)\n self.myParent.setVistrailsVariableAxes()", "def flip(self):", "def inverse_transform(data, flip, rotate):\n if flip == 'FLIP_LEFT_RIGHT':\n data = np.fliplr(data)\n\n if rotate == 'ROTATE_90':\n data = np.rot90(data, 3)\n\n if rotate == 'ROTATE_180':\n data = np.rot90(data, 2)\n\n if rotate == 'ROTATE_270':\n data = np.rot90(data, 1)\n\n return data", "def swapaxes(self, a1, a2):\n an = self.axes_names[:]\n ia1, ia2 = self.get_axis_id(a1), self.get_axis_id(a2)\n an[ia2], an[ia1] = an[ia1], an[ia2]\n return xndarray(np.swapaxes(self.data, ia1, ia2), an, self.axes_domains,\n self.value_label, self.meta_data)", "def change_axis_names(self, axis_map):\n axes = self.axes\n\n # Partition axes\n self.axes = [axis_map[axis] for axis in axes]\n\n # Flipped axes\n flip = self.flip\n if flip:\n self.flip = [axis_map[axis] for axis in flip]", "def flip(self, xflip=True, yflip=False):\n self.drawer.flush()\n img = self.img\n if xflip: img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n if yflip: img = img.transpose(PIL.Image.FLIP_TOP_BOTTOM)\n self.img = img\n self.update_drawer_img()\n return self", "def flip_vertical(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n #flipping image vertically\r\n newimg = im.transpose(PIL.Image.FLIP_TOP_BOTTOM)\r\n return img", "def flip(self):\n self.width, self.height = self.height, self.width", "def flip_x(self):\n self.x_lim_helper.flip_limits()", "def flip_x(self):\n self.x_lim_helper.flip_limits()", "def flip(self, horizontal):\n try:\n 
self._is_transformable()\n horizontal = get_int(horizontal)\n except NotTransformable as e:\n self._app[\"statusbar\"].message(str(e) + \" flip\", \"error\")\n return\n except StringConversionError as e:\n self._app[\"statusbar\"].message(str(e), \"error\")\n return\n images = self.get_images(\"Flipped\")\n # Apply changes\n for fil in images:\n if fil not in self._changes:\n self._changes[fil] = [0, 0, 0]\n if horizontal:\n self._changes[fil][1] = \\\n (self._changes[fil][1] + 1) % 2\n else:\n self._changes[fil][2] = \\\n (self._changes[fil][2] + 1) % 2\n # Flip the image shown\n if self._app.get_path() in images:\n self.emit(\"changed\", \"flip\", horizontal)\n # Reload thumbnails of flipped images immediately\n if self._app[\"thumbnail\"].toggled:\n self.apply()", "def saveflip(image, fname, outpath, axis='x', preserve_name=False):\n if not preserve_name:\n fpath = genSavePath(outpath, fname, modstring=f\"mirror_{axis}\")\n else:\n fpath = genSavePath(outpath, fname)\n im = copy(image)\n if axis == 'x':\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif axis == 'y':\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n else:\n raise Exception('No valid axis procvided for flipping: {} - received value: {}'.format(fname, axis))\n try:\n im.save(fpath, subsample=\"keep\", qtables=image.quantization, optimize=True)\n\n except IOError as m:\n print( \"Flipped({}) image creation failed for: {}. \\nReason:{}\".format(axis,fname,m))", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def reverse_axis(self, x: bool, y: bool, z: bool) -> None:\n current_directions = self.i2c.read(self.Register.AXIS_MAP_SIGN, 1)[0]\n if x:\n current_directions |= (1 << 2)\n else:\n current_directions &= ~(1 << 2)\n if y:\n current_directions |= (1 << 1)\n else:\n current_directions &= ~(1 << 1)\n if z:\n current_directions |= (1 << 0)\n else:\n current_directions &= ~(1 << 0)\n self.i2c.write(self.Register.AXIS_MAP_SIGN, current_directions)", "def swapaxes(a, axis1, axis2):\n # TODO(okuta): check type\n return a.swapaxes(axis1, axis2)", "def vflip(img):\n #if not _is_pil_image(img):\n # raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n return img.transpose(Image.FLIP_TOP_BOTTOM)", "def flip(self, horizontally):\n\t\tself.currentPixbuf = self.currentPixbuf.flip(horizontally)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()", "def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()", "def flip_image(img, vert=True):\n if vert:\n return img.transpose(Image.FLIP_TOP_BOTTOM)\n else:\n return img.transpose(Image.FLIP_LEFT_RIGHT)", "def reorder_axes(cls, array: np.ndarray, axes: str) -> np.ndarray:\n return cls._reorder_axes(array, axes, cls.array_axis_order)", "def flip_horizontal(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n\r\n #flipping image horizontally\r\n newimg = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\r\n \r\n return img", "def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()", "def orient_img_hwd(data, slice_axis):\n if slice_axis == 0:\n return data.transpose(2, 1, 0)\n elif slice_axis == 1:\n return data.transpose(2, 0, 1)\n elif slice_axis == 2:\n return data", "def flip(self):\n self.align = self._left if self.align == self._right else self._right\n self.group.layout_all()", "def adjust_axes(axes):\n # TODO: Uncomment & decide for each subplot!\n for ax in axes.itervalues():\n core.hide_axis(ax)\n\n for k in [\n \"placeholder\",\n \"placeholder1\",\n \"placeholder2\",\n \"spikes_stim\",\n \"spikes_stim1\",\n \"spikes_stim2\",\n \"spikes_post\",\n \"stimulation_schema\"\n ]:\n axes[k].set_frame_on(False)", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def flip(self, bev_direction='horizontal', points=None):\n assert bev_direction in ('horizontal', 'vertical')\n if bev_direction == 'horizontal':\n self.tensor[:, 0::7] = -self.tensor[:, 0::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6] + np.pi\n elif bev_direction == 'vertical':\n self.tensor[:, 2::7] = -self.tensor[:, 2::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6]\n\n if points is not None:\n assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints))\n if isinstance(points, (torch.Tensor, np.ndarray)):\n if bev_direction == 'horizontal':\n points[:, 0] = -points[:, 0]\n elif bev_direction == 'vertical':\n points[:, 2] = -points[:, 2]\n elif isinstance(points, BasePoints):\n points.flip(bev_direction)\n return points", "def flipud(self) -> xr.DataArray | xr.Dataset:\n y_dim = self.y_dim\n # NOTE don't use ycoords to work for rotated grids\n yrev = self._obj[y_dim].values[::-1]\n obj_filpud = self._obj.reindex({y_dim: yrev})\n # y_dim is typically a dimension without coords in rotated grids\n if y_dim not in self._obj.coords:\n obj_filpud = obj_filpud.drop_vars(y_dim)\n return obj_filpud", "def reorient(self, orientation):\n if orientation == self.axes_names:\n return self\n else:\n new_c = self.copy()\n new_c.set_orientation(orientation)\n return new_c", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def SwapAxis(self, axis0, axis1):\n\n axis0 = int(axis0)\n axis1 = int(axis1)\n\n self.points[:,[axis0,axis1]] = self.points[:,[axis1,axis0]]", "def flip_augmentation():\n return lambda image: ImageOps.flip(image)", "def test_tranform_1(self):\n fig_test = 
plt.figure()\n ax = fig_test.add_subplot(111, projection='ternary')\n ax.plot([0, 1], [0, 1], transform=ax.transAxes)", "def vflip(img, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n h_axis = _get_image_h_axis(data_format)\n\n return img.flip(axis=[h_axis])", "def reverseXZrotation(self):\n rot = np.zeros((3, 3), dtype = 'float64')\n rot[0, 0] = np.cos(self.currtheta)\n rot[0, 1] = -np.sin(self.currtheta)\n rot[0, 2] = 0.0\n rot[1, 0] = np.sin(self.currtheta) * np.cos(self.currphi)\n rot[1, 1] = np.cos(self.currtheta) * np.cos(self.currphi)\n rot[1, 2] = - np.sin(self.currphi)\n rot[2, 0] = np.sin(self.currtheta) * np.sin(self.currphi)\n rot[2, 1] = np.cos(self.currtheta) * np.sin(self.currphi)\n rot[2, 2] = np.cos(self.currphi)\n\n self.ds = np.dot(rot, self.ds1)", "def mirrorImage(self):\n\n im = Image.open(self.ActivePhoto)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))\n print (\"Flipped image\")", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def set_MRI_orientation(self):\n\n if self.has_axes(MRI3Daxes):\n orientation = MRI3Daxes[:]\n if self.has_axis('time'):\n orientation += ['time']\n if self.has_axis('iteration'):\n orientation += ['iteration']\n if self.has_axis('condition'):\n orientation += ['condition']\n\n orientation += sorted(set(self.axes_names).difference(orientation))\n\n self.set_orientation(orientation)", "def forward_transform(data, flip, rotate):\n if flip == 'FLIP_LEFT_RIGHT':\n data = np.fliplr(data)\n\n if rotate == 'ROTATE_90':\n data = np.rot90(data, 1)\n\n if rotate == 'ROTATE_180':\n data = np.rot90(data, 2)\n\n if rotate == 'ROTATE_270':\n data = np.rot90(data, 3)\n\n return data", "def __flip(img, flip, flip_type=Image.FLIP_LEFT_RIGHT):\n if flip:\n return img.transpose(flip_type)\n return img", "def flip_image_vertical(image):\n return cv.flip(image, 1)", "def flip(self, x, y):\n self.pieces[x + (y * self.width)].flip()", "def setHorizontalFlip(self, flag):\n\t\tself.flipHorizontally = flag", "def clean_axes(axl):\n cleanAxes(axl)", "def flip_image(image_path, saved_location):\n image_obj = Image.open(image_path)\n rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)\n rotated_image.save(saved_location)", "def ipix_swap_axes(self, ipix, colwise=False):\n xy = self.ipix_to_xypix(ipix, colwise)\n return self.xypix_to_ipix(xy, not colwise)", "def clean_axes(axes, left=True, right=False):\n if not type(axes) in [np.ndarray, list]:\n axes = [axes]\n elif type(axes) == np.ndarray:\n axes = axes.flatten()\n for axis in axes:\n axis.tick_params(direction='out')\n axis.spines['top'].set_visible(False)\n if not right:\n axis.spines['right'].set_visible(False)\n if not left:\n axis.spines['left'].set_visible(False)\n axis.get_xaxis().tick_bottom()\n axis.get_yaxis().tick_left()", "def edges_flip_orientation(edges):\n edges_flipped = []\n for e in edges:\n edges_flipped.insert(0, (e[1], e[0]))\n return edges_flipped", "def flip(self, xbool, ybool):\n self._surf = pygame.transform.flip(self._surf, xbool, ybool).convert_alpha()", "def flipped_dimensions(transformation, size):\n dim = len(size)\n # transform start point\n start = [0.0] * dim\n transformed_start = transformation.TransformPoint(start)\n flipped = [False] * dim\n for i in range(dim):\n # set current end point and transform it\n end = [0.0] * dim\n end[i] = size[i]\n transformed_end = transformation.TransformPoint(end)\n # check, if transformed_start and 
transformed_end changed position\n flipped[i] = transformed_start[i] > transformed_end[i]\n return flipped", "def flip_image(image, rects, axis = 1):\n\n if axis not in [-1,0,1]:\n return image, rects\n\n height = image.shape[0]\n widht = image.shape[1]\n \n image = cv2.flip(image,axis)\n\n if axis == 1:\n rects[:,1] = widht - rects[:,1]\n rects[:,3] = widht - rects[:,3]\n rects[:,3], rects[:,1] = rects[:,1].copy(),rects[:,3].copy()\n elif axis == 0:\n rects[:,2] = height - rects[:,2]\n rects[:,4] = height - rects[:,4]\n rects[:,4], rects[:,2] = rects[:,2].copy(),rects[:,4].copy()\n elif axis == -1:\n rects[:,1] = widht - rects[:,1]\n rects[:,3] = widht - rects[:,3]\n rects[:,3], rects[:,1] = rects[:,1].copy(),rects[:,3].copy()\n rects[:,2] = height - rects[:,2]\n rects[:,4] = height - rects[:,4]\n rects[:,4], rects[:,2] = rects[:,2].copy(),rects[:,4].copy()\n\n return image, rects", "def reflect(self, axis):\n if axis == \"x\":\n self.y = - self.y\n elif axis == \"y\":\n self.x = - self.x\n else:\n print(\"The argument axis only accepts values 'x' and 'y'!\")", "def _flip_isolated_fluxes(l: Lattice, bonds: np.ndarray, fluxes: np.ndarray):\n indices_to_flip = np.where(fluxes == -1)[0]\n\n def pos(p):\n return l.plaquettes[p].center\n\n def distance_func(a, b):\n return straight_line_length(pos(a), pos(b))\n\n close_pairs = _greedy_plaquette_pairing(indices_to_flip, distance_func)\n\n for a, b in close_pairs:\n plaquettes, edges_to_flip = path_between_plaquettes(l,\n a,\n b,\n maxits=l.n_edges)\n bonds[edges_to_flip] *= -1\n fluxes[a] *= -1\n fluxes[b] *= -1\n\n return bonds, fluxes", "def rescale_axes(self, x=True, y=True, xlim=None, ylim=None, \n tighten_up=0): \n \n # First, figure out what limits should be\n col_xlim = [[1e10, -1e10] for i in range(self.dims[0])]\n row_ylim = [[1e10, -1e10] for i in range(self.dims[1])]\n \n # Loop over axes\n for i in range(self.N):\n if self.grid[i] is None:\n continue\n \n # column, row\n j, k = self.axis_position(i)\n \n if self.above_diagonal(i):\n continue\n \n if x and xlim is None:\n col_xlim[j][0] = min(col_xlim[j][0], self.grid[i].dataLim.min[0])\n col_xlim[j][1] = max(col_xlim[j][1], self.grid[i].dataLim.max[0]) \n elif x:\n col_xlim[j][0] = xlim[0]\n col_xlim[j][1] = xlim[1]\n \n if self.diagonal is not None and i in self.diag:\n continue\n \n if y and (ylim is None): \n row_ylim[k][0] = min(row_ylim[k][0], self.grid[i].dataLim.min[1])\n row_ylim[k][1] = max(row_ylim[k][1], self.grid[i].dataLim.max[1]) \n elif y:\n row_ylim[k][0] = ylim[0]\n row_ylim[k][1] = ylim[1] \n \n # Apply limits \n for i in range(self.N):\n if self.grid[i] is None:\n continue\n \n # column, row \n j, k = self.axis_position(i)\n \n col_tmp = [col_xlim[j][0] * (1. + tighten_up * np.sign(col_xlim[j][0])),\n col_xlim[j][1] * (1. - tighten_up * np.sign(col_xlim[j][1]))]\n \n row_tmp = [row_ylim[k][0] * (1. + tighten_up * np.sign(row_ylim[k][0])),\n row_ylim[k][1] * (1. 
- tighten_up * np.sign(row_ylim[k][1]))]\n\n # Kludge\n if np.all(np.isfinite(col_tmp)):\n self.grid[i].set_xlim(col_tmp)\n \n if self.diagonal and i in self.diag:\n continue\n\n if np.all(np.isfinite(row_tmp)):\n self.grid[i].set_ylim(row_tmp)\n\n pl.draw()", "def flip_vertical(original_image: Image) -> Image :\r\n \r\n new_image = copy(original_image)\r\n \r\n pixel_width = get_width(original_image)\r\n pixel_height = get_height(original_image) \r\n\r\n \r\n for x in range(pixel_width) :\r\n for y in range(pixel_height) :\r\n original_vertical_pixel = get_color(original_image, x, y)\r\n opposite_vertical_pixel = pixel_height - 1 - y\r\n set_color(new_image, x, opposite_vertical_pixel, original_vertical_pixel)\r\n \r\n return new_image", "def apply_transforms(self):\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)", "def reverse_transformation(self, rot90, flip_lr=False):\n\n transformed_ROI = copy.deepcopy(self) #Make a copy of itself to transform and return\n\n # Undo the left-right flip\n if flip_lr:\n transformed_ROI.center = (transformed_ROI.center[0], self.img_shape[1] - transformed_ROI.center[1])\n\n #Undo the rotation of the image\n cent = transformed_ROI.center\n if rot90 == 1:\n transformed_ROI.center = (cent[1], self.img_shape[0] - cent[0])\n transformed_ROI.size = (transformed_ROI.size[1], transformed_ROI.size[0])\n elif rot90 == 2:\n transformed_ROI.center = (self.img_shape[0] - cent[0], self.img_shape[1] - cent[1])\n elif rot90 == 3:\n transformed_ROI.center = (self.img_shape[1] - cent[1], cent[0])\n transformed_ROI.size = (transformed_ROI.size[1], transformed_ROI.size[0])\n\n return transformed_ROI", "def flipNormals(self):\n self.flip = not self.flip", "def side_wheel_from_axis():", "def _swap_axis(input_tensor, dim_index, last_index, name=None):\n return array_ops.transpose(\n input_tensor,\n array_ops.concat([\n math_ops.range(dim_index), [last_index],\n math_ops.range(dim_index + 1, last_index), [dim_index]\n ], 0),\n name=name)", "def test_surfer6binary_invert_axis_methods(text_grd_file):\n\n grd = Surfer6TextGrid.load(text_grd_file)\n\n dm = np.arange(150).reshape(15, 10)\n\n assert np.alltrue(grd.dm == dm)\n assert grd.ylo == 10\n assert grd.yhi == 38\n assert grd.xhi == 9\n assert grd.xlo == 0\n grd.invert_yaxis()\n assert np.alltrue(grd.dm == dm[::-1, :])\n assert grd.ylo == 38\n assert grd.yhi == 10\n grd.invert_xaxis()\n assert np.alltrue(grd.dm == dm[::-1, ::-1])\n assert grd.xlo == 9\n assert grd.xhi == 0\n grd.invert_yaxis()\n assert np.alltrue(grd.dm == dm[:, ::-1])\n grd.invert_xaxis()\n assert np.alltrue(grd.dm == dm)\n assert grd.ylo == 10\n assert grd.yhi == 38\n assert grd.xhi == 9\n assert grd.xlo == 0", "def swap_axis(ogr_geom):\n\n osr_sref = ogr_geom.GetSpatialReference()\n sref = SpatialRef.from_osr(osr_sref)\n if (sref.epsg == 4326) and GDAL_3_ENABLED and (osr_sref.GetAxisMappingStrategy() == 1):\n ogr_geom.SwapXY()\n osr_sref.SetAxisMappingStrategy(0)\n ogr_geom.AssignSpatialReference(osr_sref)\n\n return ogr_geom", "def flip(self, x: bool, y: bool) -> 'BaseImage':\n assert isinstance(x, bool)\n assert isinstance(y, bool)\n assert (x or y), 'at least one axis should be True'\n self._surface = pygame.transform.flip(self._surface, x, y)\n return self", "def right_angle_axes(self, right_angle_axes):\n\n self.container['right_angle_axes'] = right_angle_axes", "def flip(x, dim):\n dim = x.dim() + dim if dim < 0 else dim\n inds = tuple(slice(None, None) if i != dim\n else x.new(torch.arange(x.size(i) - 1, -1, 
-1).tolist()).long()\n for i in range(x.dim()))\n return x[inds]", "def setVerticalFlip(self, flag):\n\t\tif self.ext.lower() in [\"png\", \"jpg\", \"jpeg\"]:\n\t\t\tself.flipVertically = not flag\n\t\telse:\n\t\t\tself.flipVertically = flag", "def inverse_transform(self, Xt):\n # Inverse transform\n columns = []\n start = 0\n\n for j in range(self.n_dims):\n dim = self.dimensions[j]\n offset = dim.transformed_size\n\n if offset == 1:\n columns.append(dim.inverse_transform(Xt[:, start]))\n else:\n columns.append(dim.inverse_transform(Xt[:, start : start + offset]))\n\n start += offset\n\n # Transpose\n rows = []\n\n for i in range(len(Xt)):\n r = []\n for j in range(self.n_dims):\n r.append(columns[j][i])\n\n rows.append(r)\n\n return rows", "def shift_axes(self, shift, axes=None):\n axes = self._get_axes_numbers(axes)\n shift = self._cook_args(shift, axes, [0.0, 0.0])\n return Signal2D(self.values, index=self.index-shift[0], columns=self.columns-shift[1])", "def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')", "def set_axes(self, a):\r\n self.axes = a", "def reverse_x():\n plt.xlim(plt.xlim()[::-1])", "def setLabelOrientation(orientation='horizontal',axes='XYZ'):\n orientdict = {'horizontal':'HORI', 'vertical':'VERT'}\n dislin.labtyp(orientdict[orientation],axes)", "def flip_quat(orientation):\n r,p,y = quat_to_euler(orientation)\n r += np.pi\n return euler_to_quat(r,p,y)", "def rotate_ship(self):\n\t\tif self.cur_orient == GameBoard.O_HORIZONTAL:\n\t\t\tself.cur_orient = GameBoard.O_VERTICAL\n\t\telse:\n\t\t\tself.cur_orient = GameBoard.O_HORIZONTAL", "def SetAxisOrientation(*args, **kwargs):\n return _gdi_.DC_SetAxisOrientation(*args, **kwargs)", "def _flip_adjacent_fluxes(l: Lattice, bonds: np.ndarray, fluxes: np.ndarray):\n for edge_index, (p_a, p_b) in enumerate(l.edges.adjacent_plaquettes):\n if (p_a == INVALID) or (p_b == INVALID):\n break\n if (fluxes[p_a] == -1) and (fluxes[p_b] == -1):\n bonds[edge_index] *= -1\n fluxes[p_a] *= -1\n fluxes[p_b] *= -1\n\n #attempt at vectorising, check this at somepoint\n #adj_fluxes = fluxes[l.edges.adjacent_plaquettes]\n #to_flip = np.where((adj_fluxes[:, 0] == -1) & (adj_fluxes[:, 1] == -1))\n #bonds[to_flip] *= -1\n #fluxes_to_flip = l.edges.adjacent_plaquettes[to_flip].flatten()\n #fluxes[fluxes_to_flip] *= -1\n\n return bonds, fluxes", "def flip_boxes_horizontally(boxes):\n # Flip boxes horizontally.\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_xmin = tf.subtract(1.0, xmax)\n flipped_xmax = tf.subtract(1.0, xmin)\n flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_boxes", "def restore_orientation_matrix(self):\n self.Umat = self.init_p", "def flip(h):\n return np.flip(h)", "def flip(h):\n return np.flip(h)", "def convert_axis( mv, axisold, axisindnew ):\n (axisnew, indexina3) = axisindnew\n axes = allAxes(mv)\n kold = None\n for k in range(len(axes)):\n if axes[k]==axisold: 
kold=k\n if kold==None:\n print \"ERROR. convert_axis cannot find axis\",axisold,\" in variable\",mv\n if len(axisold)==len(axisnew):\n mv.setAxis( kold, axisnew )\n return\n # Here's what we would do in 1-D:\n # newdata = ma.ones(len(axisnew))*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # for i in range(len(axisold)):\n # newdata[ indexina3[i] ] = ma[i]\n # newmv = cdms2.createVariable( newdata, id=mv.id )\n # >1-D is the same idea, but more dimensions are coming along for the ride,\n # making it more complicated...\n shape0 = mv.shape\n shape0[kold] = len(axisnew)\n newdata = ma.ones(shape0)*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # We want to copy ma to newdata - except that we need indirect indexing for the kold-th axis.\n # There seems to be nothing in numpy for treating one axis differently from the rest\n # (except for ellipsis, but it makes sense to use only one ellipsis and we would need two here).\n # The following will do the job. It would be very slow for an array with many big dimensions,\n # but the arrays here have already been reduced for graphics; the index sets will be small or\n # empty...\n ranges = map( range, shape0[0:kold] )\n for i in range(len(axisold)):\n for idx in apply(itertools.product,ranges):\n idx = idx + [indexina3(i)] + [Ellipsis]\n idxo = idx + [i] + [Ellipsis]\n newdata[ tuple(idx) ] = mv[idxo]\n newmv = cdms2.createVariable( newdata, id=mv.id )", "def extend_dataset_flip_axis(data,\n labels,\n height=90,\n width=160,\n channels=3):\n all_images = []\n all_labels = []\n flat_shape = data.shape[1]\n for i in range(data.shape[0]):\n orig_label = labels[i]\n if orig_label == 0:\n continue\n frame = data[i].reshape((height, width, channels))\n\n if orig_label == 1:\n flip_cmd = 2\n else:\n flip_cmd = 1\n flip = np.flip(frame, axis=1)\n flip = np.array(flip.reshape(flat_shape))\n all_images.append(flip)\n all_labels.append(flip_cmd)\n all_labels = np.array(all_labels).astype('uint8')\n all_labels = all_labels.reshape((all_labels.shape[0], 1))\n extended_images = np.concatenate((data, all_images), axis=0)\n extended_labels = np.concatenate((labels, all_labels), axis=0)\n return extended_images, extended_labels", "def flip(self, index):\n head, tail = self.get_extreme_points(index)\n centroid = self.get_position(index)\n\n if tail is not None and centroid is not None:\n self.set_angle(index, points_angle(centroid, tail) )", "def flip_ras_lps(vol, affine):\n vol_flipped = np.flip(vol, (0,1))\n affine_flipped = affine.copy()\n affine_flipped[0,-1] = (-1 * affine @ np.array([vol.shape[0]-1,0,0,1]))[0]\n affine_flipped[1,-1] = (-1 * affine @ np.array([0,vol.shape[1]-1,0,1]))[1]\n\n return vol_flipped, affine_flipped" ]
[ "0.7156261", "0.7035609", "0.7033058", "0.6533171", "0.64487714", "0.63454676", "0.6332849", "0.6305558", "0.63048905", "0.62795544", "0.61534804", "0.6124441", "0.6114931", "0.60523576", "0.6050565", "0.5979607", "0.5953482", "0.5949251", "0.5904345", "0.5891284", "0.58893", "0.5879466", "0.5845572", "0.57949686", "0.5777062", "0.5748649", "0.5748649", "0.574592", "0.57452726", "0.57370806", "0.5698076", "0.5681145", "0.5655644", "0.5645772", "0.5605551", "0.5590453", "0.5557051", "0.554656", "0.5545495", "0.55450004", "0.5515334", "0.5510841", "0.54881805", "0.54848915", "0.5481436", "0.54800075", "0.5474346", "0.54656833", "0.54523575", "0.5433069", "0.54280853", "0.5422308", "0.54112506", "0.53531396", "0.5352776", "0.5344959", "0.5332802", "0.53171", "0.53090584", "0.5306327", "0.53024125", "0.5301364", "0.53003776", "0.52988166", "0.5297362", "0.52850664", "0.5283662", "0.52676886", "0.5260868", "0.5252846", "0.52467304", "0.52409464", "0.52387637", "0.52311796", "0.5223342", "0.5217415", "0.52127457", "0.52086085", "0.5206685", "0.52021664", "0.52006316", "0.51946306", "0.51904565", "0.51874137", "0.5172381", "0.51654476", "0.5162056", "0.51590645", "0.514877", "0.51478404", "0.51408875", "0.51385564", "0.5133834", "0.5128809", "0.5125853", "0.5119835", "0.5119835", "0.5116832", "0.51139814", "0.5099972", "0.50993276" ]
0.0
-1
Change ownership to group read/write.
def Chown(self):
    cmd = 'chmod -R 0775 %s' % self.procdir
    self.ExecCmd(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changeOwnership(self, document):\n document.changeOwnership(getSecurityManager().getUser(), False)", "def chown(self, user, group, rec=0):\n uid = getuserid(user)\n gid = getgroupid(group)\n if rec:\n for x in self.visit(rec=lambda x: x.check(link=0)):\n if x.check(link=0):\n error.checked_call(os.chown, str(x), uid, gid)\n error.checked_call(os.chown, str(self), uid, gid)", "def changeOwn():\n os.system('sudo chown -R test:users /etc/resolv.conf')\n os.system('sudo chown -R test:named /etc/named.conf')", "def chown(self, path, owner=None, group=None):\n kwargs = {}\n if owner is not None:\n kwargs[\"owner\"] = owner\n if group is not None:\n kwargs[\"group\"] = group\n self._call(\"SETOWNER\", method=\"put\", path=path, **kwargs)", "def _set_rw_permissions_for_all(self, nms, path):\n nms.appliance.execute('chmod ugo+rw %s' % path)", "def set_ownership(self):\n\n os.chmod(os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id), 0600)", "def chgrp_file(filename, group, sudo=True):\n LOG.info(\"Changing file permissions for {}\".format(filename))\n cmd = \"chgrp {} {}\".format(group, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def chgrp_perms( path, group='climatew', permissions=None ):\n global std_file_perms, std_dir_perms\n if group is None:\n _group = -1 # means don't change the group\n elif not isinstance(group, int):\n _group = grp.getgrnam(group)[2]\n if permissions is None:\n if os.path.isdir(path):\n permissions = std_file_perms\n else:\n permissions = std_dir_perms\n os.chown( path, -1, _group )\n os.chmod( path, permissions )", "def change_ownership(obj, userid):\n assert isinstance(userid, string_types)\n old_owner = obj.creators[0]\n if userid == old_owner:\n return\n #Remove Owner group from old owner\n obj.local_roles.remove(old_owner, ROLE_OWNER)\n #Add new owner\n obj.local_roles.add(userid, ROLE_OWNER)\n #Set new owner in creators attr - this will also trigger reindex catalog event so keep it last!\n obj.set_field_appstruct({'creators': (userid,)})\n return userid", "def _switch_group_right(group_id, action, value, workspace, request_user):\n group = group_api.get_group_by_id(group_id)\n\n if action == workspace_constants.ACTION_READ:\n if value:\n workspace_api.add_group_read_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n elif action == workspace_constants.ACTION_WRITE:\n if value:\n workspace_api.add_group_write_access_to_workspace(workspace, group, request_user)\n else:\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def set_read_only(self, bReadOnly):\n\t\tcall_sdk_function('PrlShare_SetReadOnly', self.handle, bReadOnly)", "def _ensure_read_write_access(tarfileobj):\n dir_perm = tarfile.TUREAD | tarfile.TUWRITE | tarfile.TUEXEC\n file_perm = tarfile.TUREAD | tarfile.TUWRITE\n\n for tarinfo in tarfileobj.getmembers():\n tarinfo.mode |= (dir_perm if tarinfo.isdir() else file_perm)", "def _set_read_only(self, read_only):\n self._read_only = read_only", "def request_access_to_groups(self, ceph):\n for ceph_group in (\"volumes\", \"images\", \"vms\"):\n ceph.request_access_to_group(\n name=ceph_group,\n object_prefix_permissions={\"class-read\": [\"rbd_children\"]},\n permission=\"rwx\",\n )", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def grant_access_rw(message, db, reason):\n 
hf.grant(message, db.lower(), reason, \"readwrite\")", "def test_00_mail_group_access_rights(self):\n cr, uid, user_bert_id, user_raoul_id = self.cr, self.uid, self.user_bert_id, self.user_raoul_id\n\n # Do: Bert reads Jobs -> ok, public\n self.mail_group.read(cr, user_bert_id, [self.group_jobs_id])\n # Do: Bert read Pigs -> ko, restricted to employees\n self.assertRaises(except_orm, self.mail_group.read,\n cr, user_bert_id, [self.group_pigs_id])\n # Do: Raoul read Pigs -> ok, belong to employees\n self.mail_group.read(cr, user_raoul_id, [self.group_pigs_id])\n\n # Do: Bert creates a group -> ko, no access rights\n self.assertRaises(except_orm, self.mail_group.create,\n cr, user_bert_id, {'name': 'Test'})\n # Do: Raoul creates a restricted group -> ok\n new_group_id = self.mail_group.create(cr, user_raoul_id, {'name': 'Test'})\n # Do: Bert added in followers, read -> ok, in followers\n self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_bert_id])\n self.mail_group.read(cr, user_bert_id, [new_group_id])\n\n # Do: Raoul reads Priv -> ko, private\n self.assertRaises(except_orm, self.mail_group.read,\n cr, user_raoul_id, [self.group_priv_id])\n # Do: Raoul added in follower, read -> ok, in followers\n self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_raoul_id])\n self.mail_group.read(cr, user_raoul_id, [self.group_priv_id])\n\n # Do: Raoul write on Jobs -> ok\n self.mail_group.write(cr, user_raoul_id, [self.group_priv_id], {'name': 'modified'})\n # Do: Bert cannot write on Private -> ko (read but no write)\n self.assertRaises(except_orm, self.mail_group.write,\n cr, user_bert_id, [self.group_priv_id], {'name': 're-modified'})\n # Test: Bert cannot unlink the group\n self.assertRaises(except_orm,\n self.mail_group.unlink,\n cr, user_bert_id, [self.group_priv_id])\n # Do: Raoul unlinks the group, there are no followers and messages left\n self.mail_group.unlink(cr, user_raoul_id, [self.group_priv_id])\n fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])\n self.assertFalse(fol_ids, 'unlinked document should not have any followers left')\n msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])\n self.assertFalse(msg_ids, 'unlinked document should not have any followers left')", "async def claim_group_owner(self, group_id: int):\n r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{group_id}/claim-ownership',\n method='post')\n return r", "def test_protect_owner(self):\n self.collection.set_permission(Permission.SHARE, self.user1)\n\n # User with share permission cannot grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertNotIn(\"owner\", self.collection.get_permissions(self.user2))\n self.assertFalse(PermissionModel.objects.filter(user=self.user2).exists())\n\n # User with share permission cannot revoke ``owner`` permission\n self.collection.set_permission(Permission.OWNER, self.user2)\n data = {\"users\": {self.user2.pk: \"editor\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n 
self.collection.set_permission(Permission.NONE, self.user2)\n\n # Now let user1 be owner on collection.\n set_permission(Permission.OWNER, self.user1, self.collection)\n\n # ``owner`` permission cannot be assigned to a group\n data = {\"groups\": {self.group.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertFalse(PermissionModel.objects.filter(group=self.group).exists())\n\n # User with owner permission can grant ``owner`` permission\n data = {\"users\": {self.user2.pk: \"owner\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(\n self.collection.get_permissions(self.user2),\n [Permission.VIEW, Permission.EDIT, Permission.SHARE, Permission.OWNER],\n )\n\n # User with owner permission can revoke ``owner`` permission\n data = {\"users\": {self.user2.pk: \"edit\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(\n PermissionModel.objects.filter(\n user=self.user2, value=Permission.OWNER.value\n ).exists()\n )\n\n # User with owner permission cannot remove all owners\n data = {\"users\": {self.user1.pk: \"edit\", self.owner.pk: \"edit\"}}\n\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(resp.data[\"detail\"], \"Object must have at least one owner.\")\n\n owner_permissions = self.collection.permission_group.permissions.filter(\n value=Permission.OWNER.value\n )\n owner_count = owner_permissions.count()\n self.assertEqual(owner_count, 2)\n\n # User can delete his owner permission if there is at least one other owner\n self.assertTrue(owner_permissions.filter(user=self.user1).exists())\n data = {\"users\": {self.user1.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertFalse(owner_permissions.filter(user=self.user1.pk).exists())", "def grant_read_write(self, identity: aws_cdk.aws_iam.IGrantable, objects_key_pattern: typing.Any=None) -> aws_cdk.aws_iam.Grant:\n ...", "def chown_chmod ( fspath, uid=None, gid=None, mode=None, pretend=False ):\n return ChownChmod ( uid, gid, mode, pretend ).chown_chmod ( fspath )", "def set_file_owner_perm(path, permission, user, group):\n uid = pwd.getpwnam(user).pw_uid\n gid = grp.getgrnam(group).gr_gid\n\n current_perm = get_permissions(path)\n try:\n logger.debug('Current permission: {0}, changing to {1}'.format(current_perm, oct(permission)))\n os.chmod(path, permission)\n os.chown(path, uid, gid)\n except Exception as e:\n logger.warning('Unable to change permissions on {0}: {1}'.format(path, e))", "def pid_permissions():\n config = Config()\n try:\n user = pwd.getpwnam(config.user)\n group = grp.getgrnam(config.group)\n os.chown(config.pidfile, user.pw_uid, group.gr_gid)\n except (KeyError, PermissionError):\n logger.error(\"Unable to change pidfile ownership permissions.\")\n raise SystemExit(os.EX_USAGE)", "def set_owner(self, role, check_mode=True):\n query = 'ALTER SUBSCRIPTION %s OWNER TO \"%s\"' % (self.name, role)\n return self.__exec_sql(query, check_mode=check_mode)", "def make_readonly(path):\n mode = Path.stat(path).st_mode\n Path.chmod(path, mode & ~stat.S_IWRITE)", "def grant_read_write(self, identity: 
aws_cdk.aws_iam.IGrantable, objects_key_pattern: typing.Any=None) -> aws_cdk.aws_iam.Grant:\n return jsii.invoke(self, \"grantReadWrite\", [identity, objects_key_pattern])", "def grant_read_write(self, identity: aws_cdk.aws_iam.IGrantable, objects_key_pattern: typing.Any=None) -> aws_cdk.aws_iam.Grant:\n return jsii.invoke(self, \"grantReadWrite\", [identity, objects_key_pattern])", "def chown(config):\n\n path = config.device_path()\n\n args = [\"sudo\", \"chown\", \"%d:%d\" % (os.getuid(), os.getgid()), path]\n iotests.log(\" \".join(args), filters=[iotests.filter_chown])\n proc = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n msg = proc.communicate()[0]\n\n if proc.returncode != 0:\n raise Exception(msg)", "def chown_file(filename, file_owner, sudo=True):\n LOG.info(\"Changing the user that owns {}\".format(filename))\n cmd = \"chown {} {}\".format(file_owner, filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def sync_volumeaccessright_record( vac ):\n \n syndicate_caps = \"UNKNOWN\" # for exception handling\n \n # get arguments\n config = observer_core.get_config()\n principal_id = vac.owner_id.email\n volume_name = vac.volume.name\n syndicate_caps = observer_core.opencloud_caps_to_syndicate_caps( vac.cap_read_data, vac.cap_write_data, vac.cap_host_data ) \n \n logger.info( \"Sync VolumeAccessRight for (%s, %s)\" % (principal_id, volume_name) )\n \n # validate config\n try:\n observer_secret = observer_core.get_syndicate_observer_secret( config.SYNDICATE_OBSERVER_SECRET )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"syndicatelib config is missing SYNDICATE_RG_DEFAULT_PORT, SYNDICATE_OBSERVER_SECRET\")\n raise e\n \n # ensure the user exists and has credentials\n try:\n rc, user = observer_core.ensure_principal_exists( principal_id, observer_secret, is_admin=False, max_UGs=1100, max_RGs=1 )\n assert rc is True, \"Failed to ensure principal %s exists (rc = %s,%s)\" % (principal_id, rc, user)\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to ensure user '%s' exists\" % principal_id )\n raise e\n\n # grant the slice-owning user the ability to provision UGs in this Volume\n try:\n rc = observer_core.ensure_volume_access_right_exists( principal_id, volume_name, syndicate_caps )\n assert rc is True, \"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name)\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to set up Volume access right for slice %s in %s\" % (principal_id, volume_name))\n raise e\n \n except Exception, e:\n traceback.print_exc()\n logger.error(\"Faoed to ensure user %s can access Volume %s with rights %s\" % (principal_id, volume_name, syndicate_caps))\n raise e\n\n return True", "def test_component_chown_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('component chown component2 changed_owner')\n rv, output = self._execute('component list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def chown_file ( self, fspath ):\n return", "def testChAttrs(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertTrue(results[0].startswith(b'-rw-r--r--'))\n self.assertEqual(results[1], b'')\n self.assertTrue(results[2].startswith(b'----------'), results[2])\n self.assertEqual(results[3], b'')\n\n d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',\n 'ls -l testfile1', 'chmod 644 testfile1')\n return d.addCallback(_check)\n # XXX test 
chgrp/own", "def _remove_group_rights(object_id, workspace, request_user):\n group = group_api.get_group_by_id(object_id)\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def manage_changeOwnershipType(\n self,\n explicit=1,\n RESPONSE=None,\n REQUEST=None\n ):\n old = getattr(self, '_owner', None)\n if explicit:\n if old is not None:\n return\n owner = self.getOwnerTuple()\n if owner is not None and owner is not UnownableOwner:\n self._owner = owner\n else:\n if old is None:\n return\n new = aq_get(aq_parent(self), '_owner', None, 1)\n _m = object()\n if old is new and (self.__dict__.get('_owner', _m) is not _m):\n del self._owner\n\n if RESPONSE is not None:\n RESPONSE.redirect(REQUEST['HTTP_REFERER'])", "def setup_volume_access( user_email, volume_name, caps, RG_port, slice_secret, RG_closure=None ):\n client = connect_syndicate()\n \n try:\n rc = ensure_volume_access_right_exists( user_email, volume_name, caps )\n assert rc is True, \"Failed to create access right for %s in %s\" % (user_email, volume_name)\n \n except Exception, e:\n logger.exception(e)\n return False\n \n RG_name = syndicate_provisioning.make_gateway_name( \"OpenCloud\", \"RG\", volume_name, \"localhost\" )\n RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )\n \n try:\n rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, \"localhost\", RG_port, RG_key_password, closure=RG_closure )\n except Exception, e:\n logger.exception(e)\n return False\n \n return True", "def test_transfer_new_inherited_owner(self):\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n self.assertEqual(\n self.project.get_owners(inherited_only=True)[0].user,\n self.user_owner_cat,\n )\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_owner_cat.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_owner_cat)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)\n self.assertEqual(\n self.project.get_role(self.user_owner_cat),\n RoleAssignment.objects.get(\n project=self.project,\n user=self.user_owner_cat,\n role=self.role_owner,\n ),\n )", "def grant_access_rw(message, db, reason):\n grant_sql_access(message, db, reason, False)", "def test_get_role_set_inherited_only(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertEqual(\n self.project.get_role(self.user_alice, inherited_only=True),\n self.owner_as_cat,\n )", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def read_only(self, reddit):\n # Require tests to explicitly disable read_only mode.\n reddit.read_only = True", "def _change_access(course, user, level, action):\r\n\r\n try:\r\n role = ROLES[level](course.id)\r\n except KeyError:\r\n raise ValueError(\"unrecognized level '{}'\".format(level))\r\n\r\n if action == 'allow':\r\n role.add_users(user)\r\n elif action == 'revoke':\r\n role.remove_users(user)\r\n else:\r\n 
raise ValueError(\"unrecognized action '{}'\".format(action))", "def privilegedStartService(self):\n super(GroupOwnedUNIXServer, self).privilegedStartService()\n\n # Unfortunately, there's no public way to access this. -glyph\n fileName = self._port.port\n chown(fileName, getuid(), self.gid)", "def set_read_only(self, read_only):\n self.__aceQLHttpApi.set_read_only(read_only)", "def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:\n ...", "def read_only(self, read_only):\n\n self._read_only = read_only", "def read_only(self, read_only):\n\n self._read_only = read_only", "def test_read_group(self):\n pass", "def chgrp_perms_updown( path, group='climatew' ):\n global std_file_perms, std_dir_perms\n # First go down\n for (root,dirs,files) in os.walk(path):\n chgrp_perms( root, group, std_dir_perms )\n for file in files:\n chgrp_perms( os.path.join(root,file), group, std_file_perms )\n dir = path\n try:\n while len(dir)>21: # len(\"/p/css03/esgf_publish\")\n chgrp_perms( dir, group, std_dir_perms )\n dir = os.path.split(dir)[0]\n except: # If we don't have write permission, we're finished.\n pass", "def _setup_permissions(self, chown, chmod):\n if chown is not None:\n if isinstance(chown, str):\n user, group = chown, None\n\n else:\n try:\n # Try to extract tuple.\n user, group = chown\n\n except ValueError:\n # If length of iterable is not 2, then allow 1.\n assert len(chown) == 1, 'chown must be user or tuple'\n user, group = chown[0], None\n\n except TypeError:\n # If not iterable, use given value as user.\n user, group = chown, None\n\n # Lookup user id.\n if isinstance(user, str):\n user_info = pwd.getpwnam(user)\n user = user_info.pw_uid\n\n # Lookup group id, or use -1 (do not change group)\n if isinstance(group, str):\n group = grp.getgrnam(group).pw_gid\n\n elif group is None:\n group = -1\n\n # Return tuple usable by os.chown().\n chown = (user, group)\n\n # Ensure chmod is numeric if given.\n if chmod is not None:\n assert isinstance(chmod, numbers.Number), 'chmod must be a number'\n\n return chown, chmod", "def test_transfer_old_inherited_owner(self):\n self.owner_as_cat.user = self.user_owner\n self.owner_as_cat.save()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_OWNER,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_guest)\n self.assertIsNone(\n RoleAssignment.objects.filter(\n project=self.project, user=self.user_owner\n ).first()\n )\n self.assertEqual(\n self.project.get_role(self.user_owner), self.owner_as_cat\n )\n self.assertEqual(self.owner_as.role, self.role_owner)", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "def test_filter_owner_permission(self):\n User = get_user_model()\n user1 = User.objects.create(username=\"test_user1\", email=\"user1@test.com\")\n obj = DescriptorSchema.objects.create(contributor=user1)\n obj.set_permission(Permission.VIEW, user1)\n\n data_template = {\n \"users\": {user1.id: \"view\"},\n \"groups\": {1: \"edit\", 2: \"NONE\"},\n }\n\n check_owner_permission(data_template, False, obj)\n\n # Check that only owner can set owner permission.\n data = deepcopy(data_template)\n data[\"users\"][1] = \"owner\"\n 
with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that only owner can rewoke owner permission.\n obj.set_permission(Permission.OWNER, user1)\n data = deepcopy(data_template)\n data[\"users\"][1] = \"edit\"\n with self.assertRaises(exceptions.PermissionDenied):\n check_owner_permission(data, False, obj)\n check_owner_permission(data, True, obj)\n\n # Check that group can not be owner.\n obj.set_permission(Permission.VIEW, user1)\n data = deepcopy(data_template)\n data[\"groups\"][1] = \"owner\"\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, False, obj)\n with self.assertRaises(exceptions.ParseError):\n check_owner_permission(data, True, obj)", "def test_mark_entry_as_read_groups(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def writable(name):", "def room_to_read_only(self, room_id):\n # set room to read-only\n response = requests.patch(\n f\"{self.uri}/rooms/{room_id}/attribute/id/text\",\n json={\"attribute\": \"readonly\", \"value\": \"True\"},\n headers={\"Authorization\": f\"Bearer {self.token}\"},\n )\n if not response.ok:\n logging.error(f\"Could not set room to read_only: {response.status_code}\")\n response.raise_for_status()\n response = requests.patch(\n f\"{self.uri}/rooms/{room_id}/attribute/id/text\",\n json={\"attribute\": \"placeholder\", \"value\": \"This room is read-only\"},\n headers={\"Authorization\": f\"Bearer {self.token}\"},\n )\n if not response.ok:\n logging.error(f\"Could not set room to read_only: {response.status_code}\")\n response.raise_for_status()\n\n for user in self.players_per_room[room_id]:\n response = requests.get(\n f\"{self.uri}/users/{user['id']}\",\n headers={\"Authorization\": f\"Bearer {self.token}\"},\n )\n if not response.ok:\n logging.error(f\"Could not get user: {response.status_code}\")\n response.raise_for_status()\n etag = response.headers[\"ETag\"]\n\n response = requests.delete(\n f\"{self.uri}/users/{user['id']}/rooms/{room_id}\",\n headers={\"If-Match\": etag, \"Authorization\": f\"Bearer {self.token}\"},\n )\n if not response.ok:\n logging.error(\n f\"Could not remove user from task room: {response.status_code}\"\n )\n response.raise_for_status()\n logging.debug(\"Removing user from task room was successful.\")", "def test_leave_accrual_access_rights(self):\n accrual = self.employee.get_leave_accrual(self.leave_type.id)\n accrual.write({\n 'line_ids': [(0, 0, {\n 'name': 'Test',\n 'amount_cash': 100,\n 'date': datetime.now(),\n })],\n })\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_3.id).check_access_rule, 'read')\n\n self.assertRaises(\n Exception,\n accrual.sudo(self.user_2.id).check_access_rights, 'write')\n\n accrual.sudo(self.user_1.id).check_access_rule('read')\n self.assertTrue(\n accrual.sudo(self.user_1.id).check_access_rights('read'))\n\n # The manager can not access the leave accruals of the employee 2\n # because he is not the employee's manager\n accrual_2 = self.employee_2.get_leave_accrual(self.leave_type.id)\n\n self.assertRaises(\n Exception,\n accrual_2.sudo(self.user_1.id).check_access_rule, 'read')\n\n self.user_1.write({\n 'groups_id': [(4, self.ref('base.group_hr_manager'))]})\n\n for operation in ['read', 'write', 'create', 'unlink']:\n accrual_2.sudo(self.user_1.id).check_access_rule(operation)\n self.assertTrue(\n 
accrual_2.sudo(self.user_1.id).check_access_rights(operation))", "def test_transfer_old_inherited_member(self):\n self.make_assignment(\n self.category, self.user_owner, self.role_contributor\n )\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_guest)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)", "def raise_for_ownership(self, resource: Model) -> None:\n\n # pylint: disable=import-outside-toplevel\n from superset import db\n\n if self.is_admin():\n return\n\n # Set of wners that works across ORM models.\n owners: List[User] = []\n\n orig_resource = db.session.query(resource.__class__).get(resource.id)\n\n if orig_resource:\n if hasattr(resource, \"owners\"):\n owners += orig_resource.owners\n\n if hasattr(resource, \"owner\"):\n owners.append(orig_resource.owner)\n\n if hasattr(resource, \"created_by\"):\n owners.append(orig_resource.created_by)\n\n if g.user.is_anonymous or g.user not in owners:\n raise SupersetSecurityException(\n SupersetError(\n error_type=SupersetErrorType.MISSING_OWNERSHIP_ERROR,\n message=f\"You don't have the rights to alter [{resource}]\",\n level=ErrorLevel.ERROR,\n )\n )", "def read_release(self):\n self.is_locked = False\n self.rwlock = RWLock().read_acquire()", "def setup_permissions():\n sudo('chown %s:%s -R %s' % (env.apache_user, env.apache_user, env.whole_path_symlinked))", "def _RemountRootAsReadWrite(self):\n self.RunCmdOnDevice(['mount', '-o', 'remount,rw', '/'])", "def chown_chmod ( self, fspath ):\n # should be renamed to chmod_chown()\n return (\n self.chmod ( fspath ),\n self.chown ( fspath )\n )", "def del_ro(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)", "def transferOwnership(_newOwner: address):\n assert msg.sender == self.owner, \"Access is denied.\"\n assert _newOwner != ZERO_ADDRESS, \"Invalid owner supplied.\"\n\n log.OwnershipTransferred(msg.sender, _newOwner)\n self.owner = _newOwner", "def is_read_only(self):\n\t\treturn bool(call_sdk_function('PrlShare_IsReadOnly', self.handle))", "def test_06_self_cannot_upgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.VIEW)\n with self.assertRaises(PermissionDenied):\n dog.uaccess.share_group_with_user(\n meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))", "def match_owner_group(dest_path, source_path):\n source_stat = os.stat(source_path)\n return os.chown(dest_path, source_stat[stat.ST_UID], source_stat[stat.ST_GID])", "def assign_permissions(sender, instance, created, **kwargs):\n 
if created:\n assign_perm('view_strand', instance.owner.group, instance)\n assign_perm('change_strand', instance.saver, instance)\n assign_perm('delete_strand', instance.saver, instance)\n assign_perm('view_strand', instance.saver, instance)", "def chown ( self, fspath ):\n if os.path.isdir ( fspath ):\n return self.chown_dir ( fspath )\n else:\n return self.chown_file ( fspath )", "def set_permissions(self, object, replace=False):\r\n if isinstance(self.config.origin, S3Origin):\r\n if self.config.origin.origin_access_identity:\r\n id = self.config.origin.origin_access_identity.split('/')[-1]\r\n oai = self.connection.get_origin_access_identity_info(id)\r\n policy = object.get_acl()\r\n if replace:\r\n policy.acl = ACL()\r\n policy.acl.add_user_grant('READ', oai.s3_user_id)\r\n object.set_acl(policy)\r\n else:\r\n object.set_canned_acl('public-read')", "def test_set_permission(self):\n collection = Collection.objects.create(\n contributor=self.owner, name=\"Test collection 2\"\n )\n self.assertEqual(collection.get_permission(self.user1), Permission.NONE)\n self.assertEqual(self.collection.get_permission(self.user1), Permission.NONE)\n Collection.objects.filter(name__startswith=\"Test collection\").set_permission(\n Permission.VIEW, self.user1\n )\n self.assertEqual(collection.get_permission(self.user1), Permission.VIEW)\n self.assertEqual(self.collection.get_permission(self.user1), Permission.VIEW)", "def changePermissions(self, event):\n pass", "def set_read_group( in_bam_path, out_bam_path, id:str, pl:str, lb:str, sm:str, pu:str, threads=4 ):\n\n \"\"\"\n read_groups(set/dict) : set or dictionary which contains read groups. The dictionary should have the format { read_group_id (str)\n { 'ID': ID, 'LB':library,\n 'PL':platform,\n 'SM':sampleLib,\n 'PU':readGroup }\n \"\"\"\n\n read_groups = {id:{ 'ID': id, 'LB':lb,\n 'PL':pl,\n 'SM':sm,\n 'PU':pu }}\n\n with pysam.AlignmentFile(in_bam_path, threads = threads) as input_bam:\n\n input_header = input_bam.header.as_dict()\n\n # Write provenance information to BAM header\n write_program_tag(\n input_header,\n program_name='bamReadGroupFormat',\n command_line=\" \".join(\n sys.argv),\n version=singlecellmultiomics.__version__,\n description=f'SingleCellMultiOmics read group formatting, executed at {datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")}')\n\n with sorted_bam_file(out_bam_path, header=input_header, read_groups=read_groups, input_is_sorted=True) as out:\n print('Started writing')\n for read in input_bam:\n rg_id = id\n read.set_tag('RG',rg_id)\n out.write(read)", "def chown_dir ( self, fspath ):\n return", "def read_acquire(self):\n self.is_locked = True\n self.rwlock = RWLock().read_acquire()", "def update_readonly_flag(self, volume, read_only):\n return self._impl.update_readonly_flag(volume, read_only=read_only)", "def change_permissions(path, permission='777'):\r\n if os.path.exists(path):\r\n subprocess.call('chmod -R %s %s'%(permission,path),shell=True)\r\n else:\r\n raise NameError('invalid path %s'% path)", "def setReadOnly(self, state: bool) -> None:\n ...", "async def reacrole(self, ctx: commands.Context):\n pass", "def pre_security_group_read(self, resource_id):\n pass", "def MakeWorldReadable(path):\n\n # No need to do anything special on Windows.\n if IsWindows():\n return\n\n perms = stat.S_IMODE(os.stat(path)[stat.ST_MODE])\n if os.path.isdir(path):\n # Directories need read and exec.\n os.chmod(path, perms | 0555)\n else:\n os.chmod(path, perms | 0444)", "def set_object_copy_only_permissions(self, agent):\n\n 
self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Copy)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Modify)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Transfer)", "def MakeWritable():\n return shell.ShellCommand(\n name = \"make writable\",\n haltOnFailure = 1,\n description = [\"making writable\"],\n descriptionDone = [\"made writable\"],\n command = [\"chmod\", \"-R\", \"+w\", \".\"],\n )", "def __reader_get_new_ownership(self):\n if self._transfer_cmp_event.is_set() and not self.put_queue_flag:\n self.logger.info(\"Received transfer/accept request event in reader\") \n for key in self._reader_map.keys():\n if int(key) not in self.msg.get_ownershipList():\n del self._reader_map[key]\n self.msg.put_into_Queue()\n self.put_queue_flag = True\n elif not self._transfer_cmp_event.is_set():\n self.put_queue_flag = False", "def post_security_group_read(self, resource_id, resource_dict):\n pass", "def grant_read(self, identity: aws_cdk.aws_iam.IGrantable, objects_key_pattern: typing.Any=None) -> aws_cdk.aws_iam.Grant:\n ...", "def set_group_access(self, group, allowed_signals, allowed_controls):\n self._access_lists.set_group_access(group, allowed_signals, allowed_controls)", "def _fix_r_res(res, o, g, p):\n print green(\"Setting remote resource %s parameters %s, %s, %s\" %\n (res, o, g, p))\n run('chown %s:%s %s' % (o, g, res))\n run('chmod %s %s' % (p, res))", "def enable_acm_readonly(self):\n self._request({\"enable-acm-readonly\": True})", "def set_object_transfer_only_permissions(self, agent):\n\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Copy)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Modify)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Transfer)", "def transfer_ownership(self, user):\n new_owner = get_user_model().objects.filter(is_active=True) \\\n .get(pk=user.pk)\n self.owner = new_owner", "def grant(self, group, repo, privilege):\r\n url = '{0}/group-privileges/{1}/{2}/{1}/{3}/'.format(\r\n self.parent.parent.get_url(), self.user, repo, group)\r\n\r\n return http.Request('PUT', url, privilege), parsers.parse_empty", "def _make_writeable(filename):\n import stat\n if sys.platform.startswith('java'):\n # On Jython there is no os.access()\n return\n if not os.access(filename, os.W_OK):\n st = os.stat(filename)\n new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR\n os.chmod(filename, new_permissions)", "def set_access_for_others(self, nAccessForOthers):\n\t\tcall_sdk_function('PrlAcl_SetAccessForOthers', self.handle, nAccessForOthers)", "def update_readonly_flag(self, volume, read_only):\n aname = \"cinder_v%s.update_readonly_flag\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.update_readonly_flag(\n volume, read_only)", "def allowed_group_access_change(user, group):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n return (user.has_perm(\"vnswww.group_change_any\")\n or (user.has_perm(\"vnswww.group_change_org\")\n and group.org == up.org))", "def _read_only(object, name, value):\n raise TraitError(\n \"The '%s' trait of %s instance is 'read only'.\"\n % (name, class_of(object))\n )", "def adjust_permission_base_dir(base_dir, destination):\n\n if destination==\"tegner-login-1\":\n #Change group and set permissions for PDC Stockholm\n user_group = 
DATA_USER_PDC + \":\" + DATA_GROUP_PDC\n \n subprocess.Popen( [\"chown\", \"-R\", user_group, base_dir],\n stdout=subprocess.PIPE )\n \n\n subprocess.Popen( [\"setfacl\", \"-R\", \"-M\", \"/cfs/klemming/projects/xenon/misc/basic\", base_dir],\n stdout=subprocess.PIPE )", "def test_transfer_new_inherited_member(self):\n self.make_assignment(\n self.category, self.user_new, self.role_contributor\n )\n self.assertEqual(\n self.project.get_owners(inherited_only=True)[0].user,\n self.user_owner_cat,\n )\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_new.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_new)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)" ]
[ "0.66892415", "0.62186366", "0.6200737", "0.6195593", "0.6075884", "0.6041262", "0.599601", "0.59780955", "0.5939623", "0.5920451", "0.58722764", "0.5808822", "0.5740759", "0.565018", "0.56067586", "0.55544484", "0.5519573", "0.5503808", "0.5484997", "0.5481513", "0.54399353", "0.5434692", "0.54086477", "0.53880817", "0.53870636", "0.53663975", "0.53663975", "0.5362936", "0.53531086", "0.52928156", "0.52316505", "0.5224458", "0.5214807", "0.5198643", "0.5172099", "0.514449", "0.51427233", "0.51405805", "0.5130294", "0.5129552", "0.5111754", "0.5108394", "0.5105414", "0.5105336", "0.5105204", "0.50948834", "0.50948834", "0.50774235", "0.50690544", "0.50651205", "0.5063244", "0.5060551", "0.5058698", "0.5042138", "0.5035244", "0.50251853", "0.501794", "0.50176466", "0.50074345", "0.5007332", "0.4999267", "0.49988243", "0.4995351", "0.49906075", "0.49769816", "0.4970408", "0.4962522", "0.49497515", "0.49486372", "0.49462727", "0.4944149", "0.4938897", "0.4934152", "0.49274203", "0.49268585", "0.49222368", "0.49142677", "0.49121448", "0.49084613", "0.49000874", "0.48985815", "0.4897585", "0.4895833", "0.48876172", "0.48873466", "0.48850584", "0.48792762", "0.48760295", "0.48701686", "0.48562813", "0.48550895", "0.48497063", "0.48446828", "0.48436925", "0.48400295", "0.48332655", "0.48287657", "0.48270518", "0.48266616", "0.4819047" ]
0.56320053
14
Store some useful information in the log file.
def LogProcess(self):
    time = datetime.today().strftime('%a %Y%b%d %X')
    # Get user name.
    f = os.popen("whoami","r")
    user = f.read().strip()
    f.close()
    entry = '%s\t%s\t%s\t%s\n' % (time, self.topdir, user, self.version)
    if ismounted(c.exams_file):
        # Append info to the exams file.
        try:
            f = open(c.exams_file,'a+')
            f.seek(0, 2)
            f.write(entry)
            f.close()
        except:
            # Not a huge problem if this doesn't work.
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def log_info(info):\n log = open(log_path, 'a+')\n log.write(info + '\\n')\n log.close()", "def log(self):\n\t\tfilename = '/var/log/postunsuspendacct-%s.log' % self.argv.get('user')\n\t\tfileobj = open(filename, 'w');\n\t\tfileobj.write(self.title)\n\t\tfor (key, value) in self.argv.items():\n\t\t\tfileobj.write('%s: %s\\n' % (key, value))\n\t\tfileobj.close()\n\t\tprint \"[%s] Log saved '%s'\" % (ctime(), filename)", "def _log_to_file(self, message):\n if self.log is not None:\n message = \"[%s] %s\" % (datetime.datetime.utcnow().strftime('%H:%M:%S'), message)\n self.log.write(\"%s\\n\" % (message,))\n self.log.flush()\n print message", "def writeLog(self):\n if self.logBuffer != None and self.logging :\n f = open(self.logfileName, 'w')\n self.logBuffer += \"Final Fitness: %f\\n\" % self.getTotalReward()\n self.logBuffer += \"\\n\"\n f.write(self.logBuffer)\n f.close()", "def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)", "def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))", "def write_log(self, log_filename, data):\n open(log_filename, 'a').write(str(data))", "def write_data(self, data):\n print \"Writing data...\"\n # Write data into log\n self.log.write_file(data)\n\n # Close log so information can be sent\n self.log.close_log()", "def __log__(self, val):\n if lm_settings[\"debug\"]:\n try:\n log_file = open(\"language_manager/info/language_manager.log\", \"a\")\n except FileNotFoundError:\n log_file = open(lm_settings[\"logfile\"], \"w\")\n log_file.write(val)\n log_file.close()", "def write_log(self, msg: str):\n self.cta_engine.write_log(msg, self)", "def _logger(self):\r\n\r\n # Create filename for log\r\n filenameF = self._vna.getDateFormatted() + \".txt\"\r\n filenameF = \"Logs/\" + filenameF \r\n f = open(filenameF, \"a+\") # Log saved in directory named logs located in same directory as this file\r\n \r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages)):\r\n # f.write('%s\\t\\t\\t' % self._voltages[i][0])\r\n # else:\r\n for i in range(len(self._voltages)):\r\n f.write('%s\\t\\t' % self._voltages[i][0])\r\n f.write('\\n')\r\n\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._voltages[0])):\r\n # line = \"\"\r\n # for j in range(len(self._voltages)):\r\n # line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][2*i]) + \\\r\n # str(self._intensity[j][2*i + 1]) + '\\t'\r\n # f.write(line)\r\n # f.write('\\n')\r\n # else: \r\n for i in range(len(self._voltages[0])):\r\n line = \"\"\r\n for j in range(len(self._voltages)):\r\n line = line + str(self._frequency[j][i]) + '\\t' + str(self._intensity[j][i]) + 
'\\t' \r\n f.write(line)\r\n f.write('\\n')", "def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')", "def log(self, txt):\n if self.logfile:\n self.logfile.write(txt)", "def set_info(self, message):\n if self.log_file_exist(self.file_path_name):\n logging.info(message)\n else:\n print \"The log \"+ self.name_log + \"does not exist in the directory\"", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def log(self, msg=None):\n f = open(self.logbook, 'a')\n # if send or receive, write message\n if msg: \n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + \n \" \" + str(msg) + '\\n')\n # if it is an internal event just write the system time and current\n # logical clock time\n else:\n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + '\\n')\n f.close()", "def save(*messages):\n data = Parser.parse_texts(*messages[1:])\n hour = time.strftime(\"_%H_%M_%S\")\n today = 
time.strftime(\"_%d_%m_%Y\")\n title = Parser.parse_text(messages[0])\n\n file = open(\"./logs/\"+threading.currentThread().getName()+today+\".log\",'a+')\n file.write(\"\\n==\"+title+hour+\"==\\n\")\n if type(data) is dict: #Dictionary with each value being a triplet. From get_all_items\n for key in data.keys():\n file.write(Parser.parse_text(key) + \" -> \"+ Parser.parse_text(str(data[key].x)) +\"\\n\")\n elif type(data) is list: #From get_item, market item, attribute listings\n for listing in data:\n file.write(str(listing.id)+\" - \"+str(listing.price/100)+\" euros\\n\")\n else: #plain text\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def logFile(self):\n\n event = 'stim'\n mStr = '{:013}'.format(self.mouse.tag) + '\\t'\n outPutStr = mStr + \\\n datetime.fromtimestamp(int(time())).isoformat(' ') + '\\t' + event\n print (outPutStr)\n if self.textfp != None:\n outPutStr = mStr + '{:.2f}'.format(time()) + '\\t' + event\n self.textfp.write(outPutStr + '\\n')\n self.textfp.flush()", "def write_debug_log(self, msg):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open(self.debug_log, 'a+') as logfile:\n logfile.write(\"%s: %s\\n\" % (now, msg))", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def _write_log(self, log_data):\n # for data in log_data:\n # self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass", "def store_action_log(self, filename):\n t = self.get_current_timeindex()\n camera_obs = self.get_camera_observation(t)\n self._action_log[\"final_object_pose\"] = {\n \"t\": t,\n \"pose\": camera_obs.object_pose,\n }\n\n with open(filename, \"wb\") as fh:\n pickle.dump(self._action_log, fh)", "def write(self, msg, flag_print=True):\n file = open(self.log_path, \"a\")\n insert_time=datetime.now().strftime('%H:%M:%S.%f')[:-3]\n current_time = \"[\"+insert_time+\"]\"\n log_msg = current_time + \" \" + msg + \"$\" +\"\\n\" \n file.write(log_msg)\n # if flag_print is True:\n print(log_msg)", "def print_info(self):\n\n self.logging.info(str(self.filename) + ':' + str(self.__info_dict))", "def InsertLog():", "def _write_log(self, log_data):\n self.log_file.write(ensure_bytes(log_data + \"\\n\"))\n self.log_file.flush()", "def printToLogfile (self, text):\n if self.logFile is not None:\n self.logFile.write(text)\n self.logFile.flush()", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def logentry(self, string=None):\n if (self._OIFlogging):\n oiflogfile = open(self._commslogfilename, \"a\")\n oiflogfile.write(\"# \" + \"%04.6fs: \" % (self._gettime() - self._logstarttime) + string + \"\\n\")\n oiflogfile.flush()\n else:\n# if self._print_once:\n# self._print_once = 0\n# print self.hilite(\"Warning: Not logging OIF transactions. 
Use\\n it.logfile(<filename>) to set log filename and\\n it.logging(True) to enable logging\", False, True)\n print 'Unable to write log entry', string\n return", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def dump_to_log(self, log_dir, log_filename):\n\t\twith open(os.path.join(log_dir, log_filename), \"w\") as f:\n\t\t\tf.write(\"================ Arguments ==================\\n\")\n\t\t\tfor k, v in vars(self).items():\n\t\t\t\tf.write(\"{} : {}\\n\".format(k, v))\n\t\t\tf.write(\"=============================================\\n\")", "def store(status, message, **kwargs):\r\n LogRecord(status, message, **kwargs)", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def _write(self, data, mode):\n check_path(self.config_path)\n\n with open(self.log_file, mode) as log:\n if mode == 'a' and self.add_time:\n msg = self.TIME_TEMPLATE.format(time=strftime('%c'), error_msg=data)\n else:\n msg = data\n\n log.write(msg.encode(self._encoding, 'ignore'))", "def to_log(self, namefile=None):\n if namefile is None:\n namefile = self.name.replace(' ', '_')+'.log'\n f = open(namefile, 'w')\n f.write(self.__str__())\n f.close()", "def log_all(self):\n self.save_raw()\n self.log()", "def write_log(self):\n if self.hash_log_curr:\n temp_dict = {}\n count = 0\n for key, value in self.hash_log_curr.iteritems():\n temp_dict[value[4] + str(count)] = key\n count += 1\n temp_sort = temp_dict.keys()\n temp_sort.sort()\n temp_sort.reverse()\n\n try:\n log = open(self.log_path + r'\\hash_log.txt', 'w')\n # log header\n log.write(self.log_header)\n # write hash_log_content to log\n for key in temp_sort:\n value = self.hash_log_curr[temp_dict[key]]\n log.write(value[0]+'|'+value[1]+'|'+value[2]+'|'+value[3]+'|'+value[4]+'|'+value[5] + '\\n')\n log.close()\n self.print_to_log('New log writen to file: ' + self.log_path + r'\\hash_log.txt' )\n except IOError:\n self.print_to_log('Cannot open log file to write')\n raise\n except:\n self.print_to_log('Unknown Error')\n raise", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def logInfo(self, timestamp, info):\n self.logs['messages'].write(','.join([str(i) for i in [\n self.formatTimestamp(timestamp), info]]) + '\\n')\n self.logs['messages'].flush()", "def write_to_log(self, log_file, log_data):\n with open(self.gamelogs_path + log_file, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(log_data)\n f.close()", "def writeLog(self, log_path):\r\n f = open(log_path, 'w')\r\n f.write(str(self))\r\n f.close()", "def save_exception(exc):\n LOG.error(\"Error - %s\", str(exc))\n hour = time.strftime(\"_%H_%M_%S\")\n today = time.strftime(\"_%d_%m_%Y\")\n data = (str(exc)+traceback.format_exc())\n\n file = open(\"./logs/ERROR_\"+threading.currentThread().getName()+today+\".log\",'a+') #Replace to fix OSError\n file.write(\"\\n==\"+hour+\"==\\n\")\n file.write(Parser.parse_text(data))\n file.write(\"=====================================\\n\")\n file.close()", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def save_log(file_path=None, stat=\"\"):\n fp = open(file_path, 
mode=\"a+\")\n fp.write(stat + \"\\n\")\n fp.close()", "def local(self, text):\n\t\tlogf = open(\"update_log.txt\", \"a\")\n\t\tdate = datetime.datetime.now()\n\t\tlogf.writelines(\"[\" + str(date) + \"] \" + text + \"\\n\")\n\t\tlogf.close()", "def save(self, log_information):\n with self.connection.cursor() as cursor:\n # Create a new record\n sql = \"\"\"INSERT INTO `logs` (\n `latitude`, `longitude`, `address`, `number`,\n `neighborhood`, `city`, `postal_code`, `state`,\n `country`\n ) VALUES (\n %(latitude)s, %(longitude)s, %(address)s, %(number)s,\n %(neighborhood)s, %(city)s, %(postal_code)s, %(state)s,\n %(country)s\n )\"\"\"\n cursor.execute(sql, log_information)\n\n # connection is not autocommit by default. So we must commit to save\n # changes.\n self.connection.commit()", "def data_log(self, file, spectra):\n if self.datalogflag:\n with open(file, 'a') as f:\n f.write('{0}, '.format(spectra))\n self.vprint(\n 2, 'Writing spectra to data log at {}'.format(file))", "def log(self, message):\n try:\n stat = os.stat(self.logpath)\n if stat.st_size >= 1000000:\n os.rename(self.logpath, self.logpath + '.1')\n except:\n pass\n logfile = open(self.logpath, 'a+')\n logfile.write(message + \"\\n\")\n logfile.close()", "def _addLogEntry(request, action, pagename, filename):\n from MoinMoin.logfile import editlog\n t = wikiutil.timestamp2version(time.time())\n fname = wikiutil.url_quote(filename)\n\n # Write to global log\n log = editlog.EditLog(request)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)\n\n # Write to local log\n log = editlog.EditLog(request, rootpagename=pagename)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)", "def log(self, my_string):\n ## Open/Close each call is ridiculously inefficient.\n ## This was just a quick solution to build from\n ## TODO: Improve the logging mechanism\n logto = open(self.logfile, 'a')\n logto.write(my_string)\n logto.close()", "def log(self, message):", "def logging_data(self):\n with open('sensor_data.log','w') as f:\n json.dump(self.read_continuous_data, f)", "def write(self, identifier: str, message: str):\n self.log[identifier] = message", "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def write_log(text):\n write_file(read_file(log_file), log + '\\n' + text)", "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self", "def loginfo(self, msg):\n self.logger.info(msg)", "def putLog(self, log):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.LOG_EVENT, log, self.hash, self.config)", "def update_log(er=\"File created\",log_path=log_path, s3_path_log = s3_path_log, upload=False):\n print(er)\n with open(log_path, 'a') as file:\n file.write(str(datetime.now()) + ',' + str(er) + '\\n')\n # if upload is True:\n # s3.meta.client.upload_file(log_path, bucket_name, s3_path_log)", "def _log(self, message):\n pass", "def write(self):\n with open(\"log.txt\", 'w') as f:\n for message in self.message_list:\n f.write(message + \"\\n\")", "def WriteErrorsToFile():\n if(not __errorsTracked__ is None):\n 
if(len(__errorsTracked__)>0):\n formattedLogName = '_'.join[\"ErrorLog\",\"GarageChecker\",datetime.date,datetime.time]\n WriteToFile(formattedLogName,__errorsTracked__)\n __errorsTracked__ = []", "def log_tofile(self, inst):\n self._tick += 1\n if self._tick >= self._second:\n self.logger.log(inst)\n self._tick = 0", "def log_message(self, message):\n with open(LOGFILE, \"a\") as f:\n currentDt = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S.%f)\")\n message = \"\\n\" + currentDt + '---' + message\n f.write(message)", "def log(self, message: str):", "def log(self, msg, alwaysPrint = False):\n if self.fileObject is None or alwaysPrint:\n print msg\n if self.fileObject:\n self.fileObject.write( msg + '\\n' )", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def data_log(self, file, **kwargs):\n time_string = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n average_data = kwargs.get('average_data')\n if self.datalogflag:\n with open(file, 'a') as f:\n f.write('{0}, {1}'.format(time_string, average_data))\n f.write('\\n')\n self.vprint(2, 'Writing average air quality data to data log at {}'.format(file))", "def append_to_logfile(self):\n with open(self.path, \"a+\") as f:\n for item in self.logs:\n f.write(item)\n self.logs.clear()", "def write_to_file(self, *args, **kwargs) -> None:\n with open(self._log_file, 'a') as file:\n print(file=file, *args, **kwargs)", "def log_and_print(self, message):\n self.f.write(message + \"\\n\")\n print message", "def set_log_file(filename):\n pass", "def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)", "def create_log(self, num_machines):\n\n # generates a folder for logs if one does not exist\n os.makedirs('logs', exist_ok=True)\n\n # record extra info at the top of the log file\n extra_info = [f'num machines: {num_machines}', f'ticks per second: {self.ticks_per_second}', f'lifetime: {self.lifetime}']\n dummy_info_dict = {k:info for k, info in zip(LogEntry.ENTRY_ORDER, extra_info)}\n\n with open(self.log_filename, mode='a') as log_file:\n writer = csv.DictWriter(log_file, fieldnames=LogEntry.ENTRY_ORDER)\n writer.writerow(dummy_info_dict)\n writer.writeheader()", "def _stdlog(self, msg):\n print msg\n logger.info(msg)", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = 
datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def log_info(self, msg):\n self.logger.info(msg)", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n 
\n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def log_success(self):\n with open(self.logfile, 'a+') as f:\n f.write(self.BEGIN + self.message + '\\n' + self.END)\n self.message = ''", "def writeLog(pid):\n\tglobal processes,logfile,strikes,sleep\n\tproc = processes[pid]\n\tlogfile.write('[%s] %d %s %f%%cpu %f%%mem (over %d s): %s\\n'%(time.strftime('%b %d %H:%M:%S'),pid,proc.user,proc.cpu,proc.mem,proc.count*sleep,proc.command))", "def store(self, **stats):\n if self.first_row:\n self.log_headers = list(stats.keys())\n for key in stats:\n assert key in self.log_headers, f\"Can't introduce a new key that you didn't include before: {key}\"\n\n # Write to output file\n if self.first_row:\n self.file_writer.writerow(self.log_headers)\n self.file_writer.writerow(stats.values())\n self.output_file.flush()\n\n # Display in stdout\n if self.log_freq > 0 and self.counter % self.log_freq == 0:\n _print_table(stats)\n\n self.first_row = False\n self.counter += 1", "def log(self, message):\n self._log += \"%s\\n\" % message\n print message", "def send_log():\n log.info(f\"UUID={UUID}\")\n log.info(f\"SPLIT={SPLIT}\")\n log.info(f\"BATCH_SIZE={BATCH_SIZE}\")\n log.info(f\"EPOCHS={EPOCHS}\")\n log.info(f\"PATIENCE={PATIENCE}\")\n log.info(f\"X_FREQ={X_FREQ}\")\n log.info(f\"LOOK_BACK={LOOK_BACK}\")\n log.info(f\"LOOK_AHEAD={LOOK_AHEAD}\")\n log.info(f\"KERNEL_SIZE={KERNEL_SIZE}\")\n log.info(f\"FILTERS={FILTERS}\")\n log.info(f\"L1L2={L1L2}\")\n log.info(f\"D1={D1}\")\n log.info(f\"D2={D2}\")\n log.info(f\"DOUT={DOUT}\")\n log.info(f\"PLOT={PLOT}\")\n log.info(f\"SHUFFLE={SHUFFLE}\")", "def appendLog(self):\n if self.logBuffer == None :\n self.logBuffer = \"Some header\\nhere\\n\\n\"\n self.logBuffer += \"\\tx\\ty\\ttheta : ul\\tur\\tt-neur\\n\";\n \n self.logBuffer += '%2.1f: %2.6f\\t %2.6f\\t %2.6f : ' % \\\n\t ( self.t, self.env.state[0], self.env.state[2], self.env.state[4] )\n self.logBuffer += '%1.3f\\t %1.3f \\t%1.2f \\t' % \\\n ( 
self.env.action[0], self.env.action[1], self.env.action[2] )\n self.logBuffer += 'Dst/Theta/Speed: \\t%f\\t%f\\t%f \\tF: %.2f \\n' % \\\n ( self.env.getDistance(), self.env.getOrientation(), self.env.getDistance(), self.getReward() )" ]
[ "0.7579466", "0.7374402", "0.7347465", "0.70612556", "0.69241893", "0.69229144", "0.69018483", "0.6860829", "0.68271667", "0.67728823", "0.6762465", "0.6749048", "0.6685187", "0.66748893", "0.6671111", "0.66622746", "0.6618556", "0.6599946", "0.6599946", "0.6597023", "0.65964586", "0.65901273", "0.6573837", "0.65586334", "0.65402126", "0.6531082", "0.65134573", "0.6505944", "0.6489775", "0.648779", "0.64620155", "0.6453281", "0.6441042", "0.64346576", "0.64226913", "0.6420089", "0.6416695", "0.64069146", "0.63937783", "0.6378739", "0.6324976", "0.6317836", "0.63045454", "0.63021135", "0.63015926", "0.62966657", "0.6273819", "0.6263009", "0.62533414", "0.62488246", "0.6246205", "0.62461054", "0.6233486", "0.6233434", "0.6217805", "0.6186134", "0.61666036", "0.61629546", "0.61588585", "0.6157825", "0.6151425", "0.6140395", "0.61398184", "0.6139343", "0.61342895", "0.6128415", "0.6128036", "0.61238927", "0.6105306", "0.60935354", "0.6080301", "0.60608804", "0.6059511", "0.60579944", "0.60486996", "0.60485244", "0.60454506", "0.60436535", "0.6036961", "0.60369337", "0.603646", "0.6028476", "0.60211504", "0.6019191", "0.59965575", "0.5990896", "0.59853864", "0.5973779", "0.5970662", "0.59681076", "0.59668326", "0.59646153", "0.5962683", "0.59614235", "0.59512365", "0.59496", "0.59411025", "0.59394187", "0.59342223", "0.59315306" ]
0.64186484
36
Delete temporary files, close log files and email results.
def CleanUp(self):
    if (not self.keep_epi_raw or not self.keep_epi_mot) \
                                and not self.opts.debug_tmp:
        self.tmp.Clean()
    overall_msg = self.SummaryErrorMessage()
    if self.tmplt and not self.no_email:
        EmailResults(self.tmplt['email'], overall_msg, \
                     self.topdir, self.dumpfile, self.logfile, self.motcor_summary)

    # Write the error message to the log file.
    if self.f_log is None:
        # Log file not opened yet, do it now.
        if self.logdir is not None:
            logfile = '%s/preprocess.log' % self.logdir
            f_log = open(logfile,'w')
            f_log.write('\n%s\n' % overall_msg)
            f_log.close()
    else:
        self.f_log.write('\n%s\n' % overall_msg)
    sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def cleanup(self):\n if self.log_fo:\n self.log_fo.close()", "def classCleanup(cls):\n cls.RemoveTempFile(\"child_send1.txt\")\n cls.RemoveTempFile(\"child_read1.txt\")\n cls.RemoveTempFile(\"child_send2.txt\")\n cls.RemoveTempFile(\"child_read2.txt\")", "def tearDown(self):\r\n\r\n # turn off the alarm\r\n signal.alarm(0)\r\n\r\n remove_files(self.files_to_remove, False)\r\n if self.server_socket:\r\n self.server_socket.close()\r\n # give clients time to clean up\r\n sleep(1)\r\n if exists(self.tmp_dir):\r\n try:\r\n rmdir(self.tmp_dir)\r\n except OSError:\r\n # give clients some more time, fail if still error\r\n sleep(5)\r\n rmdir(self.tmp_dir)", "def clear_log_files(log_files):\n for log_file in log_files:\n try:\n open(log_file, 'w', 0).close()\n except IOError:\n pass", "def __call__(self):\n for tmp_file in filter(lambda x: x.exists(), self.temp_files):\n tmp_file.unlink()\n\n for proc in self.processes:\n try:\n os.kill(proc, signal.SIGTERM)\n except ProcessLookupError:\n pass", "def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def clean(self):\n # Wait for all threads to finish\n [x.join() for x in self.__threads]\n\n if self.recording:\n self.record_file.close()\n\n if self.simulating:\n self.simulate_file.close()\n\n self.s.shutdown(socket.SHUT_RDWR)\n self.s.close()\n print(\"Cleaned\")", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def tearDown(self):\n try:\n shutil.rmtree(self._msgdir)\n shutil.rmtree(self._tmp_dir)\n except OSError as e:\n print('Error removing temporary directory %s' % self._tmp_dir)\n print(e)", "def cleanup_logs(self):\n\n _now = time.time()\n\n for _id in self.open_logs.keys():\n try:\n if _now > (self.open_logs[_id]['last_time'] + self.FILE_ACTIVITY_TIMEOUT):\n # Flush and close the log file, and pop this element from the dictionary.\n self.open_logs[_id]['log'].flush()\n self.open_logs[_id]['log'].close()\n self.open_logs.pop(_id, None)\n self.log_info(\"Closed log file for %s\" % _id)\n except Exception as e:\n self.log_error(\"Error closing log for %s - %s\" % (_id, str(e)))", "def exitLogCleanup(*args):\n for logFile in args:\n os.unlink(logFile)\n return None", "def __del__(self):\n self.close_files()", "def delete_temporary_files(request, tmp_path_factory):\r\n _tmp_path_factory = tmp_path_factory\r\n\r\n def cleanup():\r\n tmp_path = _tmp_path_factory.getbasetemp()\r\n if pathlib.Path(tmp_path).exists() and pathlib.Path(tmp_path).is_dir():\r\n shutil.rmtree(tmp_path)\r\n\r\n request.addfinalizer(cleanup)", "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "def exit_and_clean_up(temp_folder):\n # Capture the traceback\n logging.info(\"There was an unexpected failure\")\n exc_type, exc_value, exc_traceback = sys.exc_info()\n for line in traceback.format_tb(exc_traceback):\n logging.info(line)\n\n # Delete any files that were created for this sample\n logging.info(\"Removing temporary folder: \" + temp_folder)\n 
shutil.rmtree(temp_folder)\n\n # Exit\n logging.info(\"Exit type: {}\".format(exc_type))\n logging.info(\"Exit code: {}\".format(exc_value))\n sys.exit(exc_value)", "def cleanup(bot, client, servers, print_all):\n try:\n if bot:\n bot.stop()\n finally:\n if servers:\n servers.stop()\n if print_all:\n if bot:\n bot.dump_log()\n if servers:\n servers.dump_log()\n if client:\n client.dump_log()\n if not Test.leak:\n shutil.rmtree(Test.tmpdir)", "def pytest_sessionfinish(session, exitstatus):\n\n # dat files are created when using attachements\n print(\"\\n-------------------------\\nClean dpytest_*.dat files\")\n fileList = glob.glob('./dpytest_*.dat')\n for filePath in fileList:\n try:\n os.remove(filePath)\n except Exception:\n print(\"Error while deleting file : \", filePath)", "def dispose(self):\n rmtree(self._temp_path)", "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhIDA:: [!] Unexpected error while removing temporary files.\")", "def delete_log(self):\n os.system('rm -rf *.log')\n os.system('rm -rf *.log~')\n os.system('rm -rf *.last')\n os.system('rm -rf *.last~')", "def tearDown(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "def tearDown(self):\n self.tempdir.cleanup()", "def _clean_up_temporary_files(dataset_dir):\n return", "def cleanup(self):\n async def close_session():\n await self.logi.close()\n\n self.loop.run_until_complete(close_session())\n\n self.loop.close()\n self.logi = None\n if os.path.isfile(CACHE_FILE):\n os.remove(CACHE_FILE)", "def __del__(self):\n shutil.rmtree('tmp')\n self.quit_browser()", "def close(self):\n try:\n process_id = get_pid(\"broker_mqtts\")\n except subprocess.CalledProcessError:\n pass\n else:\n os.system(\"kill -9 {}\".format(process_id))\n if os.path.isfile(self.log):\n os.system(\"rm -rf \" + self.log)", "def release_logger_files():\n for hl in logging.getLogger().handlers:\n if isinstance(hl, logging.FileHandler):\n hl.close()\n logging.getLogger().removeHandler(hl)", "def release_logger_files():\n for hl in logging.getLogger().handlers:\n if isinstance(hl, logging.FileHandler):\n hl.close()\n logging.getLogger().removeHandler(hl)", "def unload(self):\n for f in self.logs.values():\n f.close()", "def __del__(self):\n\n self.logfd.close()", "def delete_logs(self):\n if self.etw_log is not None:\n files = sorted(glob.glob(self.etw_log + '*'))\n for path in files:\n try:\n os.remove(path)\n except Exception:\n pass", "def __del__(self):\n for f in self._files:\n f.close()", "def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "def cleanup():\n default_log_dir = Path(DEFAULT_LOGDIR)\n if default_log_dir.exists():\n shutil.rmtree(default_log_dir)\n yield \"Cleanup\"\n if default_log_dir.exists():\n shutil.rmtree(default_log_dir)", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as 
e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def _remove_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n os.remove(fn)", "def tearDown(self):\n print(\n \"\\nDeleting temporary files...\\n\")\n try:\n shutil.rmtree(TEST_DIR)\n except OSError:\n pass", "def cleanup(self):\r\n if self.tempDirectory != None:\r\n shutil.rmtree(self.tempDirectory, True)\r\n self.tempDirectory = None", "def tearDown(self):\n # Fail if any WARNING/ERROR/CRITICAL logs were generated\n self.logging_handler.assertNoLogsOver(logging.INFO)\n\n logging.getLogger('COT').removeHandler(self.logging_handler)\n\n self.validate_with_ovftool(self.temp_file)\n\n # Delete the temporary directory\n if os.path.exists(self.temp_dir):\n logger.debug(\"Deleting temp dir %s\", self.temp_dir)\n shutil.rmtree(self.temp_dir)\n self.temp_dir = None\n self.temp_file = None\n\n tmps2 = set(glob.glob(os.path.join(tempfile.gettempdir(), 'cot*')))\n delta = tmps2 - self.tmps\n if delta:\n self.fail(\"Temp directory(s) {0} left over after test!\"\n .format(delta))\n\n # Clear output caches for helper commands:\n for helper in helpers.values():\n helper.cached_output.clear()\n\n # Let's try to keep things lean...\n delta_t = time.time() - self.start_time\n if delta_t > 5.0:\n print(\"\\nWARNING: Test {0} took {1:.3f} seconds to execute. \"\n \"Consider refactoring it to be more efficient.\"\n .format(self.id(), delta_t))", "def cleanup(self, keep_logs : bool = False):\n if self.path.exists():\n try:\n shutil.rmtree(self.path)\n except Exception as e:\n warn(e)\n if self.log_path.exists() and not keep_logs:\n try:\n os.remove(self.log_path)\n except Exception as e:\n warn(e)", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def __exit__(self, exc_type, exc_value, traceback):\n logger.debug(\"Cleaning up temporary directory: %s\", self.temporary_directory)\n shutil.rmtree(self.temporary_directory)\n del self.temporary_directory", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def tearDown(self):\n utils.rm_rf(TMP_DIR_PATH)", "def cleanup_temp_dir(context):\n\n try:\n os.chdir(context.cwd)\n except:\n print(\"Current working file record does not exist\")\n\n try:\n context.tempdir.cleanup()\n except:\n print(\"Temporary directory cannot be cleaned up - does it exist?\")", "def tearDown(self):\n self.tmp.cleanup()", "def teardown_class(cls):\n os.remove(logfilename)", "def _cleanup(self):\n # delete stdout/stderr\n if os.path.isfile(self.stdout):\n os.unlink(self.stdout)", "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)", "def cleanUp(self):\n self.dirMonitor.stop()\n self.filesList.cleanUp()", "def cleanup():\n if 
config.get('global').get('no_cleanup'):\n return\n logging.info('Cleaning up temp directories')\n try:\n tmp_path = config.get('global').get('tmp_path')\n if os.path.exists(tmp_path):\n rmtree(tmp_path)\n except Exception as e:\n logging.error(format_debug(e))\n print_message('Error removing temp directories')\n\n try:\n archive_path = os.path.join(\n config.get('global').get('output_path'),\n 'script_archive',\n time.strftime(\"%Y-%m-%d-%I-%M\"))\n if not os.path.exists(archive_path):\n os.makedirs(archive_path)\n run_script_path = config.get('global').get('run_scripts_path')\n if os.path.exists(run_script_path):\n move(run_script_path, archive_path)\n except Exception as e:\n logging.error(format_debug(e))\n logging.error('Error archiving run_scripts directory')", "def close(self):\n if not self.__closed:\n self.counters = { \"error\": 0, \"warning\": 0, \"success\": 0, \"failure\": 0 }\n\n try:\n self.__flush_count = 0\n for handler in self.__filehandlers:\n handler.flush()\n self.__logger.removeHandler(handler)\n handler.close()\n except:\n # do nothing\n pass\n self.__closed = True", "def clean_temp_storage_dir(self, filenames):\n for fn in filenames:\n try:\n pathlib.Path(pathlib.PurePath(self.temp_storage_dir, fn)).unlink()\n except FileNotFoundError:\n pass", "def close(self):\r\n self._report_file.close()\r\n # Make sure everything's closed.\r\n for files in self._output_files.values():\r\n for f in files.values():\r\n f.close()", "def close_files(self):\n self.wb_alm.close()\n self.wb_defect.close()\n self.wb_enhancement.close()\n self.wb_incident.close()\n self.wb_destination.close()", "def release(self):\n self.errorlog.close()\n del self.errorlog\n del self.printer", "def classCleanup(cls):\n cls.RemoveTempFile(SettingsCommandTestCase.output_file_name)", "def cleanup(self):\n\n # NOTE(jbresnah) call stop on each of the servers instead of\n # checking the pid file. stop() will wait until the child\n # server is dead. 
This eliminates the possibility of a race\n # between a child process listening on a port actually dying\n # and a new process being started\n servers = [self.api_server, self.conductor_server, ]\n for s in servers:\n try:\n s.stop()\n except Exception:\n pass\n\n for f in self.files_to_destroy:\n if os.path.exists(f):\n os.unlink(f)", "def main():\n dir_path = '/home/ubuntu/test_files' # path for the log files that needs to be pruned\n stat_file_name = 'file_status_info' # temp file will be created to store the stat of each files to calculate when to delete\n \n # Get the list of all the files where we want to perfrom the delete operations\n file_list = get_list_of_files_in_dir(dir_path)\n\n # Get the current system date\n current_date = get_current_date()\n\n # Iterate through all the log, error, info files in the specified directory path and check for the criteria of file older than 5 days and delete.\n for fil in file_list:\n get_file_stat(dir_path, stat_file_name, fil)\n filename, file_date = get_file_last_modification_date(stat_file_name)\n\n print(\"*********** %s file stat is written **************\" % fil)\n days = abs(current_date - file_date).days\n \n # Check if the file modification date if older than 5 days.\n if days > 5:\n remove_files(os.path.join(dir_path, fil))\n else:\n print(\"No eligible file(s) found to be deleted\")", "def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)", "def clean_cwd():\n\n # Generator of the files generated for each runs\n del_files = (file for file in os.listdir() if file.endswith('.vtk')\n or file.endswith('.dat')\n or file.startswith('eeldata')\n or file.endswith('.log'))\n\n for file in del_files:\n try:\n os.remove(file)\n print(\"\\rRemoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to remove {:s}\".format(file))\n raise\n\n print('')", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def _cleanup(self):\n try:\n tmpdir = self.tmpdir\n except AttributeError:\n # Don't need to do anything if the temp dir isn't set\n return\n shutil.rmtree(tmpdir)", "def __del__(self) -> None:\n try:\n shutil.rmtree(self.temp_path)\n except FileNotFoundError:\n pass", "def close_log():\n\n global log_file\n if log_file is not None:\n try:\n log_file.flush()\n finally:\n log_file.close()", "def on_cleanup(self):\n self.close()", "def on_cleanup(self):\n self.close()", "def teardown(self):\n super(TestCisPickleOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def tearDown(self):\n logging.info(\"Delete %s\", self._temp_dir)\n shutil.rmtree(self._temp_dir)", "def close(self):\n\n sp.call([\"convert\", \"{}_*\".format(self.tmp_prefix),\n self.filename])\n\n sp.call(\"rm {}_*\".format(self.tmp_prefix), shell=True)\n sp.call([\"rmdir\", self.tmp_dir])", "def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()", "def __del__(self):\n\n # cleanup logging handlers\n for handler in self.logger.handlers[::-1]:\n try:\n handler.acquire()\n handler.flush()\n handler.close()\n except (OSError, ValueError):\n pass\n finally:\n 
handler.release()", "def tearDown(self):\n \tshutil.rmtree(self.tempdir)", "def _cleanup(self):\n if self.current_session is not None:\n self.current_session.close()\n self.current_session = None\n\n for handler in list(self.logger.root_logger.handlers):\n self.logger.root_logger.removeHandler(handler)\n handler.flush()\n handler.close()", "def _cleanup(self):\n if self.pidfile:\n os.unlink(self.pidfile)", "def cleanup(self):\n self.all_wav_to_mp3()\n self.past_songs_db.close()\n self.move_tracks_to_music_folder( )\n self.delete_leftovers()\n print \"Cleanup finished\"", "def tearDown(self):\n\n for fname in self.fnames:\n FileSystem.unlink(fname)", "def cleanup(self):\n try:\n self.disconnect()\n self.log_file.close()\n SerialAsyncLoop.stop_loop(self.logger)\n except Exception as e:\n self.applog.exception(f\"Exception --> {SerialConsole.cleanup.__qualname__}\", exc_info=e)\n raise", "def cleanup(self):\n\n print \"Cleaning up...\",\n sys.stdout.flush()\n\n builddir = os.path.join(self.build)\n\n comm = 'rm -rf '+builddir\n #+' '+libdir+' '+logdir\n (output, error, retz) = runShellCommand(comm)\n\n print \"done.\"", "def LogAndRemoveFiles(temp_dir, regex_pattern):\n regex = re.compile(regex_pattern)\n if not os.path.isdir(temp_dir):\n return\n for dir_item in os.listdir(temp_dir):\n if regex.search(dir_item):\n full_path = os.path.join(temp_dir, dir_item)\n print 'Removing leaked temp item: %s' % full_path\n try:\n if os.path.islink(full_path) or os.path.isfile(full_path):\n os.remove(full_path)\n elif os.path.isdir(full_path):\n chromium_utils.RemoveDirectory(full_path)\n else:\n print 'Temp item wasn\\'t a file or directory?'\n except OSError, e:\n print >> sys.stderr, e\n # Don't fail.", "def tearDown(self):\n self.clearTempDir()", "def tearDown(self):\n # Fail if any WARNING/ERROR/CRITICAL logs were generated\n self.logging_handler.assertNoLogsOver(logging.INFO)\n\n logging.getLogger('COT').removeHandler(self.logging_handler)\n\n if hasattr(self, 'instance'):\n self.instance.destroy()\n self.instance = None\n\n self.validate_with_ovftool(self.temp_file)\n\n # Delete the temporary directory\n if os.path.exists(self.temp_dir):\n logger.debug(\"Deleting temp dir {0}\".format(self.temp_dir))\n shutil.rmtree(self.temp_dir)\n self.temp_dir = None\n self.temp_file = None\n\n tmps2 = set(glob.glob(os.path.join(tempfile.gettempdir(), 'cot*')))\n delta = tmps2 - self.tmps\n if delta:\n self.fail(\"Temp directory(s) {0} left over after test!\"\n .format(delta))\n\n # Let's try to keep things lean...\n delta_t = time.time() - self.start_time\n if delta_t > 5.0:\n print(\"\\nWARNING: Test {0} took {1:.3f} seconds to execute. 
\"\n \"Consider refactoring it to be more efficient.\"\n .format(self.id(), delta_t))", "def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()", "def del_tmp() -> None:\n for elem in os.listdir('./tmp'):\n path = f\"./tmp/{elem}\"\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)", "def cleanup(tempdir):\n try:\n shutil.rmtree(tempdir)\n except OSError:\n pass", "def tearDown(self):\n testing_dir = os.path.split(os.path.realpath(__file__))[0]\n for f in glob.glob(os.path.join(testing_dir, \"*\")):\n if f.split(\".\")[-1] in [\"o\", \"out\", \"pyc\", \"log\"]:\n subprocess.call(['rm', f])", "def __exit__(self, exc_type, exc_val, exc_tb):\n os.rmdir(self._tempdir)", "def tearDown(self):\n with contextlib.suppress(FileNotFoundError):\n Path(\"test.xlsx\").absolute().unlink()", "def test_monitor_correctly_deletes_temporary_directory_in_the_case_of_any_error(\n self,\n ):\n # Arrange\n feed_pages = [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2]\n\n client = create_autospec(spec=ProQuestAPIClient)\n client.download_all_feed_pages = MagicMock(\n return_value=list(map(fixtures.serialize, feed_pages))\n )\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory, self._db, self._proquest_collection, ProQuestOPDS2Importer\n )\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n results = {\"temp_directory\": None, \"temp_files\": []}\n original_mkdtemp = tempfile.mkdtemp\n original_temp_file_constructor = tempfile.NamedTemporaryFile\n original_rmtree = shutil.rmtree\n\n def create_temp_directory():\n results[\"temp_directory\"] = original_mkdtemp()\n\n return results[\"temp_directory\"]\n\n def create_temp_file(**kwargs):\n temp_file = original_temp_file_constructor(**kwargs)\n results[\"temp_files\"].append(temp_file.name)\n\n return temp_file\n\n # Act\n with patch(\"tempfile.mkdtemp\") as mkdtemp_mock, patch(\n \"tempfile.NamedTemporaryFile\"\n ) as named_temporary_file_constructor_mock, patch(\n \"shutil.rmtree\"\n ) as rmtree_mock, patch(\n \"api.proquest.importer.parse_feed\"\n ) as parse_feed_mock:\n mkdtemp_mock.side_effect = create_temp_directory\n named_temporary_file_constructor_mock.side_effect = create_temp_file\n rmtree_mock.side_effect = original_rmtree\n parse_feed_mock.side_effect = core.opds2_import.parse_feed\n\n # An exception will be raised while trying to parse the feed page.\n parse_feed_mock.side_effect = Exception(\"\")\n\n monitor.run_once(False)\n\n # Assert\n # Ensure that the temp directory was successfully created.\n tempfile.mkdtemp.assert_called_once()\n\n # Ensure that only one temp file was created, after this an exception was raised and the process stopped.\n tempfile.NamedTemporaryFile.assert_has_calls(\n [call(mode=\"r+\", dir=results[\"temp_directory\"], delete=False)]\n )\n\n # Ensure that parse_feed method was called only once.\n parse_feed_mock.assert_has_calls([call(ANY, silent=False)])\n\n # Ensure that the temp directory was successfully removed.\n shutil.rmtree.assert_called_once_with(results[\"temp_directory\"])\n assert False == os.path.exists(results[\"temp_directory\"])", "def teardown(self):\n super(TestCisPlyOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def teardown(self):\n try:\n self._close(True)\n except:\n pass\n try:\n os.unlink(os.path.join(self.home_dir, DB_FILE))\n except 
FileNotFoundError as _:\n pass", "def _call_cleanup(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n pass", "def __del__(self):\n for handle in self._filehandles:\n handle.close()", "def __del__(self):\n\n # close socket\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()\n except OSError:\n pass\n\n # quit selenium driver\n if self.driver:\n self.driver.quit()\n\n # make sure to delete linkfinder temporary files\n files = glob.glob(\"linkfinder_tmp*\")\n for file in files:\n os.remove(file)\n\n # make sure helper process is dead\n try:\n os.kill(self.helper_pid, signal.SIGKILL)\n except ProcessLookupError:\n pass\n\n if os.path.exists(UNIX_SOCK_ADDR):\n os.remove(UNIX_SOCK_ADDR)" ]
[ "0.68277276", "0.67437613", "0.660802", "0.6526464", "0.6455161", "0.6444915", "0.64396834", "0.64194924", "0.6357929", "0.635103", "0.6348235", "0.6337223", "0.631956", "0.62995", "0.6263754", "0.6261455", "0.6259881", "0.6235108", "0.6226701", "0.62038773", "0.6193479", "0.619078", "0.6185075", "0.61716455", "0.6162405", "0.6148543", "0.61366403", "0.61211395", "0.6110119", "0.61008394", "0.6094485", "0.6094485", "0.60817474", "0.60717994", "0.6071512", "0.60656995", "0.60620594", "0.6057521", "0.60528797", "0.603094", "0.60286385", "0.60231495", "0.60209477", "0.6004534", "0.5996872", "0.5996733", "0.5996421", "0.5984027", "0.5979224", "0.59701145", "0.59607434", "0.5959775", "0.5945069", "0.5935149", "0.5897033", "0.58951557", "0.5885507", "0.5880851", "0.587962", "0.58740205", "0.5870042", "0.5868677", "0.58653396", "0.5851836", "0.5847082", "0.58420503", "0.5836608", "0.583468", "0.5834509", "0.58197486", "0.5819063", "0.5814661", "0.5814661", "0.58087206", "0.58036685", "0.5800616", "0.57937884", "0.57905906", "0.5789693", "0.57806677", "0.57727456", "0.57724655", "0.5770342", "0.5752853", "0.5746332", "0.5739621", "0.573046", "0.5728316", "0.57260895", "0.5721934", "0.5716965", "0.57097477", "0.5701906", "0.5698013", "0.56977355", "0.56958807", "0.5694483", "0.56871206", "0.5684093", "0.568106" ]
0.58914703
56
Create summary message for email.
def SummaryErrorMessage(self, error_log=None):
    if error_log is None:
        error_log = self.error_log
    # server = socket.gethostname().split('.')[0]
    mssg = '\nPreprocessing script complete for data in %s\n\nServer: %s\n'\
            % (self.topdir, self.server)

    # Log time.
    ms = time.time()
    ms = int(1000*(ms - int(ms)))
    mssg += '\nTime: %s:%03d\n' % \
            (datetime.today().strftime('%a %b %d, %Y; %X'), ms)

    if len(error_log) > 0:
        mssg += 'Command: %s\n\nSummary:\n' % (' '.join(sys.argv))
        lines = error_log.split('\n')
        for line in lines:
            if line.startswith('Description:'):
                mssg += line[12:]
        mssg += '\n\nDetails:' + error_log
    else:
        mssg += '\nNo problems detected (this does NOT imply that everything was computed.).\n\n'
    return mssg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def composeSummaryEmail(self):\r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: boost@lists.boost.org\r\nReply-To: boost@lists.boost.org\r\nSubject: [Report] \"\"\"\r\n message += str(self.numFailures()) + \" failures on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nBoost regression test failures\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists all regression test failures on high-priority platforms.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n\\n'\r\n\r\n if self.numFailures() == 0:\r\n message += \"No failures! Yay!\\n\"\r\n return message\r\n \r\n # List the platforms that are broken\r\n any_broken_platforms = self.numReportableFailures() < self.numFailures()\r\n if any_broken_platforms:\r\n message += \"\"\"The following platforms have a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n message += \"\"\"\r\nFailures on these \"broken\" platforms will be omitted from the results below.\r\nPlease see the full report for information about these failures.\r\n\r\n\"\"\"\r\n \r\n # Display the number of failures\r\n message += (str(self.numReportableFailures()) + ' failures in ' + \r\n str(len(self.libraries)) + ' libraries')\r\n if any_broken_platforms:\r\n message += (' (plus ' + str(self.numFailures() - self.numReportableFailures())\r\n + ' from broken platforms)')\r\n \r\n message += '\\n'\r\n\r\n # Display the number of failures per library\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n num_failures = library.numFailures()\r\n message += ' ' + library.name + ' ('\r\n \r\n if library.numReportableFailures() > 0:\r\n message += (str(library.numReportableFailures())\r\n + \" failures\")\r\n \r\n if library.numReportableFailures() < num_failures:\r\n if library.numReportableFailures() > 0:\r\n message += ', plus '\r\n \r\n message += (str(num_failures-library.numReportableFailures()) \r\n + ' failures on broken platforms')\r\n message += ')\\n'\r\n pass\r\n\r\n message += '\\n'\r\n\r\n # Provide the details for the failures in each library.\r\n for k in sorted_keys( self.libraries ):\r\n library = self.libraries[k]\r\n if library.numReportableFailures() > 0:\r\n message += '\\n|' + library.name + '|\\n'\r\n for test in library.tests:\r\n if test.numReportableFailures() > 0:\r\n message += ' ' + test.name + ':'\r\n for failure in test.failures:\r\n platform = failure.platform\r\n if not platform.isBroken():\r\n message += ' ' + platform.name\r\n message += '\\n'\r\n\r\n return message", "def generateNotifyMessage(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n today = date.today()\n current_date = today.strftime(\"%B %d, %Y\")\n\n subject = \"Progam operating warning - Not Running\"\n body = \"Since \" + current_date + \" at \" + current_time \n msg = f'Subject: {subject} \\n\\n{body}'\n return msg", "def summary_string(self) -> str:", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", 
os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def formatted_message(self):\n message = MIMEMultipart()\n message['From'] = self.sender\n message['To'] = self.receiver\n message['Subject'] = self.subject.format(**self.params)\n message.attach(MIMEText(self.body.format(**self.params), 'plain'))\n return message.as_string()", "def _construct_message(self):\n self.message[\"text\"] = \"\"\n if self.from_:\n self.message[\"text\"] += \"From: \" + self.from_ + \"\\n\"\n if self.subject:\n self.message[\"text\"] += \"Subject: \" + self.subject + \"\\n\"\n\n self.message[\"text\"] += self.body\n self._add_attachments()", "def compose_email(self, donation_id=-1, write_totals=False):\n if not write_totals:\n amount = self.donations[donation_id]\n else:\n amount = self.total_donations\n email_string = f\"\\nDear {self.name},\\n Thank you for your generous\\\n gift of ${amount:.2f}! It will help Local Charity achieve our mission.\\n\\\n Best regards,\\n\\\n Local Charity\\n\\n\"\n return email_string", "def build_message():\n outgoing_mail = Mail()\n outgoing_mail.from_email = Email(email_from_address, email_from_name)\n outgoing_mail.subject = subject\n personalization = Personalization()\n for recipient in email_to_addresses:\n personalization.add_to(Email(recipient))\n outgoing_mail.add_personalization(personalization)\n outgoing_mail.add_content(Content(\"text/plain\", str.join('\\n', _log)))\n outgoing_mail.add_content(Content(\"text/html\", \"<html><body> {} </body></html>\".format(str.join(' <br /> ', _log))))\n return outgoing_mail.get()", "def make_email_message(itrf_begin, epoch_begin, itrf_final, epoch_final, velocity, date):\n\n message = \"Estimado Usuario,\\n\\nEn adjunto encontrará los resultados de la transformacion ITRF de acuerdo a la siguiente configuración:\\n\\nITRF inicial: \"+str(itrf_begin)+\"\\nEpoca inicial: \"+str(epoch_begin)+\"\\nITRF final: \"+str(itrf_final)+\"\\nEpoca final: \"+str(epoch_final)+\"\\nModelo de velocidad: \"+velocity+\"\\nFecha de la solicitud de la transformación: \"+date+\"\\n\\n\\nSaludos Cordiales,\\n\\nEquipo de Geodesia del IGVSB.\"\n return message", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked below.\\n\\n\".format(\n settings.frontend)\n )\n for 
quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "def email_article_summary(to_address, summary_filename, start_year, start_month, end_year, end_month, num_articles):\n \n host = HOST\n from_address = FROM_ADDRESS\n body = \"\"\"\n Good morning,\n \n There were %i peer-reviewed papers produced by researchers at this institute between %i/%i and %i/%i. A summary file containing the front page from each article is attached with this email. Please print out these summary pages, highlight the author(s) on each article and pin them to the monthly papers noticeboard.\n \n Thanks a bunch,\n \n Skynet.\n \n \"\"\" % (num_articles, start_month, start_year, end_month, end_year, )\n \n recipients = [to_address, ADMIN_ADDRESS]\n \n logging.info(\"Preparing summary email report for %s\" % (', '.join(recipients), ))\n \n successful = True\n for recipient in recipients:\n \n message = MIMEMultipart()\n message[\"From\"] = from_address\n message[\"To\"] = recipient\n message[\"Subject\"] = \"Refereed papers summary between %i/%i and %i/%i\" % (start_month, start_year, end_month, end_year, )\n message[\"Date\"] = formatdate(localtime=True)\n \n message.attach(MIMEText(textwrap.dedent(body).lstrip()))\n \n part = MIMEBase('application', 'octet-stream')\n part.set_payload(open(summary_filename, 'rb').read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(summary_filename))\n message.attach(part)\n \n server = smtplib.SMTP(host)\n \n try:\n failed = server.sendmail(from_address, to_address, message.as_string())\n server.close()\n \n except Exception as e:\n logging.critical(\"Unable to send email to %s. 
Error: %s\" % (recipient, str(e), ))\n successful = False\n \n else:\n logging.info(\"Email successfully sent to %s\" % recipient)\n \n \n return successful", "def composeTestingSummaryEmail(self):\r\n brokenPlatforms = 0\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n brokenPlatforms = brokenPlatforms + 1\r\n\r\n if brokenPlatforms == 0:\r\n return None;\r\n \r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: boost-testing@lists.boost.org\r\nReply-To: boost-testing@lists.boost.org\r\nSubject: [Report] \"\"\"\r\n message += str(brokenPlatforms) + \" potentially broken platforms on \" + branch\r\n if branch != 'trunk':\r\n message += ' branch'\r\n message += \" (\" + str(datetime.date.today()) + \")\"\r\n message += \"\"\"\r\n\r\nPotentially broken platforms for Boost regression testing\r\n\"\"\"\r\n message += \"Report time: \" + self.date + \"\"\"\r\n\r\nThis report lists the high-priority platforms that are exhibiting a\r\nlarge number of regression test failures, which might indicate a problem\r\nwith the test machines or testing harness.\r\n\r\nDetailed report:\r\n\"\"\"\r\n\r\n message += ' ' + self.url + '\\n'\r\n\r\n message += \"\"\"\r\nPlatforms with a large number of failures:\r\n\"\"\"\r\n for platform in sorted_keys( self.platforms ):\r\n if self.platforms[platform].isBroken():\r\n message += (' ' + platform + ' ('\r\n + str(len(self.platforms[platform].failures))\r\n + ' failures)\\n')\r\n\r\n return message", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def massage_addinfo(self) -> str:\n self.message_str = f'{self.time}\\n{self.sent_by}\\n'\n return self.message_str", "def summary(self):\n return ''", "def test_email_content():\n\n time_of_day = alerts.current_time()\n hostname = alerts.host_name()\n\n subject = \"Subject: Raspi-Sump Email Test\"\n message = \"Raspi-Sump Test Email\"\n\n return \"\\r\\n\".join(\n (\n f\"From: {configs['email_from']}\",\n f\"To: {configs['email_to']}\",\n f\"{subject}\",\n \"\",\n f\"{hostname} - {time_of_day} - {message}.\",\n )\n )", "def summary(self):\r\n return '%s%s: %s%s %s%s' % (BLUE, self.title,\r\n GREEN, self.description,\r\n NORMAL, self.link)", "def massage_addinfo(self) -> str:\n self.message_str= \"{}, {}\\n\".format(self.sent_by, self.time)", "def create_test_summary(args, TEST_RESULTS):\n logging.error(\"Creating test summary report...\")\n\n try:\n test_summary = \"Performance Metrics of {APP} Application Tested from this PR\\n\".format(APP=args.bundle_id)\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS:\n if element != LAUNCHES:\n test_summary += \"> {KEY}: {VALUE}\".format(KEY=element, VALUE=TEST_RESULTS[element])\n if element == INSTALL_LAUNCH_DURATION:\n if int(TEST_RESULTS[INSTALL_LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \"ms :x:\\n\"\n else:\n test_summary += \"ms :white_check_mark:\\n\"\n\n if element == INSTALL_MEMORY_USAGE:\n if int(TEST_RESULTS[INSTALL_MEMORY_USAGE]) > args.memory_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n\n if element == APP_SIZE:\n if int(TEST_RESULTS[APP_SIZE]) > args.size_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n test_summary += 
\"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS[LAUNCHES]:\n test_summary += \"> DEVICE: {DEVICE} | LAUNCH TYPE: {LAUNCH_TYPE} | \".format(DEVICE=element[DEVICE], LAUNCH_TYPE=element[LAUNCH_TYPE])\n test_summary += \"DURATION: {DURATION}ms \".format(DURATION=element[LAUNCH_DURATION])\n if int(element[LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \" :x: | \"\n else:\n test_summary += \" :white_check_mark: | \"\n\n test_summary += \"MEMORY USAGE: {MEMORY_USAGE}MB \".format(MEMORY_USAGE=element[MEMORY_USAGE])\n if int(element[MEMORY_USAGE]) > args.memory_limit:\n test_summary += \" :x:\\n\"\n else:\n test_summary += \" :white_check_mark:\\n\"\n test_summary += \"----------------------------------------------------\\n\"\n\n except Exception as e:\n logging.error(\"Creating test summary failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(test_summary)\n return test_summary", "def add_summary_header(self):\n self.fontSize(22, bold=True)\n self.PDF.setFillColor(\"black\")\n self.PDF.drawString(75, 260, \"Summary\")\n self.fontSize(FONT_XXS)\n self.PDF.setFillColor(HexColor(\"#9CA3AF\"))\n # self.PDF.drawString(\n # 185,\n # 260,\n # f\"{self.invoice.subscription.start_date} - {self.invoice.subscription.end_date}\",\n # )\n self.PDF.setFillColor(\"black\")\n self.fontSize(FONT_XS)\n self.PDF.setFillColor(HexColor(\"#9CA3AF\"))\n self.PDF.drawString(75, 290, \"Services\")\n self.PDF.drawString(475, 290, \"Amount\")\n self.PDF.setFillColor(\"black\")\n self.draw_line(305)", "async def summarise(self, ctx, start=None, end=None):\n if ctx.message.author.bot:\n return\n\n if not start or not end:\n await ctx.send(\n \"Insufficient arguments!\\n Arguements: <start ID> <end ID>\"\n )\n return\n\n summary, keywords, clean_messages = await convert_to_summary(\n ctx, start, end\n )\n\n if summary:\n summary = \"```\\n\" + summary + \"```\"\n await ctx.send(summary)\n else:\n await ctx.send(\"```Not enough messages to generate summary```\")\n\n if keywords:\n keyword_str = \"Keywords: \"\n for word in keywords:\n keyword_str += f\"{word}, \"\n\n keyword_str = \"```\\n\" + keyword_str + \"```\"\n await ctx.send(keyword_str)\n else:\n await ctx.send(\"```Not enough messages to generate keywords```\")", "def __str__(self):\n email_template = '\\n'.join((f'\\n\\nDear {self._full_name},\\n',\n f'Thank you for your very kind donation of ${self.last_donation:.2f}.\\n',\n 'It will be put to very good use.\\n',\n ' Sincerely,',\n ' -The Team\\n'))\n return email_template", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = 'parkingpermit-donotreply@janelia.hhmi.org'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def summary(self) -> str:\n pass", "def _render_mail(self, rebuild, success, canceled):\n subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'\n body_template = '\\n'.join([\n 'Image: 
%(image)s',\n 'Status: %(endstate)s',\n 'Submitted by: %(user)s',\n 'Logs: %(logs)s',\n ])\n\n endstate = None\n if canceled:\n endstate = 'canceled'\n else:\n endstate = 'successful' if success else 'failed'\n url = None\n if self.url and self.workflow.openshift_build_selflink:\n url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')\n\n formatting_dict = {\n 'image': self.workflow.image,\n 'endstate': endstate,\n 'user': '<autorebuild>' if rebuild else self.submitter,\n 'logs': url\n }\n return (subject_template % formatting_dict, body_template % formatting_dict)", "def write_plaintext_body(self, recipient: str) -> str:\n body = f'Dear {recipient},\\n\\n' \\\n + 'You are receiving this message because you participated ' \\\n + f'in {self.trip_title}.\\n\\n' \\\n + 'I hope you had a pleasant time.\\n\\n' \\\n + 'If you have debts to repay, please be courteous and ' \\\n + 'reimburse your fellow participant(s) in a timely fashion.\\n' \\\n\n subbody: str = ''\n subbody_debt: List = list()\n subbody_credit: List = list()\n for currency, reimbs in self.reimbursement_matrices.items():\n debts = reimbs[recipient].dropna()\n credits = reimbs.loc[recipient].dropna()\n for creditor, credit in debts.iteritems():\n #creditor = creditor.rjust(self._max_name_len)\n amount = f'{credit} {currency}'.ljust(6+1+3)\n subbody_debt.append('\\t' + creditor + ' | ' + amount)\n for debtor, debt in credits.iteritems():\n #debtor = debtor.rjust(self._max_name_len)\n amount = f'{debt} {currency}'.ljust(6+1+3)\n subbody_credit.append('\\t' + debtor + ' | ' + amount)\n\n if len(subbody_debt) == 1:\n subbody += 'Please reimburse the following participant:\\n' \\\n + subbody_debt[0]\n elif len(subbody_debt) > 1:\n subbody += 'Please reimburse the following participants:\\n' \\\n + '\\n'.join(subbody_debt)\n else:\n subbody += 'You don\\'t have any payable reimbursements.'\n\n subbody += '\\n\\n'\n\n if len(subbody_credit) == 1:\n subbody += 'The following participant is obligated to ' \\\n + 'reimburse you:\\n' + subbody_credit[0]\n elif len(subbody_credit) > 1:\n subbody += 'The following participants are obligated to ' \\\n + 'reimburse you:\\n' + '\\n'.join(subbody_credit)\n else:\n subbody += 'You don\\'t have any receivable reimbursements.' 
\\\n\n body += '\\n' + subbody\n\n return body", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def summary_str(self):\n if not self.results:\n return self.summary.empty() or ''\n elif self.state == Ok:\n return self.summary.ok(self.results) or ''\n return self.summary.problem(self.results) or ''", "def initialize_error_summary() -> str:\n error_summary = '\\nSummary of <span class=\"tex-fatal\">Critical Errors:</span>\\n\\n<ul>\\n'\n return error_summary", "def summary_print(self):\r\n self.ensure_one()\r\n self.sent = True\r\n #return self.env['ir.actions.report'].report_action(self, 'proandsys_purchase_14.summary_landed_report')\r\n return self.env.ref('proandsys_purchase_14.summary_landedcost').report_action(self)", "def create_group_message(group_name,subject,message,status,createdby):\n query=\"INSERT INTO groupmails(group_name,subject,message,status,createdby)VALUES('{}','{}','{}','{}','{}')\".format(\n group_name,subject,message,status,createdby\n )\n cur.execute(query)", "def _message(self, recipient, connection, context=None):\n base_subject = '{{ event.calendar.course.name }} {{ event.title }}'\n if not self.event.get_documents(True):\n template_name = self.REQUEST_TEMPLATE\n subject = 'Got a {} study guide?'.format(base_subject)\n else:\n template_name = self.PUBLISH_TEMPLATE\n subject = '{} study guide'.format(base_subject)\n\n subject = Template(subject).render(context)\n body = get_template(template_name).render(context)\n\n return make_email_message(subject, body,\n make_display_email(\n self.sender_address,\n self.sender_name),\n recipient, connection)", "def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way&mdash;write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg 
+ '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def email_body_to_user_sending_msg(profile, message):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Way to get the conversation started! You messaged <a href=\\\"https://127.0.0.1:5000/profile?hero=' + profile.prof_id + '\\\" style=\"color:#1488CC\">' + profile.prof_name.encode('utf8', 'ignore') + '</a> and should get a response soon.<br><br>'\n\tmsg = msg + 'Until then, stand tight. 
<br><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def generate_email(self, length=10):\n self.update()\n # Generate the emails\n lines = []\n line_count = 0\n while line_count < length:\n sent = self.markov.make_sentence()\n if sent != None:\n lines.append(sent)\n line_count +=1\n else:\n print(sent)\n email_body = '\\n'.join(lines)\n email_header = random.choice(get_best_trigrams(filter(email_body),5))\n return email_header.decode('utf-8').strip() + \"\\n\" + email_body.decode('utf-8').strip()", "def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })", "def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)", "def mysummary(self):\n return self.sprintf(\"IGMPv3 Group Record %IGMPv3gr.type% %IGMPv3gr.maddr%\")", "def text_summary_message(self):\n failed = [e for e in self.evaluations if not e.passes]\n if failed == []:\n return \"SUCCESS - all constraints evaluations pass\"\n else:\n return \"FAILURE: %d constraints evaluations failed\" % len(failed)", "def create_messages(application, action, remedy):\n\n messages = [] \n messages.append(\"\"\"Your Resources: </br><pre style=\"margin-left: 40px\">\"\"\" + application + \"</br></pre>\" + action + \"\"\" in AWS. 
<strong style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\">\"\"\" + remedy +\"\"\"</strong>\n </td>\n </tr><tr style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; margin: 0;\"><td class=\"content-block\" style=\"font-family: 'Helvetica Neue',Helvetica,Arial,sans-serif; box-sizing: border-box; font-size: 14px; vertical-align: top; margin: 0; padding: 0 0 20px;\" valign=\"top\">\n This message was sent to inform you of changes happening to your resources.\n <ul>\n <li>New instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.</li>\n <li>Instances without the necessary tags are notified through email and Slack.</li>\n </ul>\n If you have any further questions, please reply to this email.\"\"\")\n \n messages.append(\"Your Resources:\\n\\n\" + application + \"\\n\\n\" + action + \" in AWS. \" + remedy + \"\\n\" + \n (\"\\nThis message was sent to inform you of changes happening to your resources.\\n\"\n \"\\nNew instances are auto-tagged with an expiration date, an NT ID, and a patch group if invalid.\"\n \"Instances without Owner Mail and Owner Team tags are notified through email and slack.\\n\"\n \"\\nIf you have any further questions, please reply to this email.\")) \n\n return messages", "def createThankYouEmail(self):\n result = (\"\\nDear {:s},\\n\\n\"\n \"\\tThank you so much for your generous donation of ${:,.2f}!\\n\\n\"\n \"\\tIt will be put to very good use.\\n\\n\"\n \"\\t\\tSincerely,\\n\\t\\t\\t- The Team\".format(self.name, self.getTotDonation())\n )\n return result", "def summary(self, fromdt, todt):\r\n totalSaved = self.miser.totalSaved(fromdt, todt) \r\n sumStr = \"%s: %s to %s\\n\" % (self.miser.name, fromdt, todt)\r\n sumStr += \"Total saved: %.2f\" % totalSaved\r\n\r\n sumStr += \"\\n\\nGoals:\\n\"\r\n sumStr += self._goalsMetStr(fromdt, todt, totalSaved)\r\n\r\n return sumStr", "def _create_msg(self, tr_id, i_triples, i_type, r_triples, r_type, confirm):\n params = SSAP_UPDATE_PARAM_TEMPLATE % (str(i_type).upper(),\n str(i_triples),\n str(r_type).upper(),\n str(r_triples),\n str(confirm).upper())\n tmp = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(self.targetSS),\n self.tr_type, str(tr_id), params)\n return tmp", "def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"", "async def report(cls, description, **kwargs):\n return await cls.message(description, color = discord.Color(0xbfbfbf))", "def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment):\n html = \"\"\"\n <ul>\n <li>\n <strong>Case {case_name}</strong>: <a href=\"{url}\">{display_name}</a>\n </li>\n <li><strong>Variant type</strong>: {category} ({subcategory})\n <li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>\n <li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>\n <li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>\n <li><strong>Gene panels</strong>: {panels}</li>\n <li><strong>GT call</strong></li>\n {gtcalls}\n <li><strong>Amino acid changes</strong></li>\n {tx_changes}\n <li><strong>Comment</strong>: {comment}</li>\n <li><strong>Ordered by</strong>: {name}</li>\n </ul>\n \"\"\".format(\n case_name=case_name,\n url=url,\n display_name=display_name,\n category=category,\n subcategory=subcategory,\n breakpoint_1=breakpoint_1,\n breakpoint_2=breakpoint_2,\n 
hgnc_symbol=hgnc_symbol,\n panels=panels,\n gtcalls=gtcalls,\n tx_changes=tx_changes,\n name=name,\n comment=comment)\n\n return html", "def generate_html_mesg(info, open_quests, owner, tags):\n\n msg = '<html>' \\\n '<body style=\"font-family: Verdana; font-size: 1em; color: #000\">'\n msg += (\n \"<div style='padding: 10px; border-radius: 5px; background: #232f3e; \"\n \"color: #fff; font-weight: bold; font-size: 1.25em;'>\"\n \"Hermes Notifications\"\n \"</div>\"\n \"<div style='padding: 10px;'><p>This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.</p>\"\n \"<p>Due dates, if any, are noted with each quest.</p>\"\n \"\".format(owner)\n )\n msg += (\n \"<p>To throw an event manually, you can run the following command \"\n \"on a shell server:</p>\"\n \"<pre style='font-size: 1.2em'>$ hermes event create [event] --host \"\n \"[hostname]</pre>\"\n \"<p>Or you can visit the quests linked below.</p></div>\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"<div style='border-radius: 5px; background: #dce1e6; \"\n \"padding: 10px; margin-bottom: 10px;'>\"\n \"<span style='font-size: 1.1em; font-weight: bold'>QUEST {}</span><br/>\"\n \"<strong>CREATOR:</strong> {}<br />\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"<strong>DUE:</strong> {}<br/>\".format(quest.target_time)\n msg += \"<strong>DESC:</strong><p> \\\"{}\\\"</p>\".format(quest.description)\n msg += \"<strong>LINK:</strong> <code>{}/v1/quests/{}</code><br/>\".format(\n settings.frontend, quest_id\n )\n else:\n msg += (\n \"<div style='border-radius: 5px; background: #dce1e6; \"\n \"padding: 10px; margin-bottom: 10px;'>\"\n \"<span style='font-size: 1.1em; font-weight: bold'>Labors not \"\n \"associated with a quest:</span><br />\"\n )\n\n msg += \"<p>Machines with labors:</p>\"\n\n msg += \"<pre style='margin-left: 10px; font-size: 1.2em'>\"\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \"{} ({})\\n\".format(hostname, tags_str)\n\n msg += \"</pre></div>\"\n\n msg += \"</body>\"\n\n return msg", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px 
solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def summary(self) -> str:\n return pulumi.get(self, \"summary\")", "def create_base_message(subject, msg=None):\n message = EmailMessage()\n message[\"From\"] = email_config.from_address\n message[\"To\"] = email_config.to_addresses\n message[\"Subject\"] = subject\n message.set_content(\"\\n\".join([\n 50 * \"_\",\n f\"Hostname: {gethostname()}\",\n f\"Time: {datetime.now().strftime(config.datetime_format)}\",\n f\"Log file: {LOGGER.log_file_path}\",\n 50 * \"_\",\n ]))\n # Add `msg` to the contents if it is not None\n if msg is not None:\n _append_content(message, msg)\n # Attach the log file if it is available\n if LOGGER.log_file_path is not None and config.email_attach_log_file:\n _attach_log_file(message)\n return message.as_string()", "def _create_msg(self, tr_id, payload, confirm, expire_time, encoding):\n tmp = [\"<SSAP_message><transaction_type>INSERT</transaction_type>\",\n \"<message_type>REQUEST</message_type>\"]\n tmp.extend([\"<transaction_id>\", str(tr_id), \"</transaction_id>\"])\n tmp.extend([\"<node_id>\", str(self.node_id), \"</node_id>\"])\n tmp.extend([\"<space_id>\", str(self.targetSS), \"</space_id>\"])\n tmp.extend(['<parameter name=\"insert_graph\" encoding=\"%s\">' % encoding.upper(),\n str(payload), \"</parameter>\"])\n tmp.extend(['<parameter name = \"confirm\">',\n str(confirm).upper(),\n \"</parameter>\",\n \"</SSAP_message>\"])\n return \"\".join(tmp)", "def mergeSummary(self, *args):\n if len(args) == 1:\n lvl = args[0].level\n msg = args[0].message\n elif len(args) == 2:\n lvl = args[0]\n msg = args[1]\n\n if (lvl > DiagnosticStatus.OK) == (self.level > DiagnosticStatus.OK):\n if len(self.message) > 0:\n self.message += '; '\n self.message += msg\n elif lvl > self.level:\n self.message = msg\n\n if lvl > self.level:\n 
self.level = lvl", "def create_mail_content(daily: bool = False):\n if not daily:\n order = STATE['order'] if STATE['order'] else get_closed_order()\n trade_part = create_report_part_trade(order)\n performance_part = create_report_part_performance(daily)\n advice_part = create_report_part_advice()\n settings_part = create_report_part_settings()\n general_part = create_mail_part_general()\n\n if not daily:\n trade = [\"Last trade\", \"----------\", '\\n'.join(trade_part['mail']), '\\n\\n']\n performance = [\"Performance\", \"-----------\",\n '\\n'.join(performance_part['mail']) + '\\n* (change within 24 hours)', '\\n\\n']\n advice = [\"Assessment / advice\", \"-------------------\", '\\n'.join(advice_part['mail']), '\\n\\n']\n settings = [\"Your settings\", \"-------------\", '\\n'.join(settings_part['mail']), '\\n\\n']\n general = [\"General\", \"-------\", '\\n'.join(general_part), '\\n\\n']\n\n bcs_url = 'https://bitcoin-schweiz.ch/bot/'\n text = '' if daily else '\\n'.join(trade)\n\n if not CONF.info:\n text += '\\n'.join(performance) + '\\n'.join(advice) + '\\n'.join(settings) + '\\n'.join(general) + bcs_url + '\\n'\n else:\n text += '\\n'.join(performance) + '\\n'.join(advice) + '\\n'.join(settings) + '\\n'.join(general) + CONF.info \\\n + '\\n\\n' + bcs_url + '\\n'\n\n csv = None if not daily else INSTANCE + ';' + str(datetime.datetime.utcnow().replace(microsecond=0)) + ' UTC;' + \\\n (';'.join(performance_part['csv']) + ';' + ';'.join(advice_part['csv']) + ';' +\n ';'.join(settings_part['csv']) + ';' + CONF.info + '\\n')\n\n return {'text': text, 'csv': csv}", "def format(self, message):", "def __str__(self):\n return f'{self.message} {self.description}'", "def summary(self, *args):\n if len(args) == 1:\n self.level = args[0].level\n self.message = args[0].message\n elif len(args) == 2:\n self.level = args[0]\n self.message = str(args[1])", "def msg(self):\n ended = time.time()\n started_wait = datetime.datetime.fromtimestamp(self.started).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n raised_date = datetime.datetime.fromtimestamp(ended).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n duration = ended - self.started\n return \"Info[started at {}, raised at {} after {}s]\".format(\n started_wait, raised_date, round(duration, 2)\n )", "def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')", "def create_workflow_info(html=True):\n if html:\n hdr = \"<h3>Workflow Theses</h3>\"\n state_info = \"<p><b>stav:</b>\" + self.workflow.workflow.state + \"</p>\"\n started_info = \"<p><b>spuštěno:</b>\" + str(self.workflow.workflow.started) + \"</p>\"\n stopped_info = \"<p><b>zastaveno:</b>\" + str(self.workflow.workflow.stopped) + \"</p>\"\n double_stop = \"<br><br>\"\n batches_count_info = \"<p><b>počet zpracovaných dávek:</b>\" + \\\n str(len(self.workflow.batches_processing)) + \"</p><br><br>\"\n else:\n hdr = \"Workflow Theses\\n\\n\"\n state_info = \"stav:\" + self.workflow.workflow.state + \"\\n\"\n started_info = \"spuštěno:\" + str(self.workflow.workflow.started) + \"\\n\"\n stopped_info = \"zastaveno:\" + str(self.workflow.workflow.stopped) + \"\\n\"\n 
double_stop = \"\\n\\n\"\n batches_count_info = \"počet zpracovaných dávek:\" + str(len(self.workflow.batches_processing)) + \"\\n\\n\"\n\n msg_part = hdr + state_info + started_info + stopped_info + double_stop + batches_count_info\n\n return msg_part", "def generate_test_report(self, message):\n pass", "def printSummary(self):\n pass", "def publish_summary(self, jobs):\n pass", "def summary(self):\n name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def make_incident_description(self, incident):\n i_summary = incident.get('summary', \"Proofpoint TRAP Incident - Lacks a summary\")\n i_event_count = len(incident['events'])\n i_sources = incident.get('event_sources', 'No Sources Found')\n\n description = '{}\\n\\nTotal Events in incident: {}\\n\\nFrom: {}\\n'.format(i_summary, i_event_count, i_sources)\n return {'format': 'text', 'content': description}", "def format_body(self, notice, fullname=None):\n if not fullname:\n fullname = self.fullname or self.name\n\n out = StringIO()\n out.write(\"Attention: {0}\\n\".format(fullname))\n out.write(\"Notification Type: {0}\\n\".format(notice.type))\n if notice.origin:\n out.write(\"Origin: {0}\\n\".format(notice.origin))\n out.write(\"\\n\")\n if notice.title:\n out.write(textwrap.fill(notice.title, 80) + \"\\n\\n\")\n\n desc = notice.description\n if desc:\n if not isinstance(desc, list):\n desc = [desc]\n for d in desc:\n if notice.doformat:\n d = textwrap.fill(d, 80)\n out.write(d)\n out.write('\\n\\n')\n\n out.write(\"Issued: {0}\\n\".format(notice.issued))\n for key in notice.metadata:\n val = notice.metadata[key]\n if isinstance(val, list):\n val = json.dumps(val, indent=2, separators=(', ', ': '))\n else:\n val = str(val)\n out.write(\"{0}: {1}\\n\".format(key, val))\n\n return out.getvalue()", "def composeEmail(self, report):\r\n\r\n # Determine if we need to send a message to this developer.\r\n requires_message = False\r\n for platform in self.platforms:\r\n if platform.isBroken():\r\n requires_message = True\r\n break\r\n\r\n if not requires_message:\r\n return None\r\n\r\n # Build the message header\r\n message = \"\"\"From: Douglas Gregor <dgregor@osl.iu.edu>\r\nTo: \"\"\"\r\n message += self.name + ' <' + self.email + '>'\r\n message 
+= \"\"\"\r\nReply-To: boost@lists.boost.org\r\nSubject: Large number of Boost failures on a platform you maintain as of \"\"\"\r\n message += str(datetime.date.today()) + \" [\" + report.branch + \"]\"\r\n message += \"\"\"\r\n\r\nYou are receiving this report because one or more of the testing\r\nplatforms that you maintain has a large number of Boost failures that\r\nare not accounted for. A full version of the report is sent to the\r\nBoost developer's mailing list.\r\n\r\nDetailed report:\r\n\"\"\"\r\n message += ' ' + report.url + \"\"\"\r\n\r\nThe following platforms have a large number of failures:\r\n\"\"\"\r\n\r\n for platform in self.platforms:\r\n if platform.isBroken():\r\n message += (' ' + platform.name + ' ('\r\n + str(len(platform.failures)) + ' failures)\\n')\r\n\r\n return message", "def GetKMsgSummary(kmsgp, prefix_str=\"\"):\n kmsghp = kmsgp.ikm_header\n kmsgh = dereference(kmsghp)\n out_string = \"\"\n out_string += \"{0: <20s} {1: <#019x} {2: <8s} {3: <#011x} \".format(\n ' ', unsigned(kmsgp), ' '*8, kmsgh.msgh_id)\n prefix_str = \"{0: <20s} \".format(' ') + prefix_str\n disposition = \"\"\n bits = kmsgh.msgh_bits & 0xff\n \n # remote port\n if bits == 17:\n disposition = \"rS\"\n elif bits == 18:\n disposition = \"rO\"\n else :\n disposition = \"rX\" # invalid\n \n out_string += \"{0: <2s}\".format(disposition)\n \n # local port\n disposition = \"\"\n bits = (kmsgh.msgh_bits & 0xff00) >> 8\n \n if bits == 17:\n disposition = \"lS\"\n elif bits == 18:\n disposition = \"lO\"\n elif bits == 0:\n disposition = \"l-\"\n else:\n disposition = \"lX\" # invalid\n \n out_string += \"{0: <2s}\".format(disposition)\n \n # voucher\n disposition = \"\"\n bits = (kmsgh.msgh_bits & 0xff0000) >> 16\n \n if bits == 17:\n disposition = \"vS\"\n elif bits == 0:\n disposition = \"v-\"\n else:\n disposition = \"vX\"\n\n out_string += \"{0: <2s}\".format(disposition) \n \n # complex message\n if kmsgh.msgh_bits & 0x80000000:\n out_string += \"{0: <1s}\".format(\"c\")\n else:\n out_string += \"{0: <1s}\".format(\"s\")\n \n # importance boost\n if kmsgh.msgh_bits & 0x20000000:\n out_string += \"{0: <1s}\".format(\"I\")\n else:\n out_string += \"{0: <1s}\".format(\"-\")\n \n dest_proc_name = \"\"\n if kmsgp.ikm_header.msgh_remote_port:\n dest_proc_name = GetDestinationProcessFromPort(kmsgp.ikm_header.msgh_remote_port)\n\n out_string += \"{0: ^6d} {1: <#019x} {2: <26s} {3: <26s}\\n\".format(\n unsigned(kmsgh.msgh_size), unsigned(kmsgh.msgh_local_port),\n GetKMsgSrc(kmsgp), dest_proc_name)\n \n if kmsgh.msgh_bits & 0x80000000:\n out_string += prefix_str + \"\\t\" + GetKMsgComplexBodyDesc.header + \"\\n\"\n out_string += prefix_str + \"\\t\" + GetKMsgComplexBodyDesc(kmsgp, prefix_str + \"\\t\") + \"\\n\"\n \n return out_string", "def summarize(self):\n txtStr = \"%s to %s, %d flight legs.\" %\\\n (self.origin, self.destination, self.nlegs)\n txtStr += \"\\nTakeoff at %s\\nLanding at %s\\n\" %\\\n (self.takeoff, self.landing)\n txtStr += \"Flight duration of %s including %s observing time\" %\\\n (str(self.flighttime), self.obstime)\n\n return txtStr", "def add_daily_summary(self):\n auth_date = self.report_date.strftime(\"%b %-d, %Y\")\n now = datetime.now().strftime(\"%x %X\")\n report_title = ' '.join([\n f'Report for {self.origin_value} participant consents authored on: {auth_date} 12:00AM-11:59PM UTC',\n f'(generated on {now} Central)'\n ])\n\n report_notes = [\n ['Notes:'],\n [f'Validation details on this sheet for {self.origin_value} participants only'],\n ['Checkbox validation currently 
only performed on GROR consents'],\n ['Total Errors can exceed Consents with Errors if any consents had multiple validation errors']\n ]\n\n self._add_text_rows(text_rows=[[report_title]], format_spec=self.format_specs.get('bold_text'))\n # Add any explanatory text / details about the report that have been included in the layout\n self._add_text_rows(text_rows=report_notes, format_spec=self.format_specs.get('legend_text'),\n row_pos=self.row_pos + 1)\n\n if not self._has_needs_correcting(self.consent_df):\n self._add_text_rows(text_rows=[['No consent validation errors detected']],\n format_spec=self.format_specs.get('italic_text'), row_pos=self.row_pos+1)\n\n # Daily summary counts for all the recently authored consents that were processed (regardless of errors)\n self._add_text_rows([['Total Consent Validation Counts']],\n format_spec=self.format_specs.get('bold_text'), row_pos=self.row_pos+1)\n self._add_consent_issue_count_header_section(hpo='All Entities')\n self._add_consent_issue_counts(self.consent_df, show_all_counts=True)", "def generate_audit_email_body(audit_id):\n msg = None\n try:\n ad_list, err = get_entries(audit_id)\n if err:\n raise Exception(err)\n if ad_list:\n ad = ad_list[0]\n msg = ' Audit time: %s\\n Performed from: %s\\n Performed by: %s\\n Action: %s.' % (\n ad['time'], ad['ip'], ad['username'], ad['action_str'])\n except Exception, e:\n return None, 'Error generating audit email message body : %s' % str(e)\n else:\n return msg, None", "def createMessage( self, *args, **kw ):\n if not kw.has_key('charset'):\n kw['charset'] = self.getOutputCharset()\n kw['to_mail'] = 1\n return MailServerBase.createMessage( self, *args, **kw )", "def generate_withno_attachement(sender, recipient, subject, body):\n # Basic Email formatting\n message = email.message.EmailMessage()\n message[\"From\"] = sender\n message[\"To\"] = recipient\n message[\"Subject\"] = subject\n message.set_content(body)\n return message", "def email_body_to_user_receiving_msg(profile, message):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + 'You\\'ve got mail. 
It\\'s from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + str(profile.prof_id) + '\\\" style=\"color:#1488CC\">' + profile.prof_name.encode('utf8', 'ignore') + '</a>.'\n\tmsg = msg + '<br><i>' + message.msg_content + '</i>'\n\tmsg = msg + '</font><br>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Reply</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def consolidate_messages(self, msg):", "def generate_message(self, mtu):\r\n raise SystemExit(self.sm.__end_msg__)", "def summary(self):\n if self.intact and self.valid:\n return 'INTACT:' + ','.join(self.summary_fields())\n else:\n return 'INVALID'", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def send_status_mail(self):\n from django.core.mail import send_mail\n subject = \"App Load Status | %s - %s | S%02d/-R%02d\" % (\n self.publication.account, self.title, self.load_count, self.reject_count)\n body = \"Account: %s\\nPublication: %s\\nStatus: %s\\nTime:%s - %s\\nLoaded: %02d\\nRejected: %02d\\n\\nComments\\n:%s\\n\" % (\n self.publication.account, self.publication, self.load_status, self.start_time, self.end_time, self.load_count, self.reject_count, self.comments)\n body = body + settings.EMAIL_DEFAULT_SIGNATURE\n if self.publication.id in [2,3,60,61,62,63,370,39]:\n return 
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.ADMIN_EMAIL, 'anand.kumar@contify.com', 'rajesh.swain@contify.com', 'tapan.puhan@contify.com'], fail_silently=True)\n else:\n return send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,\n [settings.ADMIN_EMAIL], fail_silently=True)", "def summary_line_and_description():", "def generate_web_service_email(details):\n subject = details[\"subject\"]\n body = details[\"message\"]\n from_email = settings.DEFAULT_FROM_ADDR\n reply_to_email = [settings.EMAIL_TARGET_W]\n to_email = details[\"email_to\"]\n\n email = GenericEmailGenerator(subject=subject, to_emails=to_email, bcc=reply_to_email, from_email=from_email,\n reply_to=reply_to_email, body=body, context={'mrkdwn': True})\n\n return email", "def makeAMQPmsg(self):\n msg = {'msgType' : 'AgentUpdate',\n 'AgentType': 'Bus',\n 'Extnum':self.Extnum,\n 'Vm': self.cv['Vm'],\n 'Va': self.cv['Va'],\n }\n return msg", "def make_info(self, v_queue_mean, v_tr_cv):\n info = '\\n - average queue size: %.2f packets'\n info += '\\n - CV for transmission rate: %.2f'\n return info % (v_queue_mean, v_tr_cv)", "def generate_selfservice_notice_email(context):\n subject = \"Self Service Form Submission\"\n from_email = settings.DEFAULT_FROM_ADDR\n to_email = [settings.EMAIL_TARGET_W, settings.EMAIL_TARGET_VP]\n\n cont_html = render_to_string('emails/email_selfservice.html', context)\n cont_text = render_to_string('emails/email_selfservice.txt', context)\n\n email = EmailMultiAlternatives(subject, cont_text, from_email, to_email)\n email.attach_alternative(cont_html, \"text/html\")\n\n return email", "def summarize(self):\n\n if self.summarizer == None:\n return \"The summarizer has not been initialised\"\n elif not isinstance(self.summarizer, Summarizer):\n raise TypeError(\"summarizer is not a Summarizer object\")\n\n self.summary = self.summarizer.summarize(self.article)\n return self.summary", "def create_message(self, subject, message, destination, origin = None):\n msg = MIMEText(message)\n if origin is not None:\n msg['From'] = origin\n else:\n msg['From'] = self.origin\n msg['Subject'] = subject\n msg['To'] = destination\n self.messages.append(msg)", "def generate_message(ctx, question, answer):\n return preamble.format(channel=rules_channel(ctx).id) + question + answer", "def gen_report(self):\n self.report = '#Report for {0}\\n'.format(self.ip)\n self.report += 'This report was generated by the chameleon pentest bot. 
We cannot grant 100% accurate results.\\n'\n self.report += '###Services:\\n'\n for service in self.services:\n self.report += '#####{0}:\\n- Port: {1}\\n- Info:{2}'.format(service.name, service.port, service.info)\n self.report += '###Vulnerabilities:\\n'\n for vuln in self.vulns:\n self.report += '- {0}\\n'.format(vuln.name)\n self.report += 'Open an issue for wrong results at github.com/coretool/chameleon.'", "def create_report(self):\n text_string = \"\\n\"\n text_string += \"Donor Name | Total Given | Num Gifts |\\\n Average Gift\\n\\\n ------------------------------------------------------------------\\n\"\n for donor in sorted(self.donors, key=Donor.sort_key, reverse=True):\n text_string += f\"{donor.name:<26} $ {donor.total_donations:>11.2f}\\\n {donor.number_of_donations:>10} $ {donor.average_gift:>11.2f}\\n\"\n\n return text_string", "def create_message(sender, to, subject, message_text_html, message_text_plain):\r\n message = MIMEMultipart('alternative')\r\n message['to'] = to\r\n message['from'] = sender\r\n message['subject'] = subject\r\n message_html = MIMEText(message_text_html, 'html') # HTML version\r\n message_plain = MIMEText(message_text_plain) # plain text version\r\n message.attach(message_plain)\r\n message.attach(message_html)\r\n return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}", "def summary_string(self) -> str:\n return f\"dixonoid: {self.plain_rules}\"", "def summaryView(request):\n\n alert_errors = []\n alert_infos = []\n alert_filters = []\n\n runs = get_runs_from_request_filters(\n request, alert_errors, alert_infos, alert_filters\n )\n\n summary = SummaryReport(runs)\n\n context = {\n \"refs\": summary.reference_runs(),\n \"runs\": summary.runs_checked_per_type(),\n \"tk_maps\": summary.tracker_maps_per_type(),\n \"certified_runs\": summary.certified_runs_per_type(),\n \"sums\": summary.sum_of_quantities_per_type(),\n \"alert_errors\": alert_errors,\n \"alert_infos\": alert_infos,\n \"alert_filters\": alert_filters,\n }\n\n return render(request, \"certhelper/summary.html\", context)", "def summarize(self):\n txtSumm = ''\n\n if self.legtype == 'Takeoff':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Landing':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Other':\n txtSumm = \"%02d -- %s\" %\\\n (self.legno, self.legtype)\n elif self.legtype == 'Observing':\n txtSumm = \"%02d -- %s, RA: %s, Dec: %s, LegDur: %s, ObsDur: %s\" %\\\n (self.legno, self.target, self.ra, self.dec,\n str(self.duration),\n str(self.obsdur))\n txtSumm += \"\\n\"\n if self.nonsid is True:\n txtSumm += \"NONSIDERIAL TARGET -- NAIFID: %d\" % (self.naifid)\n txtSumm += \"\\n\"\n txtSumm += \"(The SOFIA project sincerely hopes you enjoy \"\n txtSumm += \"your observing breaks due to XFORMS crashes)\"\n txtSumm += \"\\n\"\n txtSumm += \"ObsPlan: %s, ObsBlk: %s\" % (self.obsplan, self.obsblk)\n txtSumm += \"\\n\\n\"\n txtSumm += \"Elevation Range: %.1f, %.1f\" % (self.range_elev[0],\n self.range_elev[1])\n txtSumm += \"\\n\\n\"\n txtSumm += \"ROF Range: %.1f, %.1f\" % (self.range_rof[0],\n self.range_rof[1])\n txtSumm += \"\\n\"\n txtSumm += \"ROF Rate Range: %.1f, %.1f %s\" % (self.range_rofrt[0],\n self.range_rofrt[1],\n self.range_rofrtu)\n txtSumm += \"\\n\\n\"\n txtSumm += \"True Heading Range: %.1f, %.1f\" % (self.range_thdg[0],\n self.range_thdg[1])\n txtSumm += \"\\n\"\n txtSumm += \"True Heading Rate Range: %.1f, %.1f %s\" %\\\n (self.range_thdgrt[0],\n self.range_thdgrt[1],\n 
self.range_thdgrtu)\n txtSumm += \"\\n\"\n txtSumm += \"Moon Angle: %.1f, Moon Illumination: %s\" %\\\n (self.moonangle, self.moonillum)\n\n return txtSumm", "def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")", "def show_summary(self, lang):\n return self.summary % self.vars", "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def email_body_new_proposal_notification_to_seller(meeting, buyer_name, buyer_profile_id):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">\\n<tbody>\\n\\t<tr><td align=\"center\" valign=\"top\">\\n\\t</td></tr>\\n</tbody>\\n</table>'\n\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\">'\n\tmsg = msg + '\\n<tbody><tr>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tbody>'\n\tmsg = msg + '\\n\\t\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" />'\n\tmsg = msg + '\\n\\t\\t\\t\\t</a>'\n\tmsg = msg + '\\n\\t\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t\\t</tbody>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">'\n\tmsg = msg + '\\n\\t\\t\\t\\tGreat! 
You received a new proposal from <a href=\\\"https://127.0.0.1:5000/profile?hero=' + buyer_profile_id + '\\\" style=\"color:#1488CC\">'+ buyer_name + '</a>.'\n\tmsg = msg + '\\n\\t\\t\\t\\t<br><br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDescription: ' + meeting.get_description_html() + '<br><br>'\n\tmsg = msg + '\\n\\t\\t\\t</font><br><br>'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:150px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.accept_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center;\" target=\"_blank\">Accept</a> '\n\tmsg = msg + '\\n\\t\\t\\t<a href=\\\"'+ meeting.reject_url() +'\\\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #e55e62;font-size: 14px;border-radius: 3px;border: 1px solid #e55e62;font-family:Garamond, EB Garamond, Georgia, serif; width:50px;text-align:center\" target=\"_blank\">Reject</a> '\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\n\\t\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\tmsg = msg + '\\n</tr></tbody>'\n\tmsg = msg + '</table>'\n\treturn msg", "def __str__(self):\n return '- ' + str.join('\\n- ', [self.format_message(record)\n for record in self._messages]) + '\\n'", "def build_message(self, subject, body, attachment=None):\n msg = MIMEMultipart()\n msg['From'] = self.src_addr\n msg['To'] = self.dest_addr\n msg['Subject'] = subject\n msg.attach(MIMEText(body, 'plain'))\n\n if attachment is not None:\n if os.path.exists(attachment):\n with open(attachment, 'rb') 
as a:\n part = MIMEBase('application', 'octet-stream')\n part.set_payload(a.read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename= {}'.format(attachment))\n msg.attach(part)\n\n self.outbox.append(msg)" ]
[ "0.63322425", "0.61821836", "0.61753136", "0.6175085", "0.61733943", "0.6131707", "0.61256206", "0.60573125", "0.6047461", "0.6034194", "0.60252714", "0.6003194", "0.5889488", "0.5864201", "0.584809", "0.58364767", "0.58144146", "0.5763619", "0.57625306", "0.5760647", "0.5760176", "0.57419", "0.57328933", "0.57066536", "0.570077", "0.5686128", "0.5668326", "0.566309", "0.5615482", "0.5602021", "0.55937946", "0.55879915", "0.5584277", "0.55776787", "0.55430233", "0.55384487", "0.55302644", "0.5523392", "0.5522491", "0.54989254", "0.5488263", "0.54727036", "0.54683685", "0.5459027", "0.54586744", "0.54437554", "0.54390126", "0.5437434", "0.5433293", "0.5432784", "0.54015404", "0.5399378", "0.53976834", "0.5397042", "0.5394274", "0.5392775", "0.53848547", "0.53840697", "0.53795433", "0.53764665", "0.53734994", "0.5372144", "0.53712076", "0.5368467", "0.5365127", "0.5360165", "0.5359771", "0.5358905", "0.5358369", "0.5348092", "0.53448355", "0.53347886", "0.5332572", "0.5330909", "0.53070706", "0.530156", "0.53015006", "0.52921396", "0.5280913", "0.52801985", "0.52743185", "0.5270821", "0.5268508", "0.5256264", "0.52554184", "0.52455723", "0.52455634", "0.52440715", "0.524214", "0.52418166", "0.52381957", "0.5234165", "0.5231968", "0.523013", "0.52233344", "0.521263", "0.5202129", "0.5198537", "0.5195391", "0.51933044" ]
0.5509327
39
Bear in mind some simulations may contain neither peds nor objs
def __init__(self, no_peds=0, peds_topics=[], num_s_samples=1, objs=None):
    self.global_ts = -1
    self.PEDESTRIAN_TOPICS = peds_topics
    self.drone = DroneModel(num_s_samples)
    self.subject = SubjectModel(num_s_samples)
    if no_peds > 0:
        self.peds = {
            str(i): PedestrianModel(num_s_samples) for i in range(no_peds)
        }
    else:
        self.peds = None
    self.objs = objs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_simulations(self):\n pass", "def simulation():\n\n return {\n \"type\": \"class\",\n \"base\": \"iso.process_step\",\n \"is_abstract\": False,\n \"is_document\": True,\n \"pstr\": (\"({}/{}/{})\", (\"used\", \"ran_for_experiments\", \"ensemble_id\")),\n \"properties\": [\n (\n \"part_of_project\",\n \"linked_to(designing.project)\",\n \"1.N\",\n \"Project or projects for which simulation was run\",\n ),\n (\n \"ran_for_experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"One or more experiments with which the simulation is \"\n \"associated\",\n ),\n (\n \"sub_experiment\",\n \"linked_to(designing.numerical_experiment)\",\n \"0.1\",\n \"For start-date ensembles, this will indicate the beginning \"\n \"year; for offline models driven by output from another \"\n \"model, this will provide the source_id and variant_label \"\n \"for the 'driving' model.\",\n ),\n (\n \"used\",\n \"linked_to(science.model)\",\n \"1.1\",\n \"The model used to run the simulation\",\n ),\n (\n \"primary_ensemble\",\n \"linked_to(activity.ensemble)\",\n \"0.1\",\n \"Primary Ensemble (ensemble for which this simulation was \"\n \"first run).\",\n ),\n (\n \"institution\",\n \"linked_to(shared.party)\",\n \"0.1\",\n \"institution which carried out the simulation\",\n ),\n (\n \"parent_of\",\n \"linked_to(activity.child_simulation)\",\n \"0.N\",\n \"If appropriate, links to simulations which branched from \"\n \"this one\",\n ),\n (\n \"produced\",\n \"linked_to(data.dataset)\",\n \"0.N\",\n \"Products of the simulation\",\n ),\n (\n \"had_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Performance of the simulation.\",\n ),\n (\n \"ran_on\",\n \"linked_to(platform.machine)\",\n \"0.1\",\n \"The machine on which the simulation was run.\",\n ),\n (\n \"errata\",\n \"shared.online_resource\",\n \"0.1\",\n \"Link to errata associated with this simulation.\",\n ),\n (\n \"ensemble_id\",\n \"activity.axis_member\",\n \"0.N\",\n \"Identification within ensemble axes via axis member. \"\n \"(Multiple axis members within a simulation cannot share the \"\n \"same ensemble_axis.) (There must be an axis_member instance \"\n \"for each ensemble axis in a parent ensemble.)\",\n ),\n # Time\n (\n \"start_time\",\n \"time.date_time\",\n \"0.1\",\n \"The start date-time of the simulation. e.g. \"\n \"2012-04-01 00:00:00\",\n ),\n (\n \"end_time\",\n \"time.date_time\",\n \"0.1\",\n \"The end date-time of the simulation. e.g. 
\"\n \"2087-11-30 12:00:00\",\n ),\n (\n \"calendar\",\n \"time.calendar\",\n \"0.1\",\n \"The calendar used in the simulation\",\n ),\n # Further Info URL\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.1\",\n \"On-line location of additional documentation\",\n ),\n # Extra attributes\n (\n \"extra_attributes\",\n \"shared.extra_attribute\",\n \"0.N\",\n \"Additional attributes provided with simulation.\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n ],\n }", "def make_simulation(self):\n pass", "def test_empty() -> None:\n for class_name in [SimulationData, SystemData, ObservableData, TrajectoryData]:\n print(f\"Testing empty {class_name.__name__} objects\")\n object1 = class_name()\n object2 = class_name()\n assert object1 == object2\n\n object1 = EnsembleData(\"NVE\", natoms=10, volume=1)\n object2 = EnsembleData(\"NVE\", natoms=10, volume=1)\n assert object1 == object2", "def __init__(self,ensemble_obj,truths):\n\n# if ensemble_obj==None or truths==None:\n# print 'Warning: inputs not complete'\n self.ensemble_obj = ensemble_obj\n self.truths = truths", "def test_poms_unwrapped(self):\n import electrochemistry\n import numpy as np\n # Create model\n model = electrochemistry.POMModel(DEFAULT_POMS)\n times = model.suggest_times()\n values = model.simulate(times)\n\n self.assertEqual(len(values), len(times))", "def run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n ##we need to change the code for the case start people infected\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n #may have problem here\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()< k:\n pop[j].get_recovered()\n\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [i/N for i in recover]\n newsuspect = [s/N for s in suspect]\n newinfect = [i/N for i in infect]\n plt.plot(range(T+1),newrecover,label = \"r: percentage of removed \")\n plt.plot(range(T+1),newsuspect,label = \"s: percentage of susceptible\")\n plt.plot(range(T+1),newinfect,label = \"i: percentage of infected\")\n plt.xlabel(\"T\")\n plt.ylabel(\"percentage\")\n plt.title(\"Percentage of Population, Discrete\")\n plt.legend()\n plt.show()", "def test_particle_obj():\n # Set up the base parameters describing a particle object\n T = 273.15 + 15.\n P = 150e5\n Sa = 35.\n Ta = 273.15 + 4.\n composition = ['methane', 'ethane', 'propane', 'oxygen']\n yk = np.array([0.85, 0.07, 0.08, 0.0])\n de = 0.005\n K = 1.\n Kt = 1.\n fdis = 1e-6\n\n # Compute a few derived quantities\n bub = dbm.FluidParticle(composition)\n m0 = bub.masses_by_diameter(de, T, P, yk)\n\n # Create a `SingleParticle` object\n bub_obj = dispersed_phases.SingleParticle(bub, m0, T, K, fdis=fdis,\n K_T=Kt)\n\n # Check if the initial attributes are correct\n for i in range(len(composition)):\n assert bub_obj.composition[i] == composition[i]\n assert_array_almost_equal(bub_obj.m0, m0, decimal=6)\n assert 
bub_obj.T0 == T\n assert bub_obj.cp == seawater.cp() * 0.5\n assert bub_obj.K == K\n assert bub_obj.K_T == Kt\n assert bub_obj.fdis == fdis\n for i in range(len(composition)-1):\n assert bub_obj.diss_indices[i] == True\n assert bub_obj.diss_indices[-1] == False\n\n # Check if the values returned by the `properties` method match the input\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m0, T, P,\n Sa, Ta, 0.)\n us_ans = bub.slip_velocity(m0, T, P, Sa, Ta)\n rho_p_ans = bub.density(m0, T, P)\n A_ans = bub.surface_area(m0, T, P, Sa, Ta)\n Cs_ans = bub.solubility(m0, T, P, Sa)\n beta_ans = bub.mass_transfer(m0, T, P, Sa, Ta)\n beta_T_ans = bub.heat_transfer(m0, T, P, Sa, Ta)\n assert us == us_ans\n assert rho_p == rho_p_ans\n assert A == A_ans\n assert_array_almost_equal(Cs, Cs_ans, decimal=6)\n assert_array_almost_equal(beta, beta_ans, decimal=6)\n assert beta_T == beta_T_ans\n assert T == T_ans\n\n # Check that dissolution shuts down correctly\n m_dis = np.array([m0[0]*1e-10, m0[1]*1e-8, m0[2]*1e-3, 1.5e-5])\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, T, P,\n Sa, Ta, 0)\n assert beta[0] == 0.\n assert beta[1] == 0.\n assert beta[2] > 0.\n assert beta[3] > 0.\n m_dis = np.array([m0[0]*1e-10, m0[1]*1e-8, m0[2]*1e-7, 1.5e-16])\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, T, P,\n Sa, Ta, 0.)\n assert np.sum(beta[0:-1]) == 0.\n assert us == 0.\n assert rho_p == seawater.density(Ta, Sa, P)\n\n # Check that heat transfer shuts down correctly\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, Ta, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = bub_obj.properties(m_dis, T, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n\n # Check the value returned by the `diameter` method\n de_p = bub_obj.diameter(m0, T, P, Sa, Ta)\n assert_approx_equal(de_p, de, significant=6)\n\n # Check functionality of insoluble particle\n drop = dbm.InsolubleParticle(isfluid=True, iscompressible=True)\n m0 = drop.mass_by_diameter(de, T, P, Sa, Ta)\n\n # Create a `Particle` object\n drop_obj = dispersed_phases.SingleParticle(drop, m0, T, K, fdis=fdis,\n K_T=Kt)\n\n # Check if the values returned by the `properties` method match the input\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = drop_obj.properties(\n np.array([m0]), T, P, Sa, Ta, 0)\n us_ans = drop.slip_velocity(m0, T, P, Sa, Ta)\n rho_p_ans = drop.density(T, P, Sa, Ta)\n A_ans = drop.surface_area(m0, T, P, Sa, Ta)\n beta_T_ans = drop.heat_transfer(m0, T, P, Sa, Ta)\n assert us == us_ans\n assert rho_p == rho_p_ans\n assert A == A_ans\n assert beta_T == beta_T_ans\n\n # Check that heat transfer shuts down correctly\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = drop_obj.properties(m_dis, Ta, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n (us, rho_p, A, Cs, beta, beta_T, T_ans) = drop_obj.properties(m_dis, T, P,\n Sa, Ta, 0)\n assert beta_T == 0.\n\n # Check the value returned by the `diameter` method\n de_p = drop_obj.diameter(m0, T, P, Sa, Ta)\n assert_approx_equal(de_p, de, significant=6)", "def check_learning_mol(\n n_representations,\n p_representations,\n n_atoms_list,\n p_atoms_list,\n properties,\n n_training=DEFAULT_N_TRAINING):\n\n properties = np.array(properties)\n\n n_points = len(properties)\n indexes = np.arange(n_points, dtype=int)\n np.random.shuffle(indexes)\n\n n_valid = 50\n v_idxs = indexes[-n_valid:]\n vn_repr = n_representations[v_idxs]\n vp_repr = p_representations[v_idxs]\n vn_atoms = [n_atoms_list[i] for i in v_idxs]\n vp_atoms = [p_atoms_list[i] for i 
in v_idxs]\n v_props = properties[v_idxs]\n\n\n errors = []\n\n hp = {\n \"sigma\": 4\n }\n\n for n in n_training:\n\n start_time = time.time()\n\n t_idxs = indexes[:n]\n t_props = properties[t_idxs]\n\n tn_repr = n_representations[t_idxs]\n tn_atoms = [n_atoms_list[i] for i in t_idxs]\n\n tp_repr = p_representations[t_idxs]\n tp_atoms = [p_atoms_list[i] for i in t_idxs]\n\n # Train\n tn_K = create_local_kernel(tn_repr, tn_repr, tn_atoms, tn_atoms, **hp)\n tp_K = create_local_kernel(tp_repr, tp_repr, tp_atoms, tp_atoms, **hp)\n # tnp_K = create_local_kernel(tn_repr, tp_repr, tn_atoms, tp_atoms)\n # tpn_K = create_local_kernel(tp_repr, tn_repr, tp_atoms, tn_atoms)\n\n # print(tn_K)\n # print(tp_K)\n # print(tnp_K)\n # print(tpn_K)\n\n # K_bind = tn_K + tp_K - tnp_K - tpn_K\n K_bind = tp_K - tn_K\n\n if False:\n\n pca = kpca(K_bind, n=2)\n\n # plot\n fig, axs = plt.subplots(2, 1, figsize=(5,10))\n\n sc = axs[0].scatter(*pca, c=t_props)\n fig.colorbar(sc, ax=axs[0])\n\n im = axs[1].imshow(K_bind)\n fig.colorbar(im, ax=axs[1])\n\n fig.savefig(\"_tmp_pca_{:}.png\".format(n))\n\n\n # Train model\n t_alpha = krr(K_bind, t_props, solver=\"svd\", rcond=10**-4)\n\n # Test and predict\n vn_K = create_local_kernel(tn_repr, vn_repr, tn_atoms, vn_atoms, **hp)\n vp_K = create_local_kernel(tp_repr, vp_repr, tp_atoms, vp_atoms, **hp)\n\n v_K = vp_K - vn_K\n\n p_props = np.dot(v_K, t_alpha)\n\n # rmse error\n p_rmse, le, ue = rmse(p_props, v_props)\n\n end_time = time.time()\n\n print(\"{:5d}\".format(n), \"{:10.2f} ± {:4.2f} t={:4.1f}\".format(p_rmse, ue, end_time-start_time))\n\n errors.append([p_rmse, le, ue])\n\n errors = np.array(errors)\n\n return errors", "def potential_new_obs(self) -> Iterable[GriddedPerm]:", "def test_outV_traversals(self):\r\n results = self.eric.outV()\r\n assert len(results) == 1\r\n assert self.physics in results", "def test_population_movements_without_compilation(self):\n self._pystepx = PySTEPXIsland(nb_islands=4, init_script=init_script)\n self._pystepx._rc[0].execute('elems = gp_engine.get_evolver().select_and_remove_individuals(0.01, False)',\n block=True)\n print self._pystepx._rc[0]['elems']", "def test_order(self):\n class Mock(object):\n def __init__(self):\n self.spike = [False]\n\n def evolve(self, t, dt):\n self.spike = [True]\n\n G0 = Mock()\n M0 = simulation.EventMonitor(G0)\n sim = simulation.Simulation(G0, M0)\n sim.run(sim.dt)\n\n self.assertEqual(len(M0.t), 1)\n self.assertEqual(len(M0.i), 1)", "def simulate_memories(simulation_length):\n \n \n pass", "def simulationTwoDrugsVirusPopulations():\n #TODO", "def test_when_opponent_all_Ds(self):\n self.responses_test([D, D, D, D], [D, D, D, D], [D, D, D],\n random_seed=5)", "def initialise_sim(self):\n pass", "def test_simulation(self):\r\n positions = [1, 10, 100,1000]\r\n num_trials = 10000\r\n result = Investment(positions, num_trials)\r\n result = result.simulation(positions, num_trials) \r\n for pos in positions:\r\n self.assertEqual(len(result[pos]), num_trials)\r\n self.assertTrue(result[pos].all() == 1 or result[pos].all() == -1)", "def run_test0():\r\n \r\n ndia, nadi, nnucl, ntraj = 1, 1, 2, 500\r\n\r\n # ======= Hierarchy of Hamiltonians =======\r\n ham = nHamiltonian(ndia, nadi, nnucl)\r\n ham.init_all(2)\r\n print \"id=\", ham.id, \" level=\", ham.level\r\n\r\n ham1 = [] \r\n for tr in xrange(ntraj):\r\n ham1.append( nHamiltonian(ndia, nadi, nnucl) ) \r\n print ham1[tr].id, ham1[tr].level\r\n ham1[tr].init_all(2)\r\n ham.add_child(ham1[tr])\r\n print Cpp2Py(ham1[tr].get_full_id())\r\n\r\n # Set up 
the models and compute internal variables\r\n # Initialization\r\n # Model parameters \r\n params = { \"model\":1 }\r\n\r\n # Simulation parameters\r\n dt = 1.0\r\n\r\n # Dynamical variables and system-specific properties\r\n mean_q = MATRIX(nnucl,1); \r\n sigma_q = MATRIX(nnucl,1); \r\n mean_p = MATRIX(nnucl,1); \r\n sigma_p = MATRIX(nnucl,1); \r\n iM = MATRIX(nnucl,1);\r\n\r\n for i in xrange(nnucl):\r\n mean_q.set(i,0, -1.0) \r\n sigma_q.set(i,0, 0.05) \r\n mean_p.set(i,0, 0.0) \r\n sigma_p.set(i,0, 0.0)\r\n iM.set(i,0, 1.0/2000.0)\r\n\r\n rnd = Random()\r\n q = MATRIX(nnucl,ntraj); aux_functs.sample(q, mean_q, sigma_q, rnd)\r\n p = MATRIX(nnucl,ntraj); aux_functs.sample(p, mean_p, sigma_p, rnd) \r\n\r\n # Initial calculations\r\n q.show_matrix()\r\n\r\n # Compute Initial trajectory probability distributions for all dof\r\n #bin(q, -2.0, 2.0, 0.01)\r\n\r\n ham.compute_diabatic(compute_model, q, params, 1)\r\n ham.compute_adiabatic(1, 1);\r\n ham.add_ethd_adi(q, iM, 1)\r\n\r\n os.system(\"mkdir _2D_dist\")\r\n out1 = open(\"_output.txt\", \"w\"); out1.close() \r\n\r\n # Do the propagation\r\n for i in xrange(100):\r\n\r\n aux_functs.bin2(q, -2.0, 2.0, 0.1, -2.0, 2.0, 0.1, \"_2D_dist/_2D_distrib_\"+str(i)+\"_.txt\")\r\n\r\n Verlet1(dt, q, p, iM, ham, compute_model, params, 1)\r\n\r\n #=========== Properties ==========\r\n\r\n Ekin, Epot, Etot = aux_functs.compute_etot(ham, p, iM)\r\n\r\n # Print the ensemble average - kinetic, potential, and total energies\r\n # Print the tunneling information. Here, we count each trajectory across the barrier.\r\n out1 = open(\"_output.txt\", \"a\")\r\n out1.write( \" %8.5f %8.5f %8.5f %8.5f\\n\" % ( i*dt, Ekin, Epot, Etot ) )\r\n out1.close()", "def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]", "def testInvisiblePositive(self):\n slhafile=\"./testFiles/slha/higgsinoStop.slha\"\n model = Model(BSMList,SMList)\n model.updateParticles(slhafile)\n topos = decomposer.decompose ( model, .1*fb, False, True, 5.*GeV )\n tested = False\n for topo in topos:\n if str(topo)!=\"[][]\":\n continue\n for element in 
topo.elementList:\n if str(element)!=\"[[],[]]\":\n continue\n tested = True\n trueMothers = [mother for mother in element.motherElements if not mother is element]\n if not trueMothers: continue\n self.assertEqual(str(trueMothers[0]),\"[[],[[nu,nu]]]\")\n self.assertEqual(len(trueMothers), 1)\n self.assertTrue(tested)", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def test_contains():\n world, bodies, _ = example_world()\n\n geometry = metis.geometry.ManyShapeGeometry(world, bodies)\n dynamics = metis.dynamics.MagneticDynamics(bodies)\n factored_graph = FactoredRandomGeometricGraph(\n geometry, dynamics, default_count=5, blacklist=NoObjectContactBlacklist())\n\n assert {'robot': (None, 0), 'box1': (None, 0), 'box2': (None, 0)} in factored_graph\n assert {'robot': ('box1', 0), 'box1': (None, 0), 'box2': (None, 0)} in factored_graph\n assert {'robot': ('box1', 5), 'box1': (None, 0), 'box2': (None, 0)} not in factored_graph\n assert {'robot': ('robot', 0), 'box1': (None, 0), 'box2': (None, 0)} not in factored_graph\n assert {'robot': ('box1', 0), 'box1': ('robot', 0), 'box2': (None, 0)} not in factored_graph", "def test_object_with_observables() -> None:\n system_name = \"Water900\"\n simulation_id_1 = \"NPT-lowT-lowP\"\n simulation_id_2 = \"NPT-highT-lowP\"\n system = database.system(system_name)\n parser = FlatfileParser()\n simulation_data_1 = parser.get_simulation_data(\n units=system.units,\n ensemble=system.ensemble(simulation_id_1),\n system=system.system_data,\n kinetic_ene_file=system.observable_flat_file(\n simulation_id_1, \"kinetic_energy\"\n ),\n potential_ene_file=system.observable_flat_file(\n simulation_id_1, \"potential_energy\"\n ),\n total_ene_file=system.observable_flat_file(simulation_id_1, \"total_energy\"),\n volume_file=system.observable_flat_file(simulation_id_1, \"volume\"),\n )\n simulation_data_1_copy = parser.get_simulation_data(\n units=system.units,\n ensemble=system.ensemble(simulation_id_1),\n system=system.system_data,\n kinetic_ene_file=system.observable_flat_file(\n simulation_id_1, \"kinetic_energy\"\n ),\n potential_ene_file=system.observable_flat_file(\n simulation_id_1, \"potential_energy\"\n ),\n total_ene_file=system.observable_flat_file(simulation_id_1, \"total_energy\"),\n volume_file=system.observable_flat_file(simulation_id_1, \"volume\"),\n )\n simulation_data_2 = parser.get_simulation_data(\n units=system.units,\n ensemble=system.ensemble(simulation_id_2),\n system=system.system_data,\n kinetic_ene_file=system.observable_flat_file(\n simulation_id_2, \"kinetic_energy\"\n ),\n potential_ene_file=system.observable_flat_file(\n simulation_id_2, \"potential_energy\"\n ),\n total_ene_file=system.observable_flat_file(simulation_id_2, \"total_energy\"),\n volume_file=system.observable_flat_file(simulation_id_2, \"volume\"),\n )\n\n assert simulation_data_1 == simulation_data_1_copy\n assert simulation_data_1 != simulation_data_2", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def test_w_and_without():\n A = Node(\"A\", [\"B\"], {\"B\": np.array([[1,0],[1,.1]])})\n B = Node(\"B\", [], {})\n net = CyberNet([A,B])\n T=10\n data = gen_data(T, net, {\"A\": \"normal\", \"B\":\"normal\"})\n logn_fact = gen_logn_fact(data)\n pdata_no_a = prob_model_no_attacker(net, data, T, logn_fact)\n pdata_a = prob_model_given_data_times(net, data, {}, T, logn_fact,\n {\"A\": \"normal\",\n \"B\":\"normal\"})\n np.testing.assert_almost_equal(pdata_no_a, pdata_a)\n\n 
np.testing.assert_almost_equal(np.log(poisson.pmf(len(data[0]), 10)), pdata_a)", "def xo(mom:list,eval_mom,dad:list,eval_dad,slices):\n legal = False\n legal_son = False\n legal_daughter = False\n son = []\n daughter = []\n point_track = set()\n while not legal:\n\n slice_point = randint(0, len(dad)-1)\n if slice_point in point_track:\n continue\n point_track.add(slice_point)\n if len(point_track) == len(dad): # exhausted all possible points with no success\n if legal_son is True: # and legal_daughter is False\n return son,eval_son,mom,eval_mom\n if legal_daughter is True: # and legal_son is False\n return dad, eval_dad, daughter, eval_daughter\n if legal_son is False:\n son = dad[:slice_point]+mom[slice_point:]\n eval_son = evaluateOne(son)\n if eval_son > -1:\n legal_son = True\n if legal_daughter is False:\n daughter = mom[:slice_point]+dad[slice_point:]\n eval_daughter = evaluateOne(daughter)\n if eval_daughter > -1:\n legal_daughter = True\n\n legal = legal_son and legal_daughter\n\n return son,eval_son,daughter,eval_daughter", "def test_inE_traversals(self):\r\n results = self.jon.inE()\r\n assert len(results) == 1\r\n assert self.jon_physics in results", "def simulate_trajectories(kav):\n print \"Simulating \"+str(kav)\n wt_trajectories = []\n avp_trajectories = []\n vip_trajectories = []\n for tn in range(100):\n # get random initial condition\n # initial phases\n init_conditions_AV = [single_osc.lc(wt_T*np.random.rand()) \n for i in range(AVPcells+VIPcells)]\n init_conditions_NAV = [single_osc.lc(wt_T*np.random.rand())[:-1]\n for i in range(NAVcells)]\n y0_random = np.hstack(init_conditions_AV+init_conditions_NAV)\n\n # do the simulation\n model = GonzeModelManyCells(param, kav=kav, \n initial_values=y0_random)\n wt_trajectories.append(model.run(show_labels=False, seed=0))\n\n # avp bmalko\n avp_model = GonzeModelManyCells(param, bmalko='AVP', kav=kav, \n initial_values=y0_random)\n avp_trajectories.append(avp_model.run(show_labels=False, seed=0))\n\n # vip bmalko\n vip_model = GonzeModelManyCells(param, bmalko='VIP', kav=kav, \n initial_values=y0_random)\n vip_trajectories.append(vip_model.run(show_labels=False, seed=0))\n\n # save results\n with open(\"Data/params/wt_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(wt_trajectories, output_file)\n with open(\"Data/params/avp_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(avp_trajectories, output_file)\n with open(\"Data/params/vip_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(vip_trajectories, output_file)\n\n return {'wt': wt_trajectories,\n 'avp': avp_trajectories,\n 'vip': vip_trajectories}", "def test_active_inference_SPM_1b(self):", "def build_and_run_(spec):\n opt = spec['opt']\n print \"pool starting \", opt\n\n # lenght of simulation \n tf = float(opt.get('tf', 100))\n\n # model # coupling function # connectivity \n simargs = {}\n for mod, key in [(models, 'model'), \n (connectivity, 'connectivity'),\n (coupling, 'coupling')]:\n simargs[key] = build_sim_part(mod, opt[key])\n\n # noise # integrator \n optint = opt['integrator']\n if 'noise' in optint:\n optint['noise'] = build_sim_part(noise, optint['noise'])\n simargs['integrator'] = build_sim_part(integrators, optint)\n\n # monitors \n if not type(opt['monitors']) in (list,):\n opt['monitors'] = [opt['monitors']]\n simargs['monitors'] = []\n for mon in opt['monitors']:\n simargs['monitors'].append(build_sim_part(monitors, mon))\n\n # stimulus \n # NotImplemented\n\n # simulator \n sim = simulator.Simulator(**simargs)\n 
sim.configure()\n\n # TODO open HDF5 first, figure out correct sizes, etc\n\n # loop, writing data to h5\n ts = [[] for _ in opt['monitors']]\n ys = [[] for _ in opt['monitors']]\n for i, all_monitor_data in enumerate(sim(tf)):\n for j, mondata in enumerate(all_monitor_data):\n if not mondata is None:\n t, y = mondata\n ts[j].append(t)\n ys[j].append(y)\n\n # write data to hdf5 file\n path = os.path.abspath(opt.get('wd', './'))\n h5fname = os.path.join(path, \"tvb_%s.h5\" % (spec['md5sum'], ))\n h5 = h5py.File(h5fname, 'w')\n\n for i, (mon, (t, y)) in enumerate(zip(simargs['monitors'], zip(ts, ys))):\n mname = \"mon_%d_%s\" % (i, mon.__class__.__name__)\n g = h5.create_group(mname)\n g.create_dataset('ts', data=t)\n g.create_dataset('ys', data=y)\n\n h5.close()\n\n # return filename\n print \"pool finished\", opt\n return h5fname", "def test_sim(self):\n nxfe = 4\n ipopt = get_solver(\"ipopt\")\n\n m_steady = self.make_steady_model(nfe=nxfe)\n self.fix_model_inlets(m_steady, inlet_pressure=50.0 * pyo.units.bar)\n m_steady.fs.compressor.boost_pressure[:].fix(7.0 * pyo.units.bar)\n ipopt.solve(m_steady, tee=True)\n time_steady = m_steady.fs.time\n scalar_data = self.get_scalar_data_from_model(m_steady, time_steady)\n initial_data = self.get_data_from_model_at_time(m_steady, time_steady)\n\n m = pyo.ConcreteModel()\n default = {\n \"dynamic\": True,\n \"time_set\": [0.0, 20.0],\n \"time_units\": pyo.units.hr,\n }\n m.fs = idaes.FlowsheetBlock(**default)\n m.fs.properties = NaturalGasParameterBlock()\n pipeline_config = {\n \"property_package\": m.fs.properties,\n \"finite_elements\": nxfe,\n }\n m.fs.pipeline = GasPipeline(**pipeline_config)\n pipeline = m.fs.pipeline\n compressor_config = {\"property_package\": m.fs.properties}\n m.fs.compressor = Compressor(**compressor_config)\n compressor = m.fs.compressor\n m._compressor_to_pipeline = Arc(\n ports=(compressor.outlet_port, pipeline.inlet_port),\n )\n expand_arcs = pyo.TransformationFactory(\"network.expand_arcs\")\n expand_arcs.apply_to(m)\n\n cv = m.fs.pipeline.control_volume\n assert_units_consistent(m)\n\n disc = pyo.TransformationFactory(\"dae.finite_difference\")\n ntfe = 20\n disc.apply_to(m, nfe=ntfe, wrt=m.fs.time, scheme=\"BACKWARD\")\n\n time = m.fs.time\n t0 = m.fs.time.first()\n x0 = cv.length_domain.first()\n xf = cv.length_domain.last()\n j = next(iter(m.fs.properties.component_list))\n\n # Fix geometry variables\n m.fs.pipeline.diameter.fix(0.92 * pyo.units.m)\n cv.length.fix(300.0 * pyo.units.km)\n\n # Fix boost pressure\n compressor.boost_pressure[:].fix()\n\n # Inlets to the compressor are fixed, except for flow, where\n # the outlet is fixed.\n state = compressor.inlet_state\n state[:].pressure.fix()\n state[:].mole_frac_comp[j].fix()\n state[:].temperature.fix()\n cv.flow_mass[:, xf].fix()\n\n # Fix initial conditions. 
Here, pressure and volume for all\n # non-specified points.\n for x in cv.length_domain:\n if x != x0:\n cv.pressure[t0, x].fix()\n if x != xf:\n cv.flow_mass[t0, x].fix()\n\n # I want to deactivate differential equations at (t0, xf)\n # Material balance already doesn't exist here.\n cv.momentum_balance[t0, xf].deactivate()\n\n self.assertEqual(degrees_of_freedom(m), 0)\n\n # Load initial steady state into model at all time points.\n for name, val in initial_data.items():\n var = m.find_component(name)\n for t in time:\n var[t].set_value(val)\n # Load scalar data from initial steady state\n # (initialize area, basically)\n for name, val in scalar_data.items():\n var = m.find_component(name)\n var.set_value(val)\n\n cv.material_accumulation[...].set_value(0.0)\n cv.flow_mass_dt[...].set_value(0.0)\n\n for con in large_residuals_set(m):\n resid = pyo.value(con.body - con.upper)\n print(resid, con.name)\n ipopt.solve(m, tee=True)\n\n # Load input sequence into model\n sample_points = [4.0, 20.0]\n input_name = \"fs.pipeline.control_volume.flow_mass[*,1.0]\"\n nominal_density = 0.72\n val = 12.0 * 1e6 / 24 * nominal_density # 12 (1e6 SCM)/day\n input_series_data = (\n sample_points,\n {input_name: [val, val]},\n )\n input_interval_data = interval_data_from_time_series(input_series_data)\n load_inputs_into_model(m, time, input_interval_data)\n # Solve with loaded inputs\n res = ipopt.solve(m, tee=True)\n self.assertIs(\n res.solver.termination_condition,\n pyo.TerminationCondition.optimal,\n )\n\n # These predicted values come from a simulation of a single pipeline\n # model from the Pyomo DAE example. flow_mass has been converted\n # to kg/hr from (1e4 SCM/hr) by a factor of 0.72*1e4, where\n # 0.72 kg/m**3 is the gas density at standard conditions.\n pred_values = (\n list(time),\n {\n \"fs.pipeline.control_volume.flow_mass[*,%s]\"\n % x0: [\n 3.000e5,\n 2.999e5,\n 2.999e5,\n 2.999e5,\n 3.000e5,\n 3.174e5,\n 3.301e5,\n 3.389e5,\n 3.449e5,\n 3.492e5,\n 3.523e5,\n 3.544e5,\n 3.560e5,\n 3.571e5,\n 3.579e5,\n 3.585e5,\n 3.589e5,\n 3.592e5,\n 3.594e5,\n 3.595e5,\n 3.597e5,\n ],\n \"fs.pipeline.control_volume.pressure[*,%s]\"\n % xf: [\n 50.90,\n 50.90,\n 50.90,\n 50.90,\n 50.90,\n 49.83,\n 49.31,\n 48.95,\n 48.69,\n 48.51,\n 48.38,\n 48.29,\n 48.22,\n 48.17,\n 48.14,\n 48.11,\n 48.10,\n 48.08,\n 48.07,\n 48.07,\n 48.06,\n ],\n \"fs.compressor.power[*]\": [\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.682e3,\n 1.750e3,\n 1.796e3,\n 1.828e3,\n 1.851e3,\n 1.867e3,\n 1.878e3,\n 1.887e3,\n 1.892e3,\n 1.897e3,\n 1.900e3,\n 1.902e3,\n 1.904e3,\n 1.905e3,\n 1.906e3,\n 1.906e3,\n ],\n },\n )\n output_names = [\n \"fs.pipeline.control_volume.flow_mass[*,%s]\" % x0,\n \"fs.pipeline.control_volume.pressure[*,%s]\" % xf,\n \"fs.compressor.power[*]\",\n ]\n actual_values = (\n list(time),\n {\n name: [var.value for var in m.find_component(name).values()]\n for name in output_names\n },\n )\n # Note: We fail with a reltol of 0.01, due to flow rate discrepancies\n # in positions 6, 7, 8, and 9. 
A reltol of 0.02 seems reasonable to me.\n self.assertStructuredAlmostEqual(pred_values, actual_values, reltol=0.02)", "def test_variability(self):\n # some reproducible arbitrariness\n np.random.seed(343143)\n\n n = 10\n t_max = 20.0\n dt = 0.1\n G = RandomLayer(n)\n\n M1 = simulation.EventMonitor(G)\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n \n M2 = simulation.EventMonitor(G)\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(t_max)\n\n self.assertNotEqual(len(M1.t), 0)\n self.assertNotEqual(len(M2.t), 0)\n self.assertNotEqual(M1.t, M2.t)", "def test_no_jitter(self):\n n = 10\n t_max = 25\n dt = 0.1\n G = RateHVCLayer(n)\n G.burst_noise = 0.0\n\n M1 = simulation.StateMonitor(G, 'out')\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n \n M2 = simulation.StateMonitor(G, 'out')\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(t_max)\n\n self.assertTrue(np.allclose(M1.out, M2.out))", "def test_shape(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertEqual(len(M.t), len(M.i))", "def test_shared_members_N(self):\r\n def populate_module(m,x):\r\n m.x=x\r\n m.lx=[x]\r\n m.llx=[[x],[x]]\r\n m.ltx=[(x,)]\r\n m.ldx=[{'x':x}]\r\n m.tx=(x,)\r\n m.tlx=([x],)\r\n m.ttx=((x,),)\r\n m.tdx=({'x':x},)\r\n m.dx={'x':x}\r\n m.dlx={'x':[x]}\r\n m.dtx={'x':(x,)}\r\n m.ddx={'x':{'x':x}}\r\n\r\n def get_element(i):\r\n return [i.x,i.lx[0],i.tx[0],i.dx['x'],i.llx[0][0], i.llx[1][0], i.ltx[0][0], i.ldx[0]['x'], i.tlx[0][0], i.tlx[0][0], i.tdx[0]['x'], i.dlx['x'][0], i.dtx['x'][0], i.ddx['x']['x']]\r\n m1=Module()\r\n m2=Module()\r\n m3=Module()\r\n m4=Module()\r\n x=T.dscalar()\r\n populate_module(m1,x)\r\n populate_module(m2,(x))\r\n populate_module(m4,(x))\r\n #m1.x and m2.x should not be shared as their is no hierarchi link between them.\r\n inst1=m1.make()\r\n inst2=m2.make()\r\n m1.m2=m2\r\n m2.m3=m3\r\n m3.m4=m4\r\n #m1.x and m2.x should be shared as their is a hierarchi link between them.\r\n inst3=m1.make()\r\n inst1.x=1\r\n inst2.x=2\r\n inst3.x=3\r\n for f in get_element(inst1):\r\n assert f==1\r\n for f in get_element(inst2):\r\n assert f==2\r\n for f in get_element(inst3)+get_element(inst3.m2)+get_element(inst3.m2.m3.m4):\r\n assert f==3\r\n\r\n inst3.m2.x=4\r\n for f in get_element(inst3)+get_element(inst3.m2)+get_element(inst3.m2.m3.m4):\r\n assert f==4", "def test_OBJT_pass(self):\n for O in self.mod.objts.itervalues():\n self.assertTrue(O.isset)", "def test_basic_setup(self):\n random_vars = ['D', 'I', 'G', 'S', 'L']\n\n for rv in random_vars:\n self.assertTrue(rv in self.Gs.nodes)\n self.assertTrue(isinstance(self.Gs.nodes[rv], DiscreteNetworkNode))", "def test_single_game_seed_works(self):\n sim = ss.Simulation(seed=23)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=23)\n game2 = sim.single_game()\n assert game1 == game2, 'Your seed in Simulation class is not working.'", "def test_no_jitter(self):\n # some reproducible arbitrariness\n np.random.seed(3249823)\n\n n = 10\n t_max = 25\n dt = 0.1\n G = HVCLikeLayer(n)\n G.burst_noise = 0.0\n G.spike_noise = 0.0\n\n M1 = simulation.EventMonitor(G)\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n \n M2 = simulation.EventMonitor(G)\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(t_max)\n\n self.assertEqual(M1.t, M2.t)", "def __post_init__(self):\n assert self.data.shape == self.theory.shape", "def v1_kcat_validate():\n\n name = MPI.Get_processor_name()\n rank = 
MPI.COMM_WORLD.Get_rank()\n size = MPI.COMM_WORLD.Get_size()\n\n if rank == 0:\n # create ident object first\n v1_ident = ModelIdent(ident_fun=kotte_model.flux_1_kcat_ident,\n arranged_data_file_name=os.path.join(os.getcwd(), 'exp/exp_v1_2_experiments'),\n ident_data_file_name=os.path.join(os.getcwd(), 'ident/ident_v1_kcat'),\n **{'original_exp_file': os.path.join(os.getcwd(), 'exp/experiments'),\n 'flux_id': 1, 'flux_choice': 2,\n 'values_figure': os.path.join(os.getcwd(), 'results/v1_kcat_parameter_values.eps'),\n 'ident_figure': os.path.join(os.getcwd(), 'results/v1_kcat_ident.eps'),\n 'exp_figure': os.path.join(os.getcwd(), 'results/v1_kcat_exp.eps'),\n 'figure_format': 'eps',\n 'ident_index_label': ['sample_name', 'data_set_id']})\n\n # retrieve identifiability data and process it for validation\n v1_ident.validation_info()\n\n user_ode_opts = {'iter': 'Newton', 'discr': 'Adams', 'atol': 1e-10, 'rtol': 1e-10,\n 'time_points': 200, 'display_progress': True, 'verbosity': 30}\n # initial ss to begin all simulations from\n y0 = np.array([5, 1, 1])\n # get and set true parameter values, if available separately\n default_parameters = true_parameter_values()\n\n v1_valid_obj = ValidateSim(kotte_model.kotte_ck_ode, kotte_model.kotte_ck_flux,\n **{'kinetics': 2, 'ode_opts': user_ode_opts, 't_final': 200, 'wt_y0': y0,\n 'i_parameter': default_parameters, 'sample_size': 1, 'noise_std': 0.05,\n 'validate_index_label': ['estimate_id', 'sample_name', 'data_set_id',\n 'experiment_id'],\n 'validate_file_name': os.path.join(os.getcwd(), 'validate/v1_kcat_validate'),\n 'original_exp_file': v1_ident.original_exp_file,\n 'c_validate_file': os.path.join(os.getcwd(), 'results/v1_kcat_c_validate.eps'),\n 'f_validate_file': os.path.join(os.getcwd(), 'results/v1_kcat_f_validate.eps'),\n 'c_exp_file': os.path.join(os.getcwd(), 'results/v1_kcat_c_exp.eps'),\n 'v_exp_file': os.path.join(os.getcwd(), 'results/v1_kcat_f_exp.eps'),\n 'format': v1_ident.figure_format})\n\n parameter_estimates, estimate_info = v1_valid_obj.create_parameter_list(v1_ident.select_values)\n\n job = ParallelValidate(slaves=range(1, size))\n\n validate_results = job.run_all(task='initial_sim', **{'parameters': parameter_estimates,\n 'estimate_info': estimate_info, 'sim_obj': v1_valid_obj})\n job.terminate_slaves()\n\n # process validation data for plots\n validate_processing(v1_valid_obj, validate_results)\n\n else:\n\n print('I am %s Slave with rank %s of %s' % (name, str(rank), str(size)))\n ValidateSlave().run()\n\n return None", "def mewe_misspecified(M,N,m,n,target):\r\n\toutput = []\r\n\tfor k in tqdm(range(0,M)):\r\n\t\t# Allocate space for output\r\n\t\tmewe_store = np.zeros((len(n),target['thetadim']))\r\n\t\tmewe_runtimes = np.zeros(len(n))\r\n\t\tmewe_evals = np.zeros(len(n))\r\n\t\t\r\n\t\t# generate all observations and sets of randomness to be used\r\n\t\t\r\n\t\tif target[\"observed_law\"] == \"Gamma\":\r\n\t\t\tobs_all = np.random.gamma(true_theta[0], true_theta[1],np.max(n))\r\n\t\telif target[\"observed_law\"] == \"Cauchy\":\r\n\t\t\tobs_all = np.random.standard_cauchy(np.max(n))\r\n\t\telse : \r\n\t\t\treturn(\"Not implemented law\")\r\n\t\t\tbreak\r\n\t\t# la ligne du dessus est modifiée pour générer un échantillon contaminé\r\n\t\t\r\n\t\t# generate the synthetic randomness, sort.\r\n\t\t\r\n\t\trandomness = [target['generate_randomness'](m) for i in range(N)]\r\n\t\t\r\n\t\tfor i in range(0,len(n)):\r\n\t\t\t# subset observations and sort\r\n\t\t\tobs = obs_all[:n[i]]\r\n\t\t\tsort_obs = 
np.sort(obs)\r\n\t\t\tsort_obs_mult = np.repeat(sort_obs, m / n[i], axis = 0)\r\n\t\t\t\r\n\t\t\t# Define the objective to be minimized to find the MEWE\r\n\t\t\t\r\n\t\t\tdef obj1(theta):\r\n\t\t\t\tif(theta[1] < 0 ):\r\n\t\t\t\t\tout = 10e6\r\n\t\t\t\telse :\r\n\t\t\t\t\twass_dists = [target['dist'](sort_obs_mult, np.sort(target['simulation'](theta, x))) for x in randomness]\r\n\t\t\t\t\tout = np.mean(wass_dists)\r\n\t\t\t\t\r\n\t\t\t\treturn out\r\n\t\t\t\t\r\n\t\t\t# Optimization\r\n\t\t\t\r\n\t\t\tt_mewe = time.process_time()\r\n\t\t\tmewe = minimize(fun = obj1, x0 = true_theta)\r\n\t\t\tt_mewe = time.process_time() - t_mewe\r\n\t\t\t\r\n\t\t\t# Save the results\r\n\t\t\tmewe_store[i] = mewe.x\r\n\t\t\tmewe_runtimes[i] = t_mewe\r\n\t\t\tmewe_evals[i] = mewe.nit\r\n\t\t\r\n\t\toutput_cbind = np.c_[mewe_store, mewe_runtimes, mewe_evals, n, np.arange(len(n))]\r\n\t\toutput.append(output_cbind)\r\n\t\t\r\n\treturn output", "def simulation(G, # graph object\n pos = None, # positions of nodes\n n = 5, # number of simulation steps\n \n # wrapped args for simulation_step function\n kernel = 'weights', # simulation kernel\n custom_kernel = None, # custom simulation kernel\n WERE_multiplier = 10, # multiplier for WERE kernel\n oblivion = False, # enable information oblivion\n engagement_enforcement = 1.01,\n draw = False, # draw graph\n show_attr = False): # show attributes \n \n #=======================================#\n # append nodes data from 0 step to list #\n #=======================================#\n \n graph_list = []\n graph_list.append(copy.deepcopy(list(G.nodes.data() ) ) )\n \n\n #===================#\n # Run n simulations #\n #===================#\n \n for i in range(n):\n dp.simulation_step(G = G, \n pos = pos, \n \n kernel = kernel,\n custom_kernel = custom_kernel,\n WERE_multiplier = WERE_multiplier, \n oblivion = oblivion, \n engagement_enforcement = engagement_enforcement,\n draw = draw, \n show_attr = show_attr)\n\n # save nodes data to to list\n graph_list.append(copy.deepcopy(list(G.nodes.data() ) ) )\n \n \n #======================================================#\n # Count aware agents before and after simulation steps #\n #======================================================#\n \n # Check number of aware agents in 0 step\n #global aware_first\n aware_first = []\n for i in range(len(graph_list[0])):\n aware_first.append(graph_list[0][i][1]['state'])\n aware_first_c = aware_first.count('aware')\n \n # graph_list[0][1][1]['state']\n \n # Check number of aware agents in the last step\n #global aware_last\n aware_last = []\n graph_list_len = len(graph_list) - 1\n for i in range(len(graph_list[0])):\n aware_last.append(graph_list[graph_list_len][i][1]['state']) # n is the last sim\n aware_last_c = aware_last.count('aware')\n \n #graph_list[5][0][1]['state']\n \n #=================================#\n # diffusion performance measuring #\n #=================================#\n \n # equation for diffusion performance measuring\n avg_aware_inc_per_step = (aware_last_c - aware_first_c) / n\n \n # show graph statistics\n return graph_list, avg_aware_inc_per_step", "def test_multiple_rng(self):\r\n rng1 = RandomStreams(1234)\r\n rng2 = RandomStreams(2392)\r\n assert rng1.random_state_variables is not rng2.random_state_variables", "def test_find_singletons(self):\n self._build_sample_graph()\n # Adding singletons\n sg = self.skill_graph.add(Skill.build('g', ''))\n sh = self.skill_graph.add(Skill.build('h', ''))\n skill_map = SkillMap.load(self.course)\n result = 
SkillMapMetrics(skill_map).singletons()\n expected = [sg.id, sh.id]\n self.assertEqual(sorted(expected), sorted(result))", "def random_structure (params) :\n\n import random\n random.seed(0)\n from scitbx.array_family import flex\n flex.set_random_seed(0)\n from cctbx.development import random_structure\n\n uc_volume = params.target_unit_cell.volume()\n asu_volume = uc_volume / params.target_space_group.group().order_z()\n target_number_scatterers = int(asu_volume)//128 # Very approximate rule of thumb for proteins with ~50% solvent content\n element_unit = ['O']*19 + ['N']*18 + ['C']*62 + ['S']*1\n element_pallet = element_unit * (1 + ( target_number_scatterers//len(element_unit) ))\n assert len(element_pallet) >= target_number_scatterers\n # XXX Ersatz hard limit to prevent excessive execution time of\n # xray_structure() below.\n elements = element_pallet[:min(1000, target_number_scatterers)]\n\n xs = random_structure.xray_structure(\n space_group_info = params.target_space_group,\n unit_cell = params.target_unit_cell,\n elements=elements,\n min_distance=1.2)\n xs.show_summary()\n phil2 = mmtbx.command_line.fmodel.fmodel_from_xray_structure_master_params\n params2 = phil2.extract()\n # adjust the cutoff of the generated intensities to assure that\n # statistics will be reported to the desired high-resolution limit\n # even if the observed unit cell differs slightly from the reference.\n params2.high_resolution = params.d_min / math.pow(\n 1 + params.unit_cell_length_tolerance, 1 / 3)\n params2.output.type = \"real\"\n if (params.include_bulk_solvent) :\n print(\"Sorry, can't include bulk solvent for randomly-generated sites.\")\n f_model = mmtbx.utils.fmodel_from_xray_structure(\n xray_structure = xs,\n f_obs = None,\n add_sigmas = True,\n params = params2).f_model\n if not params.merge_anomalous:\n f_model_possibly_anomalous = f_model.generate_bijvoet_mates()\n else:\n f_model_possibly_anomalous = f_model\n i_model = f_model_possibly_anomalous.as_intensity_array()\n\n if params.scaling.mtz_file is not None:\n f_fake = f_model.as_amplitude_array()\n # as the code that consumes the mtz f-obs expects non-anomalous data\n mtzdata = f_fake.as_mtz_dataset(column_root_label=\"f-obs\")\n mtzdata.mtz_object().write(params.scaling.mtz_file)\n\n return i_model", "def test_shared_members(self):\r\n\r\n def populate_module(m,x):\r\n m.x=x\r\n m.lx=[x]\r\n m.llx=[[x],[x]]\r\n m.ltx=[(x,)]\r\n m.ldx=[{'x':x}]\r\n m.tx=(x,)\r\n m.tlx=([x],)\r\n m.ttx=((x,),)\r\n m.tdx=({'x':x},)\r\n m.dx={'x':x}\r\n m.dlx={'x':[x]}\r\n m.dtx={'x':(x,)}\r\n m.ddx={'x':{'x':x}}\r\n\r\n def get_element(i):\r\n return [i.x,i.lx[0],i.tx[0],i.dx['x'],i.llx[0][0], i.llx[1][0], i.ltx[0][0], i.ldx[0]['x'], i.tlx[0][0], i.tlx[0][0], i.tdx[0]['x'], i.dlx['x'][0], i.dtx['x'][0], i.ddx['x']['x']]\r\n\r\n m1=Module()\r\n m2=Module()\r\n x=T.dscalar()\r\n populate_module(m1,x)\r\n populate_module(m2,x)\r\n #m1.x and m2.x should not be shared as their is no hierarchi link between them.\r\n inst1=m1.make()\r\n inst2=m2.make()\r\n m1.m2=m2\r\n #m1.x and m2.x should be shared as their is a hierarchi link between them.\r\n inst3=m1.make()\r\n inst1.x=1\r\n inst2.x=2\r\n inst3.x=3\r\n for f in get_element(inst1):\r\n assert f==1\r\n for f in get_element(inst2):\r\n assert f==2\r\n for f in get_element(inst3)+get_element(inst3.m2):\r\n assert f==3\r\n\r\n inst3.m2.x=4\r\n for f in get_element(inst3)+get_element(inst3.m2):\r\n assert f==4", "def test_speciesDestruction():\n sys = LVsystem.Ecosystem()\n sys.addSpecies('rabbit')\n 
sys.addSpecies('fox')\n sys.addSpecies('wolf')\n \n sys.removeSpecies('fox')\n \n assert len(sys.species_list) == 2\n assert not ('fox' in sys.species_list)\n for key in sys.intMatrix:\n assert not ('fox' in key)\n \n sys.removeSpecies('wolf')\n \n assert sys.species_list == ['rabbit']\n for key in sys.intMatrix:\n assert not ('wolf' in key)\n \n sys.removeSpecies('rabbit') \n \n assert sys.intMatrix == {}\n assert sys.species_list == []\n for key in sys.intMatrix:\n assert not ('rabbit' in key)", "def __init__(self,\n num_masks=1,\n num_fake=0,\n epsilon_greedy=0.0,\n num_classes = 2,\n num_samples = 1000,\n max_length = 30,\n ambiguity=False,\n num_bondtypes=1):\n\n self.num_masks = num_masks\n self.num_fake = num_fake\n self.epsilon_greedy = epsilon_greedy\n self.num_classes = num_classes\n self.num_samples = num_samples\n\n self.molecule_generator = MoleculeGenerator(num_classes=num_classes, \n max_length=max_length, \n ambiguity=ambiguity,\n num_bondtypes=num_bondtypes)\n\n self.corruption = CorruptionTransform(num_masks=num_masks, num_fake=num_fake, epsilon=epsilon_greedy)\n\n\n self.data = []\n\n\n for i in range(self.num_samples):\n molecule = self.molecule_generator.generate_molecule()\n molecule = chem.AddHs(molecule)\n\n\n Adj = chem.rdmolops.GetAdjacencyMatrix(molecule)\n atoms = np.asarray([periodic_table[atom.GetAtomicNum()] for atom in molecule.GetAtoms()])\n smiles = chem.MolToSmiles(molecule)\n self.data += [MoleculeSample(atoms, Adj, {}, smiles)]", "def test_determinism_2():\n\n def run_sgd(mode):\n # Must be seeded the same both times run_sgd is called\n disturb_mem.disturb_mem()\n rng = np.random.RandomState([2012, 11, 27])\n\n batch_size = 5\n train_batches = 3\n valid_batches = 4\n num_features = 2\n\n # Synthesize dataset with a linear decision boundary\n w = rng.randn(num_features)\n\n def make_dataset(num_batches):\n disturb_mem.disturb_mem()\n m = num_batches*batch_size\n X = rng.randn(m, num_features)\n y = np.zeros((m, 1))\n y[:, 0] = np.dot(X, w) > 0.\n\n rval = DenseDesignMatrix(X=X, y=y)\n\n rval.yaml_src = \"\" # suppress no yaml_src warning\n\n X = rval.get_batch_design(batch_size)\n assert X.shape == (batch_size, num_features)\n\n return rval\n\n train = make_dataset(train_batches)\n valid = make_dataset(valid_batches)\n\n num_chunks = 10\n chunk_width = 2\n\n class ManyParamsModel(Model):\n \"\"\"\n Make a model with lots of parameters, so that there are many\n opportunities for their updates to get accidentally re-ordered\n non-deterministically. 
This makes non-determinism bugs manifest\n more frequently.\n \"\"\"\n\n def __init__(self):\n super(ManyParamsModel, self).__init__()\n self.W1 = [sharedX(rng.randn(num_features, chunk_width)) for i\n in xrange(num_chunks)]\n disturb_mem.disturb_mem()\n self.W2 = [sharedX(rng.randn(chunk_width))\n for i in xrange(num_chunks)]\n self._params = safe_union(self.W1, self.W2)\n self.input_space = VectorSpace(num_features)\n self.output_space = VectorSpace(1)\n\n disturb_mem.disturb_mem()\n model = ManyParamsModel()\n disturb_mem.disturb_mem()\n\n class LotsOfSummingCost(Cost):\n \"\"\"\n Make a cost whose gradient on the parameters involves summing many\n terms together, so that T.grad is more likely to sum things in a\n random order.\n \"\"\"\n\n supervised = True\n\n def expr(self, model, data, **kwargs):\n self.get_data_specs(model)[0].validate(data)\n X, Y = data\n disturb_mem.disturb_mem()\n\n def mlp_pred(non_linearity):\n Z = [T.dot(X, W) for W in model.W1]\n H = [non_linearity(z) for z in Z]\n Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]\n pred = sum(Z)\n return pred\n\n nonlinearity_predictions = map(mlp_pred,\n [T.nnet.sigmoid,\n T.nnet.softplus,\n T.sqr,\n T.sin])\n pred = sum(nonlinearity_predictions)\n disturb_mem.disturb_mem()\n\n return abs(pred-Y[:, 0]).sum()\n\n def get_data_specs(self, model):\n data = CompositeSpace((model.get_input_space(),\n model.get_output_space()))\n source = (model.get_input_source(), model.get_target_source())\n return (data, source)\n\n cost = LotsOfSummingCost()\n\n disturb_mem.disturb_mem()\n\n algorithm = SGD(cost=cost,\n batch_size=batch_size,\n learning_rule=Momentum(.5),\n learning_rate=1e-3,\n monitoring_dataset={'train': train, 'valid': valid},\n update_callbacks=[ExponentialDecay(decay_factor=2.,\n min_lr=.0001)],\n termination_criterion=EpochCounter(max_epochs=5))\n\n disturb_mem.disturb_mem()\n\n train_object = Train(dataset=train,\n model=model,\n algorithm=algorithm,\n extensions=[PolyakAveraging(start=0),\n MomentumAdjustor(final_momentum=.9,\n start=1,\n saturate=5), ],\n save_freq=0)\n\n disturb_mem.disturb_mem()\n\n train_object.main_loop()\n\n output = cStringIO()\n record = Record(file_object=output, replay=False)\n record_mode = RecordMode(record)\n\n run_sgd(record_mode)\n\n output = cStringIO(output.getvalue())\n playback = Record(file_object=output, replay=True)\n playback_mode = RecordMode(playback)\n\n run_sgd(playback_mode)", "def test_checks_population_size(self):\n with pm.Model() as model:\n n = pm.Normal(\"n\", mu=0, sigma=1)\n for stepper in TestPopulationSamplers.steppers:\n step = stepper()\n with pytest.raises(ValueError, match=\"requires at least 3 chains\"):\n pm.sample(draws=10, tune=10, chains=1, cores=1, step=step)\n # don't parallelize to make test faster\n pm.sample(\n draws=10,\n tune=10,\n chains=4,\n cores=1,\n step=step,\n compute_convergence_checks=False,\n )", "def test_when_opponent_all_Ds(self):\n self.responses_test([C, C, C, C], [D, D, D, D], [D, D, D], random_seed=5)", "def exists(sim):\n return (sim['N'] > 0)", "def testMass(self):\n tested = False\n slhafile=\"./testFiles/slha/higgsinoStop.slha\"\n model = Model(BSMList,SMList)\n model.updateParticles(slhafile,promptWidth = 1e-12*GeV)\n topos = decomposer.decompose(model, .1*fb, True, False, 5.*GeV)\n for topo in topos:\n if str(topo)!=\"[1][1]\":\n continue\n for element in topo.elementList:\n if str(element)!=\"[[[b]],[[b]]]\":\n continue\n masses=element.motherElements[0].mass\n tested = True\n dm=abs(masses[0][1]-masses[0][2])/GeV\n 
#If intermediate BSM states are compared there are two elements ([[[b],[c,q]],[[b],[q,q]]])\n # which do not get combined because their branches differ by the charges of the intermediate states\n self.assertEqual(len(element.motherElements),24)\n self.assertTrue(dm < 5.0)\n self.assertTrue(tested)", "def test_creature(self):\n self.assertEqual(len(self.processor), 3)", "def test_am_basic(Simulator, plt, seed, rng):\n\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab)\n in_node = nengo.Node(output=vocab.parse(\"A\").v, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.2)\n t = sim.trange()\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))\n plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[in_p][t > 0.15], vocab.parse(\"A\").v) > 0.99\n assert similarity(sim.data[out_p][t > 0.15], vocab.parse(\"A\").v) > 0.8", "def sngl_obj_evo(self, lacking):\n prob, algo = self.probinit('jde', 0)\n l = list()\n u = 6+(self.N-3)*4\n for i in range(lacking):\n archi = archipelago(algo,prob,8,16, topology=fully_connected())\n for j in range(u):\n archi.evolve(5)\n stdout.write(\"\\r{0} / {1}\".format(i*u+j+1, lacking*u))\n stdout.flush()\n tmp = [isl for isl in archi]\n tmp.sort(key = lambda x: x.population.champion.f[0]);\n l.append(tmp[0].population.champion)\n stdout.write(\" Done. 
\")\n return l, prob", "def test_observed_species(self):\n c = array([4,3,4,0,1,0,2])\n obs = observed_species(c)\n exp = 5\n self.assertEqual(obs, exp)\n c = array([0,0,0])\n obs = observed_species(c)\n exp = 0\n self.assertEqual(obs, exp)\n self.assertEqual(observed_species(self.TestData), 9)", "def test_population_movements_with_compilation(self):\n self._pystepx = PySTEPXIsland(nb_islands=4, init_script=init_script)\n print self._pystepx._rc[0]['gp_engine']\n self._pystepx._rc[0].execute('elems = gp_engine.get_evolver().select_and_remove_individuals(0.01)',\n block=True)\n print self._pystepx._rc[0]['elems']", "def test_6(self):\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n respy_obj = simulate_observed(respy_obj)\n\n # Extract class attributes\n (\n periods_rewards_systematic,\n mapping_state_idx,\n seed_prob,\n periods_emax,\n num_periods,\n states_all,\n num_points_interp,\n edu_spec,\n num_draws_emax,\n is_myopic,\n is_debug,\n is_interpolated,\n optim_paras,\n optimizer_options,\n file_sim,\n num_types,\n ) = dist_class_attributes(\n respy_obj,\n \"periods_rewards_systematic\",\n \"mapping_state_idx\",\n \"seed_prob\",\n \"periods_emax\",\n \"num_periods\",\n \"states_all\",\n \"num_points_interp\",\n \"edu_spec\",\n \"num_draws_emax\",\n \"is_myopic\",\n \"is_debug\",\n \"is_interpolated\",\n \"optim_paras\",\n \"optimizer_options\",\n \"file_sim\",\n \"num_types\",\n )\n\n shocks_cholesky = optim_paras[\"shocks_cholesky\"]\n shocks_cov = shocks_cholesky.dot(shocks_cholesky.T)\n coeffs_common = optim_paras[\"coeffs_common\"]\n coeffs_a = optim_paras[\"coeffs_a\"]\n coeffs_b = optim_paras[\"coeffs_b\"]\n delta = optim_paras[\"delta\"]\n\n # Add some additional objects required for the interfaces to the functions.\n period = np.random.choice(num_periods)\n\n periods_draws_emax = create_draws(\n num_periods, num_draws_emax, seed_prob, is_debug\n )\n\n draws_emax_standard = periods_draws_emax[period, :, :]\n\n draws_emax_risk = transform_disturbances(\n draws_emax_standard, np.zeros(4), shocks_cholesky\n )\n\n # Initialize Python version and solve.\n state_space = StateSpace(\n num_periods, num_types, edu_spec[\"start\"], edu_spec[\"max\"], optim_paras\n )\n\n # Integrate periods_emax in state_space\n state_space.emaxs = np.column_stack(\n (\n np.zeros((state_space.num_states, 4)),\n periods_emax[~np.isnan(periods_emax) & (periods_emax != MISSING_FLOAT)],\n )\n )\n\n # Fill emaxs_a - emaxs_home in the requested period\n states_period = state_space.get_attribute_from_period(\"states\", period)\n\n # Do not get the emaxs from the previous period if we are in the last one.\n if period != state_space.num_periods - 1:\n state_space.emaxs = get_emaxs_of_subsequent_period(\n states_period, state_space.indexer, state_space.emaxs, edu_spec[\"max\"]\n )\n\n num_states = state_space.states_per_period[period]\n\n shifts = np.random.randn(4)\n\n # Slight modification of request which assures that the interpolation code is\n # working.\n num_points_interp = min(num_points_interp, num_states)\n\n # Get the IS_SIMULATED indicator for the subset of points which are used for the\n # predication model.\n is_simulated = get_simulated_indicator(\n num_points_interp, num_states, period, is_debug\n )\n\n # Unpack necessary attributes\n rewards_period = state_space.get_attribute_from_period(\"rewards\", period)\n emaxs_period = state_space.get_attribute_from_period(\"emaxs\", period)[:, :4]\n max_education = (\n 
state_space.get_attribute_from_period(\"states\", period)[:, 3]\n >= edu_spec[\"max\"]\n )\n\n # Construct the exogenous variables for all points of the state space.\n exogenous, max_emax = get_exogenous_variables(\n rewards_period, emaxs_period, shifts, optim_paras[\"delta\"], max_education\n )\n\n # Align output between Python and Fortran version.\n py = (exogenous, max_emax)\n\n f90 = fort_debug.wrapper_get_exogenous_variables(\n period,\n num_periods,\n num_states,\n periods_rewards_systematic,\n shifts,\n mapping_state_idx,\n periods_emax,\n states_all,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n delta,\n coeffs_common,\n coeffs_a,\n coeffs_b,\n num_types,\n )\n\n assert_almost_equal(py[0], f90[0])\n assert_almost_equal(py[1], f90[1])\n\n # Construct endogenous variable so that the prediction model can be fitted.\n endogenous = get_endogenous_variable(\n rewards_period,\n emaxs_period,\n max_emax,\n is_simulated,\n draws_emax_risk,\n optim_paras[\"delta\"],\n max_education,\n )\n\n f90 = fort_debug.wrapper_get_endogenous_variable(\n period,\n num_periods,\n num_states,\n periods_rewards_systematic,\n mapping_state_idx,\n periods_emax,\n states_all,\n is_simulated,\n num_draws_emax,\n max_emax,\n draws_emax_risk,\n edu_spec[\"start\"],\n edu_spec[\"max\"],\n shocks_cov,\n delta,\n coeffs_common,\n coeffs_a,\n coeffs_b,\n )\n assert_almost_equal(endogenous, replace_missing_values(f90))\n\n py = get_predictions(endogenous, exogenous, max_emax, is_simulated)\n\n f90 = fort_debug.wrapper_get_predictions(\n endogenous,\n exogenous,\n max_emax,\n is_simulated,\n num_points_interp,\n num_states,\n file_sim,\n False,\n )\n\n # This assertion fails if a column is all zeros.\n if not exogenous.any(axis=0).any():\n assert_array_almost_equal(py, f90)", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def test_pickling(simulation_factory, two_particle_snapshot_factory,\n external_params):\n # unpack parameters\n cls_obj, param_attr, list_params, evaluator = external_params\n\n # create class instance, get/set params when not attached\n obj_instance = cls_obj()\n getattr(obj_instance, param_attr)['A'] = list_params[0]\n\n pickling_check(obj_instance)\n # set up simulation\n snap = two_particle_snapshot_factory(d=3.7)\n sim = simulation_factory(snap)\n sim.operations.integrator = hoomd.md.Integrator(dt=0.001)\n sim.operations.integrator.forces.append(obj_instance)\n sim.run(0)\n pickling_check(obj_instance)", "def test_ne():\n # Define some universal gsps\n gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3)\n\n # Pixel. Params include scale, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Pixel(scale=1.0),\n galsim.Pixel(scale=1.1),\n galsim.Pixel(scale=1.0, flux=1.1),\n galsim.Pixel(scale=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # Box. Params include width, height, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Box(width=1.0, height=1.0),\n galsim.Box(width=1.1, height=1.0),\n galsim.Box(width=1.0, height=1.1),\n galsim.Box(width=1.0, height=1.0, flux=1.1),\n galsim.Box(width=1.0, height=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # TopHat. 
Params include radius, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.TopHat(radius=1.0),\n galsim.TopHat(radius=1.1),\n galsim.TopHat(radius=1.0, flux=1.1),\n galsim.TopHat(radius=1.0, gsparams=gsp)]\n all_obj_diff(gals)", "def test_identity_oddballs(self):\n from keras.constraints import identity\n identity_instance = identity()\n\n oddball_examples = [\"Hello\", [1], -1, None]\n assert(oddball_examples == identity_instance(oddball_examples))", "def test_run_simulation_returns_nothing(self):\n sim = ss.Simulation()\n assert sim.run_simulation(10) is None", "def __init__(self):\n self.persons = []\n self.partnerships = []\n self.straight_males = set()\n self.females = set()\n self.high_sexual_activity = set()\n self.singles = set()\n self.infected = set()\n self.resistant = set()\n self.age_group = [set() for x in range(5)]\n self.age_group_mixing_prob = age_group_mixing()\n self.age_groups = [(i,j) for i in range(5) for j in range(5)]\n self.time = 0\n self.number_of_symptomatic = 0\n self.number_of_asymptomatic = 0\n self.number_of_res_symp = 0\n self.number_of_res_asymp = 0\n self.number_of_steady = 0\n self.r0_infected = []", "def test_run_sim():\n rnd = rand.Arrivals(31, 40)\n sim.run_sim(2, 1, 3, 4, 24, rnd)", "def make_sim(sim_name: str, object_names: List[str], epoch: datetime, \n integrator: str, steps_per_day: int, save_file: bool) -> rebound.Simulation:\n # Filename for archive\n file_date: str = epoch.strftime('%Y-%m-%d_%H-%M')\n fname_archive: str = f'../data/planets/{sim_name}_{file_date}.bin'\n\n # If this file already exists, load it and check for both extra and missing bodies\n sim: rebound.Simulation\n try:\n # Attempt to load the named file\n sim = rebound.Simulation(fname_archive)\n # print(f'Loaded {fname_archive}')\n\n # Generate list of missing object names\n objects_missing: List[str] = [nm for nm in object_names if nm not in sim.particles]\n\n # Extend the simulation and save it with the augmented bodies\n if objects_missing:\n print(f'Found missing objects in {fname_archive}:')\n print(objects_missing)\n extend_sim_horizons(sim, object_names = objects_missing, epoch=epoch)\n\n # Sets of named and input object hashes\n hashes_sim: Set[int] = set(p.hash.value for p in sim.particles)\n hashes_input: Set[str] = set(rebound.hash(nm).value for nm in object_names)\n\n # Filter the simulation so only the named objects are included\n hashes_remove: List[str] = [h for h in hashes_sim if h not in hashes_input] \n for h in hashes_remove:\n sim.remove(hash=h)\n\n except: \n # Initialize simulation\n sim = make_sim_horizons(object_names=object_names, epoch=epoch)\n\n # Move to center of momentum\n sim.move_to_com()\n\n # Set integrator and time step\n sim.integrator = integrator\n dt: float = 1.0 / steps_per_day if steps_per_day > 0 else 0\n sim.dt = dt\n if integrator == 'ias15':\n ias15 = sim.ri_ias15\n ias15.min_dt = dt\n\n # Save a snapshot to the archive file if requested\n if save_file:\n sim.simulationarchive_snapshot(filename=fname_archive, deletefile=True)\n\n # Return the simulation\n return sim", "def recurse(hp):\n global G\n nodes = G.nodes(data=True)\n p_insert = hp.p_insert if count_boxes() > hp.initial_boxes else 1.\n for node in nodes:\n try:\n if node[1][\"shape\"] is \"square\":\n if random.random() < p_insert:\n insert_motif(hp, id=node[0])\n except Exception as e:\n log('exception in recurse', e)", "def test_randomize_mp_fits(self):\n\t\tdetails = self.watcher.analyze(mp_fit=True, randomize=True, 
pool=True)\n\t\tself.assertTrue((details.rand_sigma_mp < 1.10).all())\n\t\tself.assertTrue((details.rand_sigma_mp > 0.96).all())\n\t\tself.assertTrue((details.rand_num_spikes.to_numpy() < 80).all())", "def test_setup(sim):\n n = 3\n data = []\n dt = 1\n\n for i in range(n):\n sim.setup(timestep=dt, min_delay=dt)\n p = sim.Population(1, sim.IF_curr_exp(i_offset=0.1))\n p.record('v')\n print('starting run ', i)\n sim.run(10.0)\n print('finished run ', i)\n\n print('start get_data run ', i)\n data.append(p.get_data())\n print('finished get_data run ', i)\n\n print('start sim end ', i)\n sim.end()\n print('finished sim end ', i)\n\n assert len(data) == n\n for block in data:\n assert len(block.segments) == 1\n signals = block.segments[0].analogsignals\n assert len(signals) == 1\n assert_array_equal(signals[0], data[0].segments[0].analogsignals[0])", "def __init__(self,input_dict):\n self.no_monomers = int(input_dict['Monomer types'])\n self.occupancy = float(input_dict['Site occupancy fraction'])\n \n # Mole fraction of monomers\n self.monomer_fraction = []\n \n # Functionality of monomers\n self.functionality = []\n \n # Molecular weight of monomers\n self.molecular_wt = []\n \n # Molecular volume of monomers\n self.molecular_vol = []\n \n # Molecular hopping rates\n self.hopping_rate = []\n \n # Interaction energy of monomers\n self.eps_self = []\n self.mean_eps_self = 0.0\n \n # Diffusivity factors\n self.d_factor_monomers = []\n \n # Propagation rate constant of monomers\n self.k_p = np.zeros((self.no_monomers,self.no_monomers))\n \n # Termination rate constant of monomers\n self.k_t = np.zeros((self.no_monomers,self.no_monomers))\n \n # Termination rate constant of monomers\n self.eps = np.zeros((self.no_monomers,self.no_monomers))\n \n # Probability distribution of monomers\n self.prob_set = np.zeros(shape=(self.no_monomers))\n \n # Population of monomers added to the network\n self.added_set = np.zeros(shape=(self.no_monomers))\n \n # Network size\n self.network_size = 0\n \n for mo_type in range(0,self.no_monomers):\n prefix = 'Monomer '+str(mo_type + 1)+': '\n \n self.monomer_fraction.append(float(input_dict[prefix+'Mole fraction']))\n self.functionality.append(float(input_dict[prefix+'Functionality']))\n self.molecular_wt.append(float(input_dict[prefix+'Molecular weight (in Da)']))\n self.molecular_vol.append(float(input_dict[prefix+'Molecular volume (in Angstroms^3)']))\n self.eps_self.append(float(input_dict[prefix+'Self interaction energy (in eV)']))\n self.hopping_rate.append(float(input_dict[prefix+'Hopping rate']))\n self.d_factor_monomers.append(1.0)\n \n self.mean_eps_self = sum(self.eps_self)/float(self.no_monomers)\n \n rate_constants_file = input_dict['Rate constants file']\n self.read_rate_constants(rate_constants_file)\n \n term_constants_file = input_dict['Termination constants file']\n self.read_term_constants(rate_constants_file)\n \n #interactions_file = input_dict['Interaction energy file']\n #self.read_interactions(interactions_file)\n \n self.interaction_range = int(input_dict['Interaction range'])\n \n self.create_interaction_matrix()", "def test_clear(self):\n from supvisors.statistics import StatisticsCompiler\n compiler = StatisticsCompiler(self.supvisors)\n # set data to a given address\n for address, period_instance in compiler.data.items():\n for period, instance in period_instance.items():\n instance.counter = 28\n instance.ref_stats = ('dummy', 0)\n instance.cpu = [13.2, 14.8]\n instance.mem = [56.4, 71.3, 68.9]\n instance.io = {'eth0': (123465, 654321), 
'lo': (321, 321)}\n instance.proc = {('myself', 5888): (25.0, 12.5)}\n # check clearance of instance\n compiler.clear('10.0.0.2')\n for address, period_instance in compiler.data.items():\n if address == '10.0.0.2':\n for period, instance in period_instance.items():\n self.assertEqual(period / 5, instance.period)\n self.assertEqual(10, instance.depth)\n self.assertEqual(-1, instance.counter)\n self.assertIsNone(instance.ref_stats)\n self.assertIs(list, type(instance.cpu))\n self.assertFalse(instance.cpu)\n self.assertIs(list, type(instance.mem))\n self.assertFalse(instance.mem)\n self.assertIs(dict, type(instance.io))\n self.assertFalse(instance.io)\n self.assertIs(dict, type(instance.proc))\n self.assertFalse(instance.proc)\n else:\n for period, instance in period_instance.items():\n self.assertEqual(period / 5, instance.period)\n self.assertEqual(10, instance.depth)\n self.assertEqual(28, instance.counter)\n self.assertTupleEqual(('dummy', 0), instance.ref_stats)\n self.assertListEqual([13.2, 14.8], instance.cpu)\n self.assertListEqual([56.4, 71.3, 68.9], instance.mem)\n self.assertDictEqual({'eth0': (123465, 654321), 'lo': (321, 321)}, instance.io)\n self.assertDictEqual({('myself', 5888): (25.0, 12.5)}, instance.proc)", "def simulate_universe():\n\n # untreated_survival is the probability to survive if not treated\n # this is an exact law of the universe, the player will not have this information\n untreated_survival = random.uniform(MIN_DISEASE_SURVIVAL, MAX_DISEASE_SURVIVAL)\n\n trials: list[Trial] = []\n\n treated_survivals: dict[Trial, float] = {}\n\n for _ in range(random.randint(MIN_TREATMENTS_COUNT, MAX_TREATMENTS_COUNT)):\n group_size = random.randint(MIN_GROUP_SIZE, MAX_GROUP_SIZE)\n\n # treated_survival is the probability to survive if treated\n # this is an exact law of the universe, the player will not have this information\n # therefore it is stored in a separate dict and not in the given-to-player Trial object\n treated_survival = random.uniform(MIN_TREATED_SURVIVAL, MAX_TREATED_SURVIVAL)\n\n trial = Trial(group_size, untreated_survival, treated_survival)\n\n trials.append(trial)\n treated_survivals[trial] = treated_survival\n\n chosen_trial = playground.choose_trial(trials)\n\n if chosen_trial is None: # None means no treatment\n chosen_survival = untreated_survival\n else:\n chosen_survival = treated_survivals[chosen_trial]\n\n return random.uniform(0, 1) <= chosen_survival", "def test_fit_many_comps():\n\n run_name = 'stationary'\n savedir = 'temp_data/{}_expectmax_{}/'.format(PY_VERS, run_name)\n mkpath(savedir)\n data_filename = savedir + '{}_expectmax_{}_data.fits'.format(PY_VERS,\n run_name)\n # log_filename = 'temp_data/{}_expectmax_{}/log.log'.format(PY_VERS,\n # run_name)\n\n logging.basicConfig(level=logging.INFO, filemode='w',\n filename=log_filename)\n uniform_age = 1e-10\n sphere_comp_pars = np.array([\n # X, Y, Z, U, V, W, dX, dV, age,\n [-50,-50,-50, 0, 0, 0, 10., 5, uniform_age],\n [ 50, 50, 50, 0, 0, 0, 10., 5, uniform_age],\n ])\n starcounts = [200,200]\n ncomps = sphere_comp_pars.shape[0]\n\n # initialise z appropriately\n # start = 0\n # for i in range(ngroups):\n # nstars_in_group = int(group_pars[i,-1])\n # z[start:start+nstars_in_group,i] = 1.0\n # start += nstars_in_group\n\n true_memb_probs = np.zeros((np.sum(starcounts), ncomps))\n true_memb_probs[:200,0] = 1.\n true_memb_probs[200:,1] = 1.\n\n synth_data = SynthData(pars=sphere_comp_pars, starcounts=starcounts,\n Components=SphereComponent,\n )\n synth_data.synthesise_everything()\n 
tabletool.convert_table_astro2cart(synth_data.table,\n write_table=True,\n filename=data_filename)\n\n origins = [SphereComponent(pars) for pars in sphere_comp_pars]\n\n init_memb_probs = np.ones(true_memb_probs.shape) / ncomps\n\n best_comps, med_and_spans, memb_probs = \\\n expectmax.fit_many_comps(data=synth_data.table,\n ncomps=ncomps,\n rdir=savedir,\n init_memb_probs=init_memb_probs,\n trace_orbit_func=dummy_trace_orbit_func, )\n\n # compare fit with input\n try:\n assert np.allclose(true_memb_probs, memb_probs)\n except AssertionError:\n # If not close, check if flipping component order fixes things\n memb_probs = memb_probs[:,::-1]\n best_comps = best_comps[::-1]\n assert np.allclose(true_memb_probs, memb_probs)\n for origin, best_comp in zip(origins, best_comps):\n assert (isinstance(origin, SphereComponent) and\n isinstance(best_comp, SphereComponent))\n o_pars = origin.get_pars()\n b_pars = best_comp.get_pars()\n\n logging.info(\"origin pars: {}\".format(o_pars))\n logging.info(\"best fit pars: {}\".format(b_pars))\n assert np.allclose(origin.get_mean(),\n best_comp.get_mean(),\n atol=5.)\n assert np.allclose(origin.get_sphere_dx(),\n best_comp.get_sphere_dx(),\n atol=2.)\n assert np.allclose(origin.get_sphere_dv(),\n best_comp.get_sphere_dv(),\n atol=2.)\n assert np.allclose(origin.get_age(),\n best_comp.get_age(),\n atol=1.)", "def test_scrublet_simulate_doublets():\n pytest.importorskip(\"scrublet\")\n\n adata_obs = sc.datasets.pbmc3k()\n sc.pp.filter_genes(adata_obs, min_cells=3)\n sc.pp.filter_cells(adata_obs, min_genes=3)\n adata_obs.layers['raw'] = adata_obs.X\n sc.pp.normalize_total(adata_obs)\n logged = sc.pp.log1p(adata_obs, copy=True)\n\n _ = sc.pp.highly_variable_genes(logged)\n adata_obs = adata_obs[:, logged.var['highly_variable']]\n\n adata_sim = sce.pp.scrublet_simulate_doublets(adata_obs, layer='raw')\n\n assert 'doublet_parents' in adata_sim.obsm.keys()", "def test_subset_reconstruction_iterable(self, wires):\n circuit = hadamard_circuit(wires)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n # choose 1000 random indices\n snapshots = np.random.choice(np.arange(10000, dtype=np.int64), size=1000, replace=False)\n state = shadow.global_snapshots(snapshots=snapshots)\n assert state.shape == (len(snapshots), 2**wires, 2**wires)\n\n # check the results against obtaining the full global snapshots\n expected = shadow.global_snapshots()\n for i, t in enumerate(snapshots):\n assert np.allclose(expected[t], state[i])", "def test_getters():\n\n A = np.random.randint(2, size=(10, 10))\n S = np.random.randint(2, size=10)\n stocks = np.random.rand(10)\n ones = np.ones(10)\n # Dummy values\n m = ExploitCore(A, S, stocks, ones, ones, ones, 0.5, 2.0)\n assert (m.get_adjacency() == A).all()\n assert (m.get_strategies() == S).all()\n assert (m.get_stocks() == stocks).all()\n assert m.get_time() == 0", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def sim_grasp_set_row(scene):\n gripper_model = burg.gripper.Robotiq2F85()\n\n target_object = None\n\n found = False\n while found == False:\n print([obj.object_type.identifier for obj in scene.objects])\n buffer_target_object = input(\"Wich object in the previous list do you want to grab ? 
\")\n\n i = 0\n while i < len(scene.objects):\n if (scene.objects[i].object_type.identifier == buffer_target_object):\n target_object = scene.objects[i]\n found = True\n break\n i += 1\n if found == False :\n print(\"the selected object is not in the list\")\n\n #test a grasp set and create a list of successful grasp\n grasp_set, contact_points, normals, approach_vectors = create_antipodal_grasp_set(target_object)\n sim = burg.sim.SceneGraspSimulator(target_object = target_object, gripper= gripper_model, scene=scene, verbose=False)\n scores = sim.simulate_grasp_set(grasp_set)\n successful_grasps = burg.grasp.GraspSet()\n index_successfull = []\n params_successfull = []\n for index in range(len(scores)):\n if scores[index] == 5:\n successful_grasps.add(grasp_set[index].as_grasp_set())\n index_successfull += [index]\n params_successfull+=[[contact_points[index], normals[index], approach_vectors[index]]]\n sim.dismiss()\n\n print(len(successful_grasps))\n print(\"hellllo\")\n\n time.sleep(10)\n\n #Evaluate each successfull grasp\n sim2 = burg.sim.SceneGraspSimulator(target_object = target_object, gripper= gripper_model, scene=scene, verbose=False)\n results = []\n for i, grasp in enumerate(successful_grasps):\n n_graspset, n_contact_points, n_normals, n_approach_vectors = create_antipodal_noisy_grasp_set(target_object, grasp, params_successfull[i][0][0])\n #n_graspset = perturbations.generate_perturb_grasp_set(grasp = grasp, nb_grasps = 50)\n #burg.visualization.show_grasp_set(objects = [target_object.object_type.mesh], gs = n_graspset, gripper = gripper_model, with_plane = True)\n params_noised = []\n for k in range(len(n_contact_points)):\n params_noised += [[n_contact_points[i], n_normals[i], n_approach_vectors[i]]]\n scores = sim2.simulate_grasp_set(n_graspset)\n robust = sum(scores)/len(scores)\n #metric = quality.probability_force_closure(n_graspset, params_noised)\n metric = quality.epsilon_quality(grasp, params_successfull[i] )\n results += [[grasp, robust, metric]]\n print([grasp, robust, metric])\n #results+= [[grasp, metric]]\n #results += [metric]\n\n print(results)\n sim2.dismiss()\n return results", "def independent(self):\n return True", "def test_single_game_works(self):\n sim = ss.Simulation(seed=154)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=79)\n game2 = sim.single_game()\n assert game1 != game2, 'Your method single_game is not working.'", "def test_parameterisation_model(self):\n self.topo.assign_force_field(self.ff)\n for atom in self.topo.get_atoms(inc_alt_states=True):\n if atom.element != 'H':\n self.assertTrue(atom._ff_id is not None)", "def test_empty_structure():\n empty = SME_Struct()\n\n assert isinstance(empty.version, str)\n assert empty.teff is not None\n assert empty.logg is not None\n assert empty.vmic == 0\n assert empty.vmac == 0\n assert empty.vsini == 0\n\n assert empty.nseg == 0\n assert empty.wave is None\n assert empty.spec is None\n assert empty.uncs is None\n assert empty.synth is None\n assert empty.cont is None\n assert empty.mask is None\n assert empty.mask_good is None\n assert empty.mask_bad is None\n # assert empty.mask_line is None\n # assert empty.mask_continuum is None\n\n assert empty.cscale.shape == (0, 1)\n assert empty.vrad.shape == (0,)\n assert empty.cscale_flag == \"none\"\n assert empty.vrad_flag == \"none\"\n assert empty.cscale_degree == 0\n\n assert empty.mu is not None\n assert empty.nmu == 7\n\n # assert empty.md5 is not None\n\n assert empty.linelist is not None\n assert empty.species is not None\n assert 
len(empty.species) == 0\n assert empty.atomic is not None\n\n assert empty.monh == 0\n assert not np.isnan(empty[\"abund Fe\"])\n assert empty.abund[\"H\"] == 12\n assert not np.isnan(empty.abund()[\"Mg\"])\n\n assert empty.system_info is not None\n assert empty.system_info.arch == \"\"\n\n assert len(empty.fitparameters) == 0\n assert empty.fitresults is not None\n assert empty.fitresults.covariance is None\n\n assert empty.atmo is not None\n assert empty.atmo.depth is None\n\n assert empty.nlte is not None\n assert empty.nlte.elements == []", "def test_serializing_solvers(self):\n ratio = 0.5\n l_enet = 1e-2\n sd = ratio * l_enet\n\n proxes = [\n ProxZero(),\n ProxTV(2),\n ProxL1(2),\n ProxGroupL1(strength=1, blocks_start=[0, 3, 8],\n blocks_length=[3, 5, 2])\n ]\n solvers = [\n AdaGrad(step=1e-3, max_iter=100, verbose=False, tol=0),\n SGD(step=1e-3, max_iter=100, verbose=False, tol=0),\n SDCA(l_l2sq=sd, max_iter=100, verbose=False, tol=0),\n SAGA(step=1e-3, max_iter=100, verbose=False, tol=0),\n SVRG(step=1e-3, max_iter=100, verbose=False, tol=0)\n ]\n model_map = {\n ModelLinReg: SimuLinReg,\n ModelLogReg: SimuLogReg,\n ModelPoisReg: SimuPoisReg,\n ModelHinge: SimuLogReg,\n ModelQuadraticHinge: SimuLogReg,\n ModelSmoothedHinge: SimuLogReg,\n ModelAbsoluteRegression: SimuLinReg,\n ModelEpsilonInsensitive: SimuLinReg,\n ModelHuber: SimuLinReg,\n ModelLinRegWithIntercepts: SimuLinReg,\n ModelModifiedHuber: SimuLogReg\n }\n\n for solver in solvers:\n for mod in model_map:\n for prox in proxes:\n\n np.random.seed(12)\n n_samples, n_features = 100, 5\n w0 = np.random.randn(n_features)\n intercept0 = 50 * weights_sparse_gauss(n_weights=n_samples,\n nnz=30)\n c0 = None\n X, y = SimuLinReg(w0, c0, n_samples=n_samples, verbose=False,\n seed=2038).simulate()\n if mod == ModelLinRegWithIntercepts:\n y += intercept0\n\n model = mod(fit_intercept=False).fit(X, y)\n\n # prox = ProxZero() #(2.)\n solver.set_model(model)\n solver.set_prox(prox)\n\n pickled = pickle.loads(pickle.dumps(solver))\n\n self.assertTrue(solver._solver.compare(pickled._solver))\n\n self.assertTrue(\n solver.model._model.compare(pickled.model._model))\n\n self.assertTrue(solver.prox._prox.compare(pickled.prox._prox))\n\n if mod == ModelLinRegWithIntercepts:\n test_vector = np.hstack((X[0], np.ones(n_samples)))\n self.assertEqual(\n model.loss(test_vector),\n solver.model.loss(test_vector))\n else:\n self.assertEqual(model.loss(X[0]), solver.model.loss(X[0]))", "def simulation(nepisodes):\n # Initialize robots\n # print('I am inside the simulation')\n agents = [] # List containing all robots\n a1 = Agent(start = [0, 0], end = [grid_size-1, grid_size-1], nr = 1) # Create agent 1\n a2 = Agent(start = [0, grid_size-1], end = [grid_size-1, 0], nr = 2) # Create agent 2\n a3 = Agent(start = [grid_size-1, 0], end = [0, grid_size-1], nr = 3) # Create agent 3\n a4 = Agent(start = [grid_size-1, grid_size-1], end = [0, 0], nr = 4) # Create agent 4\n agents.append(a1)\n agents.append(a2)\n agents.append(a3)\n agents.append(a4)\n\n # for agent in agents:\n # agent.load_target('target_weights_{}.h5'.format(agent.nr))\n # agent.load_policy('policy_weights_{}.h5'.format(agent.nr))\n # print('loaded')\n\n steps_list = [[] for i in range(len(agents))]\n reward_list = [[] for i in range(len(agents))]\n cumulative_rewards = [[] for i in range(len(agents))]\n collisions_list = [[] for i in range(len(agents))]\n\n t = 0 # Set time to zero\n for i in range(nepisodes):\n t = episode(agents, t, i+1) # Run one episode\n\n print('End of episode ', i+1)\n 
agent_index = 0\n for agent in agents:\n steps_list[agent_index].append(agent.steps)\n reward_list[agent_index].append(agent.reward)\n collisions_list[agent_index].append(agent.collisions)\n if i == 0:\n cumulative_rewards[agent_index].append(agent.reward)\n else:\n cumulative_rewards[agent_index].append(agent.reward + cumulative_rewards[agent_index][i-1])\n agent_index += 1\n\n if i % 1000 == 0:\n with open('reward_4_agents_{}'.format(i),'wb') as f:\n pickle.dump(reward_list,f)\n\n with open('steps_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(steps_list, f)\n\n with open('cols_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(collisions_list, f)\n\n\n return steps_list, reward_list, collisions_list, cumulative_rewards", "def test_bothE_traversals(self):\r\n results = self.jon.bothE()\r\n assert len(results) == 2\r\n assert self.jon_physics in results\r\n assert self.jon_in_beekeeping in results", "def experiment(self) -> Any:", "def test_createData():\n\n sys = LVsystem.Ecosystem()\n\n sys.addSpecies('rabbit')\n sys.setInteraction('rabbit', 'hen', 0)\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInitialCond('rabbit', 30)\n sys.setGrowthRate('rabbit', 0.09)\n sys.setCarrCap('rabbit', 10000)\n sys.setChangeRate('rabbit', 400)\n\n sys.addSpecies('hen')\n sys.setInteraction('hen', 'rabbit', 0)\n sys.setInteraction('hen', 'fox', -1)\n sys.setInitialCond('hen', 10)\n sys.setGrowthRate('hen', 0.07)\n sys.setCarrCap('hen', 10000)\n sys.setChangeRate('hen', 500)\n\n sys.addSpecies('fox')\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInteraction('fox', 'hen', 1)\n sys.setInitialCond('fox', 20)\n sys.setGrowthRate('fox', -0.06)\n sys.setCarrCap('fox', 1)\n sys.setChangeRate('fox', 250)\n\n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n \n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def none_model_vars():\n obj_fun = None\n time_sol = None\n gap = None\n s_pos = None\n b_target = None\n threads = None\n\n return obj_fun, time_sol, gap, s_pos, b_target, threads", "def test_order(self):\n n = 3\n was_called = n*[False]\n class Mock(object):\n def __init__(self, i):\n self.i = i\n\n def evolve(self1, t, dt):\n was_called[self1.i] = True\n self.assertTrue(all(was_called[:self1.i]))\n\n sim = simulation.Simulation(*[Mock(_) for _ in xrange(n)])\n sim.run(sim.dt)", "def nits(self):", "def test_duplicate(self):\n test_file = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-225-1-0.mdd')\n\n mdd.procall([test_file])\n \n self.compare_node58()", "def prepare_simulation(master_seed, n_populations):\n nest.ResetKernel()\n # set global kernel parameters\n nest.SetKernelStatus(\n {\"communicate_allgather\": sim.allgather,\n \"overwrite_files\": sim.overwrite_existing_files,\n \"resolution\": sim.dt,\n \"total_num_virtual_procs\": sim.n_vp})\n if sim.to_text_file:\n nest.SetKernelStatus({\"data_path\": data_path_test})\n \n # Set random seeds\n \n # PYNEST\n #nest.sli_run('0 << /rngs [%i %i] Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map >> SetStatus'%(\n # master_seed, master_seed + sim.n_vp - 1))\n #nest.SetKernelStatus({\"rng_seeds\" : range(master_seed, master_seed + sim.n_vp)})\n #nest.sli_run('0 << /grng rngdict/gsl_mt19937 :: %i CreateRNG >> SetStatus'%(master_seed + sim.n_vp))\n 
#nest.SetKernelStatus({\"grng_seed\" : master_seed + sim.n_vp})\n #pyrngs = [np.random.RandomState(s) for s in \n # range(master_seed + sim.n_vp + 1, master_seed + 2 * sim.n_vp + 1)]\n\n # SLI VERSION\n sli_str = \"0 << \\n\"\n #sli_str += \"/rngs %i [0 %i 1 sub] add Range { rngdict/gsl_mt19937 :: exch CreateRNG } Map\\n\"%(master_seed, sim.n_vp) # local RNG, seeded\n #sli_str += \"/grng rngdict/gsl_mt19937 :: %i %i add CreateRNG\\n\"%(master_seed, sim.n_vp) # global RNG\n sli_str += \"/rng_seeds %i [0 %i 1 sub] add Range\\n\"%(master_seed, sim.n_vp) # local RNG seeds\n sli_str += \"/grng_seed %i %i add\\n\"%(master_seed, sim.n_vp) # global RNG seed\n sli_str += \">> SetStatus\"\n nest.sli_run(sli_str)\n sli_str2 = \"/script_rngs [%i]\\n\"%sim.n_vp\n sli_str2 += \"{%i add rngdict /gsl_mt19937 get exch CreateRNG } Table def\\n\"%(master_seed + sim.n_vp)\n sli_str2 += \"/normal_rdvs script_rngs { rdevdict /normal get CreateRDV } Map def\"\n nest.sli_run(sli_str2)\n pyrngs = None\n return pyrngs", "def test_perfectModelEnsemble_init_da(PM_ds_initialized_1d):\n pm = PerfectModelEnsemble(PM_ds_initialized_1d.tos)\n assert pm", "def ncore(self):", "def test_molecule_loop_no_data(self):\n\n # Reset relax.\n reset()\n\n # Add a data pipe to the data store.\n ds.add(pipe_name='orig', pipe_type='mf')\n\n # Loop over the molecules.\n i = 0\n for molecule in mol_res_spin.molecule_loop():\n i = i + 1\n\n # Test loop length.\n self.assertEqual(i, 0)", "def generate(\n seeds=10,\n param_num_nodes=7,\n mode='train',\n param_dim=10,\n param_sel=100,\n param_mu=10,\n param_br=0.05,\n param_activity_wt=None,\n A=None,\n sp_to_id=None,\n min_coord=None,\n max_coord=None,\n org_pts=None,\n ):\n global dim, sel, mu, br, activity_wt, tree_lc, tree_rc, num_nodes\n\n dim=param_dim\n sel=param_sel\n mu=param_mu\n br=param_br\n activity_wt=param_activity_wt\n num_nodes = param_num_nodes\n\n sp_root = 0\n tree = None\n\n if mode == 'train':\n tree, tree_lc, tree_rc = generate_tree(sp_root, num_nodes)\n if param_activity_wt is None:\n # weights for the linear activity function\n num_wts = int(((dim * (dim + 1))/2) + 1)\n activity_wt = np.random.normal(0, 1, num_wts)\n\n if org_pts is None:\n org_pts = []\n # simulate data points\n # format: exampleID, species, values\n # region, species, coord1, coord2, ...., activity_value\n\n for i in tqdm(range(int(seeds))):\n pt_id = i\n\n # pick a random point of d-dimension\n rand_pt = np.random.uniform(min_coord, max_coord, dim)\n curr_pt = np.append([pt_id, sp_root], rand_pt)\n curr_activity = get_activity(modify_pt(rand_pt), activity_wt)\n # print('curr_pt:', curr_pt, 'curr_activity:', curr_activity); exit(0)\n org_pts.append(np.append(curr_pt, curr_activity))\n\n generated_points = []\n full_org_pts = []\n\n if mode == 'train':\n pool = Pool(16)\n sample_bag = pool.map(generate_bag, org_pts)\n for item in sample_bag:\n for val in item:\n val = list(val)\n full_org_pts.append(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n else:\n for val in org_pts:\n val = list(val)\n generated_points.append(val[:2]+modify_pt(val[2:-1])+[val[-1]])\n\n return generated_points, activity_wt, org_pts, full_org_pts, tree", "def solution(self) -> State:" ]
[ "0.62294304", "0.5881555", "0.5799523", "0.5603874", "0.5549049", "0.55471927", "0.5523414", "0.55085677", "0.5462701", "0.54552025", "0.54473364", "0.5418987", "0.5409605", "0.54048645", "0.5397913", "0.5395056", "0.53824604", "0.53729665", "0.53604305", "0.535905", "0.535402", "0.53459674", "0.5342061", "0.5338783", "0.5338299", "0.5330204", "0.5315376", "0.5310694", "0.53080636", "0.53077054", "0.5307604", "0.53001386", "0.53000486", "0.52976155", "0.5296482", "0.5286949", "0.52734643", "0.526898", "0.52682656", "0.526406", "0.5239242", "0.5238306", "0.5236805", "0.523653", "0.52342206", "0.52296776", "0.5227946", "0.5221033", "0.52183276", "0.521822", "0.52090883", "0.5208045", "0.5206338", "0.5205761", "0.52033246", "0.519695", "0.518926", "0.5180175", "0.51753825", "0.5174641", "0.51690036", "0.5164724", "0.51556885", "0.51537895", "0.5150561", "0.5148141", "0.51432526", "0.51358813", "0.5116993", "0.51056975", "0.510137", "0.5100057", "0.5097287", "0.5091948", "0.5088143", "0.50834423", "0.50675", "0.5067474", "0.5064519", "0.50584126", "0.50580305", "0.50535214", "0.50534064", "0.5052544", "0.50453496", "0.50411016", "0.50386566", "0.5038581", "0.50372666", "0.5035926", "0.5035543", "0.50354517", "0.5029772", "0.50289667", "0.5026618", "0.5025631", "0.50195867", "0.5017724", "0.501725", "0.5009026" ]
0.53972495
15
Check if the given data is not filled already in the bag
def is_coord_empty(self, data):
    check = False
    if data["topic"] in DRONE_POS_TOPICS:
        check = self.drone.check_if_pos(data["coord"])
    elif data["topic"] in DRONE_VEL_TOPICS:
        check = self.drone.check_if_vel(data["coord"])
    elif data["topic"] in DRONE_ACC_TOPICS:
        check = self.drone.check_if_acc(data["coord"])
    elif data["topic"] in SUBJECT_TOPICS:
        check = self.subject.check_if_pos(data["coord"])
    elif data["topic"] in self.PEDESTRIAN_TOPICS:
        check = self.peds[data["pid"]].check_if_pos(data["coord"])
    return check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_data(self):\n return ([0] != self.__contexts) and ([0] != self.__weights)", "def empty(self):\n return 0 >= len(self.__data)", "def verify_if_basket_is_empty(self):\n self._basket.verify_if_basket_is_empty()", "def not_empty(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] > 0", "def has_data(self):\n return len(self.data) > 0", "def tied(self):\n for (x, y) in self.fields:\n if self.fields[x, y] == self.empty:\n return False\n return True", "def is_empty(self):", "def is_empty(self):", "def is_data(i):\n keys = ['_id', '_time']\n return all(i != k for k in keys)", "def is_empty(self):\n return len(self.data) == 0", "def is_empty(self):\r\n return self.buff==[]", "def is_empty(self):\n for key, dataset in self.datasets.items():\n try:\n has_data = dataset.has_data()\n except MFDataException as mfde:\n raise MFDataException(\n mfdata_except=mfde,\n model=self._container_package.model_name,\n package=self._container_package._get_pname(),\n message=\"Error occurred while verifying\"\n ' data of dataset \"{}\" in block '\n '\"{}\"'.format(dataset.structure.name, self.structure.name),\n )\n\n if has_data is not None and has_data:\n return False\n return True", "def f_is_empty(self):\n return self._data is None", "def empty(self):\n return not self.mystack1 and not self.mystack2", "def filled(self):\n return self.list.count(0) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def is_empty(self):\n return len(self._data) == 0", "def check_unique(self, data: Union['LedGroup', 'Sequencer', 'Step'], datatype: str, seq: Optional['Sequencer']) \\\n -> bool:\n if datatype == 'LedGroup':\n return data.Name.lower() not in self.get_group_list()\n elif datatype == 'Sequencer':\n return data.Name.lower() not in self.get_seq_names()\n else:\n if seq is not None:\n return data.Name.lower() not in seq.get_steps_names()\n return False", "def its_empty(self) -> bool:\n return self.items == []", "def f_is_empty(self):\n return len(self._data) == 0", "def empty(self) -> bool:\n return not bool(self.data)", "def checkIfThereIsData(self, i):\n _, amountOfThings = self.weaviate.Get(\"/\" + i)\n if len(amountOfThings[i]) == 0:\n return False\n else:\n return True", "def is_empty(self):\n return not self.size()", "def add_data(self, data):\n for i, row in enumerate(self._grid):\n for j, column in enumerate(row):\n if self._grid[i][j] is None:\n self._grid[i][j] = data\n return True\n return False", "def store_in_bag(self, data):\n # timestamp is (s, nanos): data[\"ts\"], data[\"tnanos\"]\n\n self.bag.add(data)\n\n # Ensure that all data have the same timestamp and are not None\n # Also there can't be more than a sample per second.\n if self.bag.is_full():\n if random() > 0.99999:\n print(\"Telemetry data: \", data[\"topic\"])\n print(\"Bag data: \", self.bag.print_data())\n\n # Then flush the data to process it and empty the bag\n data = self.bag.get_data()\n self.on_full(data)", "def validate(self, data: Dict):\n for key in self.__dict__.keys():\n if not key.startswith('__') and key != 'id':\n if data[key] == '' or data[key] is None:\n raise ValidationError(\n message=f'{key} should not be \"{data[key]}\"'\n )", "def has_data(self, *args, **kwargs):\n return False", "def empty(self) -> bool:\n return self.data.empty()", 
"def is_empty(self):\n if len(self._data) == 0:\n return True\n return False", "def _check_data_valid(self):\n\n is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")", "def _check(data, path):\n if 'loading' in data:\n empties = (k for k, v in data.items() if not v)\n for empty in empties:\n logging.info('No data collected for %s in file %s.', empty, path)\n if 'errors' in data:\n logging.warning('\\n'.join(data['errors']))", "def is_empty(self) -> bool:", "def test_overFill(self):\r\n high = 15\r\n for _ in range(high):\r\n self.nb.add(_)\r\n\r\n self.assertFalse(self.nb.isEmpty())\r\n self.assertTrue(self.nb.isFull())\r\n self.assertEqual(5, len(self.nb))\r\n\r\n # check all are still present\r\n for _ in range(high-1, high - SIZE-1, -1):\r\n self.assertTrue(_ in self.nb)", "def empty(self) -> bool:\n return self.data.get_size() == 0", "def _check_onehot_data(self, data):\n if data.ndim > 1 and np.equal(data ** 2, data).all():\n shp = (data.shape[0],) + data.shape[2:]\n if np.equal(np.ones(shp), data.sum(axis=1)).all():\n return True\n return False", "def empty(self) -> bool:", "def has_data(self):\n return self._data is not None", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def isExist(data):\n return True/False", "def derives_empty(self):\n if self.known_to_derive_empty:\n return True\n for item in self.first():\n if item.is_empty():\n self.known_to_derive_empty = True\n return True\n return False", "def _is_empty(self):\n return self.size == 0", "def _check_already_present(self, new_da):\n for da in self:\n self._id_of_DataArrays_equal(da, new_da)", "def _is_missing(self, item):\n pass", "def is_empty(self):\n return self.items == []", "def is_empty(self) -> bool:\n return len(self._data) == 0", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def __validate_node_data(self, data):\n\n # skipping check of 'grapheap_node_id' optimisation key\n if all(key in data for key in self.optimisation_keys[1:]):\n return True\n\n else:\n missing_keys = [\n x for x in self.optimisation_keys[1:] if x not in data]\n raise ValueError(\"Grapheap Error: \" + str(missing_keys) +\n \" optimisation keys missing in data\")", "def is_empty(self):\n return self.count == 0", "def is_empty(self):\n return self.count == 0", "def is_populated(self) -> bool:\n return 0 < self.count_compounds()", "def exists(self, data):\n return data in self.__stack", "def is_empty(self):\n return self.size == []", "def is_empty(self):\n\n # If the queue is an empty list, self._data would return False\n # So if the queue is empty we want to return true\n # modify with not self._data\n return not self._data", "def is_empty(self):\n return len(self) == 0", "def check(self, data):# ->bool:\r\n return check(self.gd, data)", "def is_empty( self ):\n \n return len(self.__deck) == 0", "def empty(self):\n return len(self.deck) == 0", "def is_empty(self):\n return not (\n self.has_label\n or self.has_name\n or self.has_points\n or self.has_attributes\n )", "def is_consistent_dataset(data: dict, parameter_box: np.ndarray = None) -> bool:\n train_set = copy.deepcopy(data)\n y, phi = train_set[\"outputs\"].pop(-1), 
train_set[\"features\"].pop(-1)\n y, phi = np.array(y)[..., np.newaxis], np.array(phi)[..., np.newaxis]\n if train_set[\"outputs\"] and train_set[\"features\"]:\n theta, _, gramian, beta = confidence_polytope(train_set, parameter_box=parameter_box)\n return is_valid_observation(y, phi, theta, gramian, beta)\n else:\n return True", "def is_empty(self): # -> bool:\n ...", "def is_empty(self):\n # type: () -> bool\n # If the Histogram contains at least one value, at least one element of\n # self.counts will be not null\n return not any(self.counts)", "def is_valid(self, dataset):\n pass", "def has_data(self) -> bool:\n raise NotImplementedError", "def is_empty(self) -> bool:\n return self._symmetries == [] or self._sq_paulis == [] or self._sq_list == []", "def empty(self):\n return len(self.a) + len(self.b) == 0", "def empty(self) -> bool:\n return self.q1==[]", "def isTileBlank(tile):\n for b in tile:\n if b: return False\n return True", "def is_empty(self, trace) -> bool:\n return len(trace) == 0", "def check_empty(self):\n if self.size():\n raise AttributeError", "def empty(self):\n return self.value == []", "def verify_queue_empty(self):\n self.assert_sample_queue_size(DataParticleType.VELOCITY_PARTICLE, 0)\n self.assert_sample_queue_size(DataParticleType.TIME_PARTICLE, 0)", "def has_data(self):\n\n return self._data is not None", "def has_data(self):\n\n return self._data is not None", "def is_empty(self):\n return len(self.items) == 0", "def is_empty(self):\n return len(self.values) == 0", "def is_empty(self):\n return self._sum() == 0", "def is_empty(self):\r\n return len(self) == 0", "def is_empty(self):\n\n return self.items == []", "def is_empty(self):\n\n return self.items == []", "def is_empty(self):\n return self._size == 0", "def empty(self) -> bool:\n return self.sk1_len==0", "def is_empty(self):\n return (self.counter == 0)", "def is_empty(self):\n #return not self.vulnerable_in and not self.fixed_in\n return not self.fixed_in", "def _isEmpty(self, x, y):\n\t\treturn self.getValue(x, y) == None", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return len(self) == 0", "def is_empty(self):\n return self.__size == 0", "def checkOpsimData(self, otherdata=None):\n data2check = self.opsim_data\n if otherdata:\n data2check = otherdata\n if type(data2check) != list:\n if type(data2check) == dict:\n if set(self._opsim_keys).issubset(set(data2check.keys())):\n return True, 'good'\n else:\n return False, 'Data is not complete'\n else:\n return False, 'Data is not a list nor a dictionary'\n else:\n nodict = 0\n baddict = 0\n for data in data2check:\n if type(data) != dict:\n nodict += 1\n else:\n if not set(self._opsim_keys).issubset(set(data.keys())):\n baddict += 1\n if (nodict == 0 and baddict == 0):\n return True, 'good'\n else:\n statement = '{0} non dictionary element(s) and {1} uncomplete\\\n dictionary(ies) in data list'.format(nodict, baddict)\n return (False, statement)", "def is_empty(self):\n return self.id is None or self.nb_cart_items == 0", "def missing_data(self, data):\n missing_fields = []\n for key in data:\n if not key in request.json:\n missing_fields.append(key)\n if missing_fields:\n message = 'Missing ' + ', '.join(missing_fields)\n return self.bad_request(message)\n return None", "def validate(self, data):\n if 
HashTag.objects.filter(name=data[\"hash_tag\"][\"name\"]).first() is None:\n raise serializers.ValidationError(\n {\"hash_tag\": data[\"hash_tag\"][\"name\"] + \" \" + _(\"HashTag does not exist.\")}\n )\n\n return data" ]
[ "0.65717185", "0.63385725", "0.6331197", "0.62527806", "0.61652166", "0.61505044", "0.608145", "0.608145", "0.60609126", "0.60432065", "0.60016364", "0.5989013", "0.59870905", "0.5972411", "0.59252816", "0.59170127", "0.59170127", "0.59170127", "0.59170127", "0.59170127", "0.59170127", "0.59170127", "0.5893321", "0.5889449", "0.58882296", "0.5886396", "0.5865447", "0.58651257", "0.58586276", "0.58512163", "0.58308935", "0.5826826", "0.58224964", "0.5809509", "0.57886654", "0.5776007", "0.57752746", "0.5774196", "0.5769365", "0.57595897", "0.5756273", "0.573443", "0.5734022", "0.5728281", "0.5728167", "0.5726175", "0.5723506", "0.5716772", "0.5713306", "0.5712191", "0.56938726", "0.5693762", "0.56878406", "0.56878406", "0.56843567", "0.56766623", "0.5672347", "0.56722265", "0.5644812", "0.5640878", "0.5636179", "0.56295294", "0.5628514", "0.5625296", "0.561773", "0.5609455", "0.5608869", "0.5601043", "0.5595306", "0.5592583", "0.558852", "0.5583733", "0.5580688", "0.5577714", "0.5577599", "0.55761325", "0.5575769", "0.5575769", "0.5575537", "0.5575068", "0.55735487", "0.5572166", "0.55629694", "0.55629694", "0.55602", "0.554836", "0.55469877", "0.55456215", "0.5544786", "0.55434275", "0.55434275", "0.55434275", "0.55434275", "0.55434275", "0.55434275", "0.5542141", "0.5541512", "0.5536896", "0.5533101", "0.55323005", "0.5531319" ]
0.0
-1
Stores the given data
def add(self, data):
    if data["topic"] in DRONE_POS_TOPICS:
        self.drone.set_pos_val(data["ts"], data["coord"], data["value"])
    elif data["topic"] in DRONE_VEL_TOPICS:
        self.drone.set_vel_val(data["ts"], data["coord"], data["value"])
    elif data["topic"] in DRONE_ACC_TOPICS:
        self.drone.set_acc_val(data["ts"], data["coord"], data["value"])
    elif data["topic"] in SUBJECT_TOPICS:
        self.subject.set_val(data["ts"], data["coord"], data["value"])
    elif data["topic"] in self.PEDESTRIAN_TOPICS:
        self.peds[data["pid"]].set_val(data["ts"], data["coord"], data["value"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_data(self, data):\n self.data.append(data)", "def store_data(self, store_data):\n self._store_data = store_data", "def saveData(self):\n pass", "def save_data(self):\n pass", "def save(self, data):\n\t\tif self.value:\n\t\t\tdata['value'] = self.value", "def store_data(self, data):\n if not self.light.hasAttr(self.custom_data_storage_attr_name):\n pm.addAttr(\n self.light,\n ln=self.custom_data_storage_attr_name,\n dt='string'\n )\n\n self.light.setAttr(self.custom_data_storage_attr_name, data)", "def save(self, data):\n self.write(data)", "def save_data(self):\n data = self.data\n if data is not None:\n data = base64.encodestring(pickle.dumps(data))\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('UPDATE sessions SET data = ? WHERE id = ?;',\n (data, self.sid))\n cursor.close()\n connection.commit()\n connection.close()", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def _storeData(self, data, table, query=None):\n print ('Storing data')\n conn = dbo.getConnection()\n\n if query == None:\n num_cols = len(data[0])\n cols = ','.join(['%s ' for i in range(0, num_cols)])\n query = \"INSERT INTO \" + table + \" VALUES (\" + cols + \")\"\n\n dbo.execute_query(conn, query, data, multiple=True)\n dbo.closeConnection(conn)\n return", "def put(data):", "def on_data(self, data):\n # store the data\n self._storage.append(data)", "def store_data(self, data):\n self.data = data\n # HERE\n the_main_dict = {**self.user_data(), **self.entities_data(), **self.extract_relevant(), **self.locate(),\n **self.calculate_days(), **self.clean_user_desc()}\n # The below is the reason that the table creation must be written in alphabetical order. This is simpler than\n # writing the complex joins that would otherwise be needed.\n my_keys_list = sorted(the_main_dict.keys())\n my_items = list(map(lambda x: str(the_main_dict[x]).replace(\"'\", ''), my_keys_list))\n try:\n # Unpacks the items into an insert statement for the SQLite table\n self.conn.execute(\"INSERT INTO {0} VALUES('{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}',\"\n \"'{10}','{11}','{12}','{13}','{14}','{15}','{16}','{17}','{18}','{19}','{20}',\"\n \"'{21}','{22}','{23}','{24}','{25}','{26}','{27}','{28}')\".format(self.table, *my_items))\n self.limiting += 1\n return 0\n except sqlite3.IntegrityError:\n return 1", "def store(self):\n\n pass", "def save_data(self, record):\n self.dbm.addRecord(record)", "def add_data(self, data):\n self.data = data", "def put_data(data):\n at_write = airtable.Airtable(app.config['AIRTABLE_BASE'],\n app.config['AIRTABLE_WRITE_KEY'])\n return at_write.create(app.config['AIRTABLE_TABLE'] , data)", "def _put(self, key, data):\n path = self._get_key_path(key)\n with open(path, \"wb\") as pickle_file:\n pickle.dump(data, pickle_file)", "def set_data(self, data):\n\n pass", "def save(self, data, file_id=None, metadata={}):\n pass", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def setData(self, data):\n self._data = pickle.dumps(data)", "def data(self, data):\n self.__data = data", "def data(self, data):\n self._data = data", "def data(self, data):\n self._data = data", "def _make_information_storable( self, data ):\n\t\tpass", "def save_session_data(self, session_id, data):\n raise NotImplementedError()", "def write_data(self, data: Dict):\n raise NotImplementedError", "def store(self, key, a):\n if key in self.SMGData.keys():\n self.SMGData[key] = a\n else:\n raise Exception('Key does not exist in the data structure')", 
"def set_data(self, data):\n self.data = data", "def write_data():", "def store(self,key,start,end,data):\n\n pass", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def data(self, data):\n\n self._data = data", "def save_data(db, dict_key, url, data_to_store):\n if dict_key not in db:\n db[dict_key] = []\n data = db[dict_key]\n data.append({\n 'url': url,\n 'data': data_to_store,\n })\n db[dict_key] = data", "def save(self, **data):\n self.applyChanges(data)", "def save(self, *args, **kwargs):\n raise NotImplementedError('missing data mixin')", "def save(self, data, place):\n if isinstance(data, TrustList):\n self.__save_trust_list(place, data)\n\n if isinstance(data, (bytes, bytearray)):\n self.__save_blob(place, data)", "def save(self, output, data):\n pass", "def setData(self, data):\n self.data = data", "def setData(self, data):\n self.data = data", "def store_data(self, data, info_dict):\n\n size_in_MB = np.prod(data.shape) * data.dtype.itemsize / 1024 / 1024\n\n with mongo_connection(self.cfg_mongo) as mongo:\n client, coll = mongo\n if self.datastore == \"gridfs\":\n with mongo_storage_gridfs(client.get_database()) as fs:\n tmp = Binary(pickle.dumps(data))\n tic_io = time.perf_counter()\n fid = fs.put(tmp)\n toc_io = time.perf_counter()\n info_dict.update({\"result_gridfs\": fid})\n\n elif self.datastore == \"numpy\":\n with mongo_storage_numpy(self.cfg_mongo) as fname:\n tic_io = time.perf_counter()\n np.savez(fname, data=data)\n toc_io = time.perf_counter()\n info_dict.update({\"unique_filename\": fname})\n\n elif self.datastore == \"adios2\":\n # Use adios2's context manager\n datadir = join(self.cfg_mongo[\"datadir\"], self.cfg_mongo[\"run_id\"])\n if (isdir(datadir) == False):\n try:\n mkdir(datadir)\n except:\n self.logger.error(f\"Could not access path {datadir}\")\n raise ValueError(f\"Could not access path {datadir}\")\n\n fname = join(datadir, uuid.uuid1().__str__() + \".bp\")\n info_dict.update({\"unique_filename\": fname})\n\n # with open(fname, \"w\") as df:\n # tic_io = time.perf_counter()\n # df.write(info_dict[\"analysis_name\"])\n with adios2.open(fname, \"w\") as fh:\n tic_io = time.perf_counter()\n fh.write(info_dict[\"analysis_name\"], data, data.shape, [0] * data.ndim, data.shape)\n toc_io = time.perf_counter()\n \n\n # Calculate performance metric\n MB_per_sec = size_in_MB / (toc_io - tic_io)\n info_dict.update({\"Performance\": MB_per_sec})\n\n info_dict.update({\"timestamp\": datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")})\n info_dict.update({\"description\": \"analysis results\"})\n\n try:\n inserted_id = coll.insert_one(info_dict)\n except:\n self.logger.error(\"Unexpected error:\", sys.exc_info()[0])\n\n return None", "def store(self, key: object, value: object):\n self._user_data.update({key: value})", "def put(self,data):\n\n \n try:\n\n db = getDatabase()\n connection = db.connect()\n \n connection.put(self,data)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def save(self, data, **kwargs):\n if self.persist==['data']: # 1 data shortcut\n self.output().save(data, **kwargs)\n else:\n targets = self.output()\n if not set(data.keys())==set(targets.keys()):\n raise ValueError('Save dictionary needs to consistent with Task.persist')\n for k, v in data.items():\n targets[k].save(v, **kwargs)", "def save_to_file(self, data):\n\t\tif 
self.data_file.write(data):\n\t\t\tprint(\"Data successfully added to file\")\n\t\telse:\n\t\t\tPrint(\"Problem occured during adding to file\")", "def save_data(self, payload):\n obj = Data(\n key=payload['UUID'],\n device_id=payload['ID'],\n rate=payload['RATE'],\n state=payload['VALUE'],\n date=payload['TIME']\n )\n obj.save()", "def save_data(self):\n # Validate\n try:\n self._data = self._schema(self._data)\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't parse data: %s\",\n humanize_error(self._data, ex))\n\n # Load last valid data\n _LOGGER.warning(\"Reset %s to last version\", self._file)\n self.read_data()\n return\n\n # write\n try:\n write_json_file(self._file, self._data)\n except (OSError, json.JSONDecodeError) as err:\n _LOGGER.error(\"Can't store config in %s: %s\", self._file, err)", "def save_data(self, gauge_name, date_key, data):\n pass", "def add(self, data):\n if self._filter(data):\n id = self.db._generate_id(data)\n \n if not id == None:\n if self.db._store:\n self.db.append(id, str(data))\n print id, \"stored to\", self.db._generate_path(id)\n else:\n print id\n print data.show2()", "def _save(self, data: np.ndarray) -> None:\n ...", "def put(self, id, data):\n assert isinstance(data, dict)\n self._shelf[str(id)] = data", "def save(self, output, data):\n return", "def store_data(self):\n return self._store_data", "def save_data(self):\n with open(self.storage_path, 'w') as cache_file:\n json.dump(self.data, cache_file)", "def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)", "def setData(self, data):\n return None", "def store(self, args):\n pass", "def persist(data):\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(INSERT_SQL, (data[\"name\"], data[\"gender\"], data[\"age\"]))\n conn.commit()\n cursor.close()", "def save(self, output, data):", "def store(self, key, value):\n pass", "def store(self, key, headers, value):", "def _write(self, data):\n self.db.append(data)\n\n with open(self.DB_FILE, 'w') as outfile:\n json.dump(self.db, outfile)", "def store_in_bag(self, data):\n # timestamp is (s, nanos): data[\"ts\"], data[\"tnanos\"]\n\n self.bag.add(data)\n\n # Ensure that all data have the same timestamp and are not None\n # Also there can't be more than a sample per second.\n if self.bag.is_full():\n if random() > 0.99999:\n print(\"Telemetry data: \", data[\"topic\"])\n print(\"Bag data: \", self.bag.print_data())\n\n # Then flush the data to process it and empty the bag\n data = self.bag.get_data()\n self.on_full(data)", "def handle_data(self, d):\n\n # write the data to our file handler\n self.save_fh.write(d)", "def setData(self, data):\n self._data = data", "def writeData(self, dataDict):\n pass", "def save_event(self, data):\n rdb.table(self.rdb_table).insert(data)", "def save_to_users(self):\n Data.add_data(self.user_data())", "def storeData(data, filename='laue.dat'):\r\n import cPickle\r\n with open(filename, 'wb') as fp:\r\n cPickle.dump(data, fp)", "def set_data(self, data):\n\n self._data = data", "def set_data(self, data):\n\n self._data = data", "def set_data(self, data):\n\n self._data = data", "def _save_data(self, data):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n f = bz2.BZ2File(path, 'w')\n f.write(pickle.dumps(data))\n f.close()", "def setData(self,data):\n self.data = data\n self.size = len(data)", "def 
setData(self,data):\n self.data = data\n self.size = len(data)", "def handle_data(self, data):\r\n self.fed.append(data)", "def store_introspection_data(node_uuid, data, processed=True):\n introspection_data_manager = plugins_base.introspection_data_manager()\n store = CONF.processing.store_data\n ext = introspection_data_manager[store].obj\n ext.save(node_uuid, data, processed)", "def callback_object(self, data):\n\n try:\n # TODO support multiple of the same object\n # Save an array of object locations\n self.redis.set(self.prefix+\"_\"+data.name, json.dumps([{\n \"name\": data.name,\n \"time\": data.time,\n \"x\": data.x,\n \"y\": data.y,\n \"z\": data.z\n }]))\n except:\n rospy.logerr(\"Cannot insert row\")", "def save(self):\n self.wallet.storage.put(\n \"slp_data_version\", None\n ) # clear key of other older formats.\n data = {\n \"validity\": self.validity,\n \"token_quantities\": {\n k: [[v0, v1] for v0, v1 in v.items()]\n for k, v in self.token_quantities.items()\n },\n \"txo_byaddr\": {\n k.to_storage_string(): list(v) for k, v in self.txo_byaddr.items()\n },\n \"version\": self.DATA_VERSION,\n }\n self.wallet.storage.put(\"slp\", data)", "async def save(cls, data, save_file, *args, **kwargs):\n raise NotImplementedError()", "def store(self, data: Union[str, bytes, int, float]) -> str:\n key = str(uuid.uuid4())\n self._redis.set(key, data)\n return key", "def save(self, key, value):\n # deepcopy so that later modifications to value aren't reflected in the db\n self.data[key] = copy.deepcopy(value)", "def setData(self,newData):\r\n pass", "def add_data(self, data):\n self.data = self.data + data", "def save_shelf(self, shelf_name, data):\r\n shelf_path = os.path.join(self.full_dir, shelf_name)\r\n with shelve.open(shelf_path, 'c') as shelf:\r\n shelf['data'] = data", "def set_data(self, data):\n self._set_data(data)", "def store(self, data: Union[str, bytes, int, float]) -> str:\n k = str(uuid.uuid4())\n self._redis[k] = data\n return k", "def save(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.insert(self.__class__.__name__, data)\n\n self.__dict__.update(saved_data)", "def store(self, data: dict) -> tuple:\n item = self.model(**data)\n try:\n item.save()\n items = self.get_all()\n\n return items\n except DataBaseException as e:\n return {'error': {'message': e.message}}, 500", "def store(self, quantitiy):\n self._stored = self._stored + quantitiy", "def SetData(self, data):\r\n\r\n self._data = data", "def insert(self, data):\r\n pass", "def set_data(self, new_data):\n self.data = new_data", "def persist(self, values):\n pass", "def save_monitor_data(self, data_type, data, metadata=None):\n if isinstance(data, np.ndarray):\n data = {'data': data.tolist()}\n data['data_type'] = data_type\n if metadata is not None:\n data.update(metadata)\n self.monitor_cache.append((data_type, data))", "def save(self, values):" ]
[ "0.82004386", "0.788964", "0.7607003", "0.75672203", "0.753746", "0.7510255", "0.74823844", "0.74805313", "0.7291415", "0.7250676", "0.71713775", "0.7075155", "0.7072606", "0.7048692", "0.69521874", "0.69172645", "0.6839893", "0.68263394", "0.68209374", "0.6810317", "0.6782455", "0.67814493", "0.67521983", "0.67308974", "0.67308974", "0.6730723", "0.6726373", "0.6720174", "0.6705719", "0.6697373", "0.66660845", "0.6664942", "0.66636825", "0.66636825", "0.66636825", "0.66636825", "0.66636825", "0.66636825", "0.66521025", "0.6646962", "0.66350234", "0.6634842", "0.65851295", "0.65849197", "0.65849197", "0.656612", "0.6563253", "0.6559191", "0.6547565", "0.65417063", "0.65318894", "0.6531413", "0.65297925", "0.65249795", "0.6513164", "0.6502445", "0.64880747", "0.6478076", "0.6474736", "0.64546853", "0.6453926", "0.6452969", "0.64454305", "0.64404595", "0.64350015", "0.64319366", "0.64231986", "0.64219236", "0.64117515", "0.6401982", "0.63949", "0.63905567", "0.6389426", "0.6378031", "0.6354721", "0.6354721", "0.6354721", "0.63404995", "0.6338161", "0.6338161", "0.6331593", "0.63310194", "0.632789", "0.6319098", "0.6316953", "0.6312859", "0.63088477", "0.6302199", "0.6290043", "0.62809753", "0.6274785", "0.62699175", "0.62610215", "0.62586147", "0.62504756", "0.6246361", "0.62458694", "0.624478", "0.6233857", "0.62307936", "0.62240505" ]
0.0
-1
Check if all models stored are complete for the given timestamp.
def is_full(self):
    core_full = self.drone.complete() and self.subject.complete()
    if self.peds is None:
        return core_full
    else:
        return core_full and all([p.complete() for p in self.peds.values()])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complete(self, verbose=False):\r\n if (self.__num_tasks == 0 or\r\n self.__arrival_time == 0 or\r\n self.__num_tasks != len(self.__tasks)):\r\n #\r\n if verbose:\r\n print (\"Request %s incomplete. %d expected tasks, %d recorded tasks, \"\r\n \"arrival time %s\") % (self.__id, self.__num_tasks,\r\n len(self.__tasks), self.__arrival_time)\r\n return False\r\n for task in self.__tasks.values():\r\n if not task.complete(verbose):\r\n return False\r\n return True", "def complete(self, verbose=False):\r\n #if self.scheduler_launch_time == INVALID_TIME: print \"scheduler launch\"\r\n #if self.node_monitor_launch_time == INVALID_TIME: print \"nm launch\"\r\n #if self.completion_time == INVALID_TIME: print \"completion\"\r\n if verbose:\r\n if self.node_monitor_get_task_time == INVALID_TIME:\r\n print \"Task %s incomplete: node monitor get_task time missing\" % self.id\r\n elif self.scheduler_launch_time == INVALID_TIME:\r\n print \"Task %s incomplete: Scheduler launch time missing\" % self.id\r\n elif self.node_monitor_launch_time == INVALID_TIME:\r\n print \"Task %s incomplete: Node monitor launch time missing\" % self.id\r\n elif self.completion_time == INVALID_TIME:\r\n print \"Task %s incomplete: Completion time missing\" % self.id\r\n return (self.node_monitor_get_task_time != INVALID_TIME and\r\n self.scheduler_launch_time != INVALID_TIME and\r\n self.node_monitor_launch_time != INVALID_TIME and\r\n self.completion_time != INVALID_TIME)", "def complete(self):\r\n if self.scheduler_launch_time == INVALID_TIME:\r\n print \"Missing task scheduler launch time\"\r\n return False\r\n if self.node_monitor_launch_time == INVALID_TIME:\r\n\t print \"Missing task node monitor launch time\"\r\n\t return False\r\n\tif self.completion_time == INVALID_TIME:\r\n\t print \"Missing task completion time\"\r\n\t return False\r\n\tif self.clock_skew == INVALID_TIME_DELTA:\r\n print \"Missing task clock skew\"\r\n\t return False\r\n\treturn True", "def Complete(self):\n return self.date.Complete() and self.time.Complete()", "def complete(self):\n\n return any(\n [\n self.is_complete(),\n self.is_failed(),\n self.is_out_of_memory(),\n self.is_timeout(),\n ]\n )", "def has_full_batch(self) -> bool:", "def complete(self):\r\n if (self.__num_tasks == 0 or\r\n self.__arrival_time == 0 or\r\n self.__num_tasks != len(self.__tasks)):\r\n print (\"Expected to find %s tasks; found %s\" %\r\n (self.__num_tasks, len(self.__tasks)))\r\n return False\r\n for task in self.__tasks.values():\r\n if not task.complete():\r\n return False\r\n if len(self.__probes) == 0:\r\n return False # Don't consider non-probing requests\r\n return True", "def __updater_check_complete_all_event(self):\n if self._complete_all_event.is_set():\n self.logger.info(\"Received complete all request event in updater\")\n self._updater_map = {}\n self.msg.put_into_Queue()\n return True", "def is_complete(self) -> bool:\r\n return path.exists(self._has_completed_path)", "def complete(self):\r\n\tif self.launch_time == INVALID_TIME:\r\n\t print \"Missing probe launch time\"\r\n return False\r\n if self.received_time == INVALID_TIME:\r\n print \"Missing probe received time\"\r\n return False\r\n if self.completion_time == INVALID_TIME:\r\n print \"Missing probe completion time\"\r\n return False\r\n return True", "def is_all_done():\n for job_set in job_sets:\n if job_set.status != SetStatus.COMPLETED:\n return False\n return True", "def is_blast_db_up_to_date(self):\n if not self.have_blast_db():\n return False\n\n # get time creation blast database 
files\n modification_times = []\n files = glob.glob(self.path)\n for i in files:\n mod_time_in_secs = os.stat(i).st_ctime\n modification_times.append(datetime.datetime.fromtimestamp(mod_time_in_secs))\n modification_times.sort(reverse=True)\n time_creation_blast = modification_times[0].replace(tzinfo=pytz.utc)\n\n # get time creation time edited sequences in our database\n time_created_queryset = Sequences.objects.all().order_by('-time_created')[:1]\n time_created = time_created_queryset[0].time_created\n\n time_edited_queryset = Sequences.objects.all().order_by('-time_edited')[:1]\n time_edited = time_edited_queryset[0].time_edited\n\n if time_created > time_creation_blast or time_edited > time_creation_blast:\n return False\n else:\n return True", "def is_complete(self):\n return not (self.year is None or\n self.month is None or\n self.day is None)", "def is_valid(self):\n return self.startTime <= ApiKey.get_now() < self.endTime", "def event_processing_finished(self):\n if self.status in ACTIVE_STATES:\n return False # tally of events is only available at end of run\n try:\n event_qs = self.get_event_queryset()\n except NotImplementedError:\n return True # Model without events, such as WFJT\n return self.emitted_events == event_qs.count()", "def should_update(self) -> bool:\n if CoronaCaseRaw.objects.all().count() == 0:\n return True\n last_updated = CoronaCaseRaw.objects.latest('date_received').date_received\n return timezone.now() >= last_updated + timezone.timedelta(seconds=self.interval)", "def is_available_at(self, datetime):\n for booking in self.booking_set.all():\n if booking.schedule_start <= datetime < booking.schedule_end and not booking.is_cancelled():\n return False\n return True", "def is_ready_to_run(self, at_time):\n return (self.next_time - at_time) <= 0", "def is_done(self):\n return time.time() - self._start > self._time", "def is_complete(self):\n status = self.get_status()\n return status[\"status\"] == 4", "def is_complete(self) -> bool:\n return (\n (\n self.materialized_subset | self.failed_and_downstream_subset\n ).num_partitions_and_non_partitioned_assets\n == self.target_subset.num_partitions_and_non_partitioned_assets\n )", "def check(self):\n\n self.check_auto_update()\n assert not self.empty()", "async def is_model_ready(\n self,\n model_name: str,\n model_version: str = ...,\n headers: dict[str, t.Any] = ...,\n ) -> bool:", "def is_complete(self):\n return all([\n len(strip_tags(score.notes)) > 0 for score in self.scores.all()\n ])", "def is_all_obsolete(self, now):\n if self._timestamps:\n return max(timestamps.last_active for timestamps in self._timestamps.itervalues()) + CANDIDATE_OBSOLETE < now\n return True", "def checkGameComplete(self):\n for rowKey in self.table:\n for ele in self.table[rowKey]:\n if type(ele) == int:\n return False # means not complete\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def is_complete(self):\n pass", "def is_complete(self, assignment):\n for a in self.agents:\n if self.calc_agent_budget(a, assignment):\n return False\n for t in self.tasks:\n if self.calc_task_budget(t, assignment):\n return False\n return True", "def _check_all_systems_ready(self):\n for r in self.robots:\n r.joints = None\n while r.joints is None and not rospy.is_shutdown():\n try:\n r.joints = rospy.wait_for_message(\n r.ns + '/joint_states', JointState, timeout=3.0)\n except:\n rospy.logerr(\"Current 
/joint_states not ready yet.\\n\\\n Do you spawn the robot and launch ros_control?\")\n try:\n r.model_index = rospy.wait_for_message('/gazebo/model_states', ModelStates, 3).name.index(r.ns[1:])\n except rospy.exceptions.ROSException:\n rospy.logerr(\"Robot model does not exist.\")\n\n # rospy.logdebug(\"ALL SYSTEMS READY\")\n return True", "def isFresh(self, timestamp):\n pass;", "def isFresh(self, timestamp):\n if not os.path.exists(self.__resource):\n return False;\n return os.path.getmtime(self.__resource) < timestamp;", "def is_ready_to_reap(self):\n self.calc_progress()\n return self._num_results > 0 and (\n self._num_results == self.num_sown_batches\n )", "def _check_all_systems_ready(self):\n \n self._check_all_sensors_ready()\n #self._check_joint_states_ready()\n self._check_cmd_vel_pub()\n \n return True", "def isComplete(self):\n for n in range(9):\n for m in range(9):\n if self.puzzle[n][m] == 0:\n return False\n return True", "def must_run(self):\r\n self.current_time = datetime.now()\r\n return all([self._minute(), self._hour(), self._day_of_month(), self._month(), self._day_of_week()])", "def ready(self, flush_time):\n return len(self.t) > 0 and self.t[-1] >= flush_time", "def check_if_ok_to_update(self):\n current_time = int(time.time())\n last_refresh = self.last_refresh\n if last_refresh is None:\n last_refresh = 0\n if current_time >= (last_refresh + self.refresh_rate):\n return True\n return False", "def is_loaded(self):\n return self.known_stations != {}", "def is_complete(self):\n qstat = self._grep_qstat('complete')\n comp = self._grep_status('complete')\n if qstat and comp:\n return True\n return False", "def phone_timezones_have_been_processed():\n if settings.UNIT_TESTING:\n override = getattr(\n settings, 'PHONE_TIMEZONES_HAVE_BEEN_PROCESSED', None)\n if override is not None:\n return override\n return (_get_migration_status_from_threadlocals()\n == MigrationStatus.COMPLETE)", "def is_updated(self):\n return self.timestamp > 0", "def needs_update(self):\n now = time.time()/60\n return (self.last_update_time_in_minutes+self.timeout) < now", "def check_consistency(object) -> bool:\n time = np.array(list(object.keys()))\n time_diff = time[1:] - time[0:-1]\n return np.all(time_diff == 1)", "def has_t(self):\n return any(map(lambda s: s.is_temporal, self))", "def db_status_ok():\n for Model in apps.get_models():\n table_name = Model._meta.db_table\n if not db_table_exists(table_name):\n return False\n return True", "def needs_update(self, *path):\n dt_fmt = \"%Y-%m-%d %H:%M:%S\"\n try:\n linfo = self.info(*path)\n dt_local = datetime.datetime.strptime(\n linfo[\"datetime\"][:19], dt_fmt)\n dt_server = datetime.datetime.strptime(\n self.serverfiles.info(*path)[\"datetime\"][:19], dt_fmt)\n return dt_server > dt_local\n except FileNotFoundError:\n return True\n except KeyError:\n return True", "def check_if_full(self):\n pass", "def _check_if_statistics_calculation_is_needed():\n expiration_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(\n seconds=UploadHandler.EXPIRATION_TIME_IN_SECONDS)\n not_expired_data = UnprocessedData.objects.filter(uploaded_at__gte=expiration_date)\n sites_of_not_expired_data = not_expired_data.values_list('site_id', flat=True).distinct()\n all_sites = UnprocessedData.objects.filter(uploaded_at__lte=expiration_date).values_list('site_id',\n flat=True).distinct()\n for s in all_sites:\n if s not in sites_of_not_expired_data:\n from_date = 
UnprocessedData.objects.filter(site_id_id=s).order_by('from_date')[0].from_date\n to_date = UnprocessedData.objects.filter(site_id_id=s).order_by('-to_date')[0].to_date\n logger.info(\"should create stats for {} from {} to {}\".format(s, from_date, to_date))\n site_obj = get_object_or_404(Site, pk=s)\n UploadHandler.create_statistics(site=site_obj, from_date=from_date, to_date=to_date)\n UnprocessedData.objects.filter(site_id_id=s).delete()\n\n if len(sites_of_not_expired_data):\n Timer(UploadHandler.INTERVAL, UploadHandler._check_if_statistics_calculation_is_needed).start()\n else:\n UploadHandler.is_interval_running = False", "def found_schedules(self) -> bool:\n return self._schedule_list != []", "def is_done_in_the_past(self):\n return any(self.hash == rec['hash'] for rec in self.records)", "def testAllTimesExists(self):\n times = []\n for ref in self.coal.get_community_references():\n times.append(self.coal.get_community_parameters(ref)[\"time\"])\n for time in times:\n self.assertTrue(time in self.times, msg=\"Time {} not in times.\".format(time))\n for time in self.times:\n self.assertTrue(time in times, msg=\"Time {} not in times.\".format(time))", "def is_complete(self) -> bool:\n raise NotImplementedError(\"Base method not implemented\")", "def toc(self,timestamp):\n return self._timestamp > timestamp", "def objectsReady(self, n):\n return len(self.files) >= n", "def is_any_active(self, now):\n if self._timestamps:\n return (now < max(timestamps.last_walk for timestamps in self._timestamps.itervalues()) + CANDIDATE_WALK_LIFETIME or\n now < max(timestamps.last_stumble for timestamps in self._timestamps.itervalues()) + CANDIDATE_STUMBLE_LIFETIME)\n return False", "def is_complete(self):\n acquired_points = self.dset.shape[0]\n total_nr_pts = np.shape(self.get_sweep_points())[0]\n if acquired_points < total_nr_pts:\n return False\n elif acquired_points >= total_nr_pts:\n if self.soft_avg() != 1 and self.soft_iteration == 0:\n return False\n else:\n return True", "def complete(self):\n return (self.memberDevices <= len(self.members)) or not self.exists", "def complete(self):\n for runner in self._runners:\n if not runner.complete:\n return False\n return True", "def check_models_ready(self):\n if not self.models_ready:\n raise RuntimeError(\"Models aren't loaded yet.\")", "def allready(antReady) :\n return numNotready(antReady) == 0", "def isComplete(self):\n assert len(self._x) > 0\n assert len(self._y) > 0\n assert 2 == len(self._data_array.shape)\n assert self.wkt is not None\n assert self.wkt != ''\n\n return True", "def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0", "def valid(self):\n delta = datetime.datetime.now() - self._last_access\n return ((delta.seconds < 7200) and not self.is_empty()) or \\\n (delta.seconds < 60)", "def detect_completion(self):\n results_dir = glob.glob(f\"{self.production.rundir}\")\n if len(results_dir)>0: # dynesty_merge_result.json\n if len(glob.glob(os.path.join(results_dir[0], f\"extrinsic_posterior_samples.dat\"))) > 0:\n return True\n else:\n return False\n else:\n return False", "def _check_completed(self):\n current_rung_df = self.sieve_board.loc[\n self.sieve_board['status'].isin(\n [StatusType.WAITTING, StatusType.RUNNING])\n ]\n if current_rung_df.empty:\n return True\n else:\n return False", "def complete(self):\n return all((constraint.satisfied() for constraint in self.constraints))", "def has_happened(self):\n\n return self.end < timezone.now()", "def 
valid_update_flags(self) -> bool:\n if CoronaCaseRaw.objects.all().count() < 2:\n return True\n return not CoronaCaseRaw.objects.filter(update_flag=(not self.latest_flag())).exists()", "def is_active(self):\n return (datetime.now() - self.updated).days < 100", "def is_full(self):\n return len(self.cache_data) >= self.MAX_ITEMS", "def completed(self):\n content = self.getContent()\n for field in self.fields.values():\n if not field.field.required:\n continue\n dm = zope.component.getMultiAdapter(\n (content, field.field), IDataManager)\n if dm.query(\n field.field.missing_value) is field.field.missing_value:\n return False\n return True", "def is_training_completed(self) -> bool:\n return (self.model_dir/self.TRAINING_COMPLETED_FILE_NAME).is_file()", "def validate_ts(self):\n try:\n self.get_log_file()\n\n self.parse_vibrations()\n\n self.obtain_geometries()\n\n self.percent_changes = self.obtain_percent_changes()\n\n\n center_values = np.log(\n self.percent_changes[self.percent_changes.center].percent_change.mean())\n shell_values = np.log(\n self.percent_changes[self.percent_changes.center != True].percent_change.mean())\n\n if center_values > shell_values + 1:\n logging.info(\"Vibrational analysis was successful\")\n return True\n else:\n logging.info(\n \"Cannot reasonably say that we have arrived at a TS through vibrational analysis.\")\n return False\n except AssertionError:\n logging.info(\"Something went wrong when attempting vibrational analysis...\")\n logging.info(\"Cannot verify via vibrational analysis\")\n return False", "def _is_finished(self, as_of):\n if self.is_one_off():\n last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]\n return last_billing_cycle.date_range.upper <= as_of\n else:\n return False", "def available(self) -> bool:\n return self._coordinator.last_update_success", "def has_finished():", "def valid(t):\n return float(t) > time.time()", "def check_all_done(self, label: str):\n all_converged = True\n if not self.output[label]['convergence']:\n for job_type, spawn_job_type in self.job_types.items():\n if spawn_job_type and not self.output[label]['job_types'][job_type] \\\n and not ((self.species_dict[label].is_ts and job_type in ['scan', 'conformers'])\n or (self.species_dict[label].number_of_atoms == 1\n and job_type in ['conformers', 'opt', 'fine', 'freq', 'rotors', 'bde'])\n or job_type == 'bde' and self.species_dict[label].bdes is None\n or job_type == 'conformers'\n or job_type == 'irc'\n or job_type == 'tsg'):\n logger.debug(f'Species {label} did not converge.')\n all_converged = False\n break\n if all_converged:\n self.output[label]['convergence'] = True\n if self.species_dict[label].is_ts:\n self.species_dict[label].make_ts_report()\n logger.info(self.species_dict[label].ts_report + '\\n')\n zero_delta = datetime.timedelta(0)\n conf_time = extremum_list([job.run_time for job in self.job_dict[label]['conformers'].values()],\n return_min=False) \\\n if 'conformers' in self.job_dict[label].keys() else zero_delta\n tsg_time = extremum_list([job.run_time for job in self.job_dict[label]['tsg'].values()], return_min=False) \\\n if 'tsg' in self.job_dict[label].keys() else zero_delta\n opt_time = sum_time_delta([job.run_time for job in self.job_dict[label]['opt'].values()]) \\\n if 'opt' in self.job_dict[label].keys() else zero_delta\n comp_time = sum_time_delta([job.run_time for job in self.job_dict[label]['composite'].values()]) \\\n if 'composite' in self.job_dict[label].keys() else zero_delta\n other_time = 
extremum_list([sum_time_delta([job.run_time for job in job_dictionary.values()])\n for job_type, job_dictionary in self.job_dict[label].items()\n if job_type not in ['conformers', 'opt', 'composite']], return_min=False) \\\n if any([job_type not in ['conformers', 'opt', 'composite']\n for job_type in self.job_dict[label].keys()]) else zero_delta\n self.species_dict[label].run_time = self.species_dict[label].run_time \\\n or (conf_time or zero_delta) + \\\n (tsg_time or zero_delta) + \\\n (opt_time or zero_delta) + \\\n (comp_time or zero_delta) + \\\n (other_time or zero_delta)\n logger.info(f'\\nAll jobs for species {label} successfully converged. '\n f'Run time: {self.species_dict[label].run_time}')\n # Todo: any TS which did not converged (any rxn not calculated) should be reported here with full status: Was the family identified? Were TS guesses found? IF so, what's wrong?\n elif not self.species_dict[label].is_ts or self.species_dict[label].ts_guesses_exhausted:\n job_type_status = {key: val for key, val in self.output[label]['job_types'].items()\n if key in self.job_types and self.job_types[key]\n and (key != 'irc' or self.species_dict[label].is_ts)}\n logger.error(f'Species {label} did not converge. Job type status is: {job_type_status}')\n # Update restart dictionary and save the yaml restart file:\n self.save_restart_dict()", "def status_check(self):\n from coordinator.tasks import cancel_release\n # Check if we hit the time limit\n last_update = self.events.order_by('-created_at')\\\n .first().created_at\n diff = datetime.datetime.utcnow() - last_update.replace(tzinfo=None)\n\n if diff.total_seconds() > settings.RELEASE_TIMEOUT:\n if self.state == 'canceling':\n return\n logger.error(f'canceling release {self.kf_id} for time out.')\n self.cancel()\n self.save()\n django_rq.enqueue(cancel_release, self.kf_id)\n return\n\n # Check if any contained tasks have failed/canceled\n for task in self.tasks.all():\n if task.state in ['failed', 'canceled', 'rejected']:\n if self.state == 'canceling':\n return\n logger.error(f'canceling release: {self.kf_id} task is ' +\n f'{task.state}')\n self.cancel()\n self.save()\n django_rq.enqueue(cancel_release, self.kf_id)\n return", "def mark_completed_trips_from_obs(session, trip_ids_from_observations):\n print('checking {} observations for completed trips'.format(len(trip_ids_from_observations)))\n uncompleted = Trip.uncompleted(session)\n trips_with_no_observations = [trip for trip in uncompleted if trip.trip_id not in trip_ids_from_observations]\n for trip in trips_with_no_observations:\n trip.trip_end = TrainActivity.most_recent_for_trip_id(trip.trip_id).timestamp\n\n return len(trips_with_no_observations)", "def full(self):\n return len(self.future_buffer) == self.CAPACITY", "def is_ready_update(self):\n size_of_buffer = len(self.training_buffer.update_buffer['actions'])\n return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)", "def check_pickle(src: \"list[str]\", targets: \"list[str]\"):\n src_time = np.array([os.path.getmtime(item) for item in src])\n targets_time = np.array([os.path.getmtime(item) for item in targets])\n for time in targets_time:\n if np.any(src_time > time):\n csv_pickle()\n print('new pickle data were successfully made.')\n break", "def latest_flag(self) -> bool:\n if CoronaCaseRaw.objects.all().count() == 0:\n return True\n return CoronaCaseRaw.objects.latest('date_received').update_flag", "def is_complete(self):\r\n # We normally don't want django dependencies in 
xmodule. foldit is\r\n # special. Import this late to avoid errors with things not yet being\r\n # initialized.\r\n from foldit.models import PuzzleComplete\r\n\r\n complete = PuzzleComplete.is_level_complete(\r\n self.system.anonymous_student_id,\r\n self.required_level,\r\n self.required_sublevel,\r\n self.due_time)\r\n return complete", "def is_ready_update(self):\n size_of_buffer = len(self.training_buffer.update_buffer[\"actions\"])\n return size_of_buffer > max(\n int(self.trainer_parameters[\"buffer_size\"] / self.policy.sequence_length), 1\n )", "def probe_completed(self, job, queue_lengths, current_time):\r\n events = []\r\n task_arrival_time = current_time + get_param(\"network_delay\")\r\n used = {}\r\n all_empty_queues = True\r\n for counter, task_constraints in enumerate(job.constraints):\r\n servers = [s for s in queue_lengths if s[0] in task_constraints]\r\n best = min(servers, key=lambda x: x[1])\r\n if best[1] > 0:\r\n all_empty_queues = False \r\n # Increment the load on the chosen server. Assumes value returned\r\n # is in units of one task.\r\n queue_lengths.remove(best)\r\n queue_lengths.append((best[0], best[1] + 1))\r\n if get_param(\"record_task_info\"):\r\n job.record_probe_result(counter, best[1])\r\n events.append((task_arrival_time,\r\n TaskArrival(best[0], job, counter)))\r\n if all_empty_queues:\r\n self.stats_manager.record_job_with_all_empty_queues()\r\n return events", "def is_full(self):\n return len(self.keys) == self.order", "def isValidationInProgress(self):\n validation = self.getLatestValidValidation()\n today = date.today()\n if validation and validation.getDownTo():\n validfrom = validation.getDownFrom().asdatetime().date()\n validto = validation.getDownTo().asdatetime().date()\n if validfrom <= today <= validto:\n return True\n return False", "def has_timestamp(self):\n return (self.data_type() & 0x100 == 0x100) and (self.raw_data_length() >= 8)", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def Complete(self):\n return self.hour is not None and self.minute is not None and self.second is not None", "def __is_complete__(self,config,mockdb):\n if GenericProcess.__is_complete__(self):\n return True\n if self.pipelines is None:\n return False\n for pipeline in self.__current_pipeline_list__(mockdb):\n if not pipeline.__is_complete__():\n return False\n return True", "def is_ready(self):\n return self.prep_job.is_done()", "def is_all_dates(self) -> bool:\n return False", "def tasks_are_available(tasks):\n task_not_finished_not_scheduled_count = len(tasks)\n for task in tasks:\n if task.getisTaskFinished():\n continue\n if task.getisTaskScheduled():\n continue\n else:\n task_not_finished_not_scheduled_count -= 1\n if task_not_finished_not_scheduled_count < len(tasks):\n return True\n else:\n return False", "def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0", "def __reader_check_complete_all_event(self):\n if self._complete_all_event.is_set():\n self.logger.info(\"Received complete all request event in reader\")\n self._reader_map = {}\n self.msg.put_into_Queue()\n return True" ]
[ "0.617297", "0.6029285", "0.59201694", "0.5905041", "0.5833025", "0.5806174", "0.58009607", "0.5789535", "0.5736804", "0.56675947", "0.566142", "0.56097996", "0.55602163", "0.55594164", "0.555323", "0.55530936", "0.55497646", "0.55493504", "0.5537272", "0.5535819", "0.55284566", "0.55262035", "0.55253464", "0.549965", "0.5499578", "0.54865664", "0.5479413", "0.5479413", "0.54698735", "0.54668367", "0.5461132", "0.5460218", "0.5455605", "0.5453121", "0.5451642", "0.5440424", "0.5439322", "0.54299", "0.54235196", "0.5388577", "0.53882843", "0.538827", "0.5375584", "0.53749406", "0.53450537", "0.5338437", "0.53345966", "0.5329436", "0.53242314", "0.53082365", "0.5307732", "0.5302545", "0.5291354", "0.52841103", "0.5276517", "0.52654487", "0.52581376", "0.52532995", "0.52503395", "0.5235934", "0.52306503", "0.52237797", "0.5223372", "0.5217331", "0.52121717", "0.520216", "0.52004707", "0.5189944", "0.5186668", "0.5179705", "0.51797014", "0.5176425", "0.51747656", "0.5173572", "0.5160912", "0.51510674", "0.5119131", "0.5116034", "0.51115155", "0.510899", "0.5108539", "0.5108255", "0.51034886", "0.5102479", "0.50999635", "0.50980234", "0.50967324", "0.5096016", "0.50959855", "0.50876546", "0.50867087", "0.5085624", "0.5080901", "0.5072522", "0.5069816", "0.5061298", "0.5057441", "0.5044814", "0.5043063", "0.5041278" ]
0.57210994
9
Empties the models within the bag
def empty_bag(self): if self.peds is not None: for _, model in self.peds.items(): model.reset() self.drone.reset() self.subject.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear():\n\t\tModel.counter = 0", "def ClearModels(self):\n self._modelFileNames = []\n self._models = []\n self.Modified(readAgain=True)", "def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()", "def reset_bag(self):", "def clear(self) -> None:\n self.objects = []", "def clear(self):\n self.beginResetModel()\n self.root_item = RootItem()\n self.requests_items = {}\n self.endResetModel()", "def resetmodel(self):\n for key, value in self._dentsvertsdata.items():\n value.free()\n self._dentsvertsdata.clear()", "def clear_batch(self):\n self._batch_idx = 0\n self.variant_states = None\n self.object_specs = None\n self.object_attribute_values = None", "def clear(self):\n for ob in self.obs:\n ob.clear()\n return", "def clear(self):\r\n\t\tself.free_objects[:] = []", "def clear(self) -> None:\n # Creates a new, empty bag and assigns self.da to the new, empty bag.\n new_bag = Bag()\n self.da = new_bag.da", "def clear():\n MIGRATIONS.clear()", "def clear(self):\n self.vars = []", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def _finalize(self):\n for model in self.models:\n model._finalize()", "def clear(self):\n self._items = []", "def clear(self) -> None:\n self.saved.clear()", "def clear(self):\n\n\t\tfor chain in self.chain:\n\t\t\tchain.clear()\n\n\t\tself.chain = []\n\t\tself.remark = []", "def clear(self):\r\n self.orderitem_set.all().delete()", "def reset(self):\n self.vna.write(edit_list(self.model))\n self.vna.write(clear_list(self.model))", "def clear(self):\n self._pkcache = {}\n self._typecache = defaultdict(dict)\n self.init()", "def clear(self):\n self._store = {}", "def reset(self):\n self.entities = set()\n self.frozen = False", "def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None", "def reset(self):\n self._setupObjects()", "def reset(self):\n # Clear mutable data, but leave the immutables intact\n self.train_data = {}\n self.val_data = {}\n self.test_data = {}\n self.model_files = []\n self.custom_data = {}\n # Remove all the physical assets\n for item in os.scandir(self.root_path):\n os.remove(item.path)\n # Reserialize\n self.serialize()", "def clear(self):\n self.versions = {}\n self.clearItems()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()", "def tearDown(self):\n for model in MODELS:\n for obj in model.objects.all():\n obj.delete()", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def reset(self):\n for provider in self.providers.values():\n provider.reset()\n\n for observation in self.observations.values():\n observation.reset()", "def clear_dummy_obj(self):\n for d in self.dummies:\n self.map.remove_node(d)\n\n self.dummies = []", "def clear(self):\n # Orphan all objects\n for obj in self.uow:\n state(obj).session = None\n self.uow.clear()\n self.imap.clear()", "def clear_all(self):\n raise NotImplementedError", "def _data_reset(self):\n conn = self.get_connection()\n\n elements = {\n **self.domain.registry.aggregates,\n **self.domain.registry.entities,\n 
**self.domain.registry.views,\n }\n for _, element_record in elements.items():\n provider = current_domain.providers[element_record.cls.meta_.provider]\n repo = self.domain.repository_for(element_record.cls)\n\n model_cls = repo._model\n if provider.conn_info[\n \"DATABASE\"\n ] == Database.ELASTICSEARCH.value and conn.indices.exists(\n model_cls._index._name\n ):\n conn.delete_by_query(\n refresh=True,\n index=model_cls._index._name,\n body={\"query\": {\"match_all\": {}}},\n )", "def reset(self):\n self.dict_lock.acquire()\n self.list_lock.acquire()\n\n self.beginResetModel()\n self.levels_dict = {}\n self.view_list = []\n self.endResetModel()\n \n self.list_lock.release()\n self.dict_lock.release()", "def clear(self) -> None:\n self.items.all().delete()\n self._cached_items = None", "def clear_model_cache():\n global __model_cache\n __model_cache = {}", "def remove_objects(self):\n logger.debug('Removing all objects from model.')\n del self._objects[:]", "def clear(self) -> None:", "def full_clear(self):\n self.clear()\n self.class_hooks.clear()", "def reset(self):\n for layer in self.network:\n layer.clean()", "def clear(self):\n ...", "def clear(self) -> None:\n self._store.clear()", "def clear(self):\n self.__dict__.clear()", "def clear(self):\n if self.debug:\n print(\"DIMS cleared\")\n self.sp_dicts.clear()", "def remove_all_recs(self):\n return self.storage.clear()", "def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis Sections instances\")\n AnalysisCategorySection.objects.all().delete()", "def clear_data(self):\n self.game_list.clear()\n self.game_scores.clear()", "def clear(self, using=None):\n backend = self.get_backend(using)\n\n if backend is not None:\n backend.clear(models=[self.get_model()])", "def clear(self):\n pass", "def clear(self):\n pass", "def clear(self):\n pass", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def clear_all(self):\n self._data = {}\n self.uncache()\n self.dirty = True\n self.shipping_method = None\n self.payment_method = None\n self.customer_comment = \"\"", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()", "def clear(self):\n self.recorders = set([])\n self.reset()\n\n # Stop any currently running SpiNNaker application\n self.stop()", "def empty(self):\n self.items = []\n self.totalWeight = 0", "def clear_model_sigs(self):\n self._model_sigs.clear()", "def clear(self):\n for key in self.__data.keys():\n del self.__data[key]", "def clear():", "def reset():\n teardown_db()\n build()", "def clearStore(self):\n os.remove(self.uid+\".pcl\")\n self.items = []", "def clear_collection(self):\n self._cards = []", "def clean(self):\r\n 
self.roadrunnerModel = None\r\n return self", "def clearList(self):\r\n self.skills.clear()\r\n del self.orderedSkills[:]", "def clear(self):\r\n try:\r\n while True:\r\n self.pop()\r\n except KeyError:\r\n pass", "def reset(self):\r\n self.pop()", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def reset(self):\n self.__sets = []\n self._computed = False", "def clear(self):\n try:\n self._load(False)\n except KeyError:\n return\n\n for i in xrange(self.size):\n try:\n del self.db[i]\n except KeyError:\n pass\n del self.db['count']\n del self.db['head']\n del self.db['size']", "def _unload(apps, schema_editor):\n for modelname in models:\n model = apps.get_model(appname, modelname)\n model.objects.all().delete()", "def clear(self):\n for key in self.keys():\n del self[key]", "def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None", "def clear(self) -> None:\n ...", "def clear(self):\n self.__list = []", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def clear(self):\n self._in_memory_objects.clear()\n self._disk_objects.clear()\n self._object_to_list_node.clear()\n for bucket in self._buckets:\n bucket.clear()", "def reset(self):\n for item in TextChannelFilterItem.objects(channel_filter=self):\n item.delete()\n self.reset_counters()\n self.retrain()", "def clear(self):\n self._zombie_list = []\n self._human_list = []\n poc_grid.Grid.clear(self)", "def clear(self) -> None:\n self.selected = {}\n self.orderings = []\n self.predicate = None\n self.limit_index = None\n self.offset_index = None\n self.callbacks.clear()", "def reset(self, model):\n for obs_func in self.observation_functions:\n obs_func.reset(model)", "def clear(self) -> None:\n self._items = []\n self._size = 0", "def clear(self):\n self._clear()", "def clear(self):\n self._clear()", "def clear(self):\n self.tensor_store.clear()\n self.i = 0", "def clear(self):\r\n poc_grid.Grid.clear(self)\r\n self._zombie_list = []\r\n self._human_list = []", "def reset(self):\n self.satisfiability = Satisfiability.UNTESTED\n self.model = None\n self.unsatCore = []", "def clear(self):\n self.keyvaluepair_set.all().delete()", "def clear(self):\n self._list.clear()", "def clear(self):\n self.weight = 0", "def clear(self):\n # we want to use self.inputs.clear() but it's not in python2\n del self.inputs[:]\n del self.outputs[:]", "def clear(self):\n poc_grid.Grid.clear(self)\n self._zombie_list = []\n self._human_list = []\n # need proof it works" ]
[ "0.77507436", "0.76478666", "0.7637113", "0.74050826", "0.7388874", "0.73118776", "0.7252325", "0.709604", "0.7079502", "0.6967123", "0.69053054", "0.6871042", "0.68186563", "0.6815605", "0.68100023", "0.68016225", "0.6794553", "0.6774137", "0.67709094", "0.67597353", "0.6744278", "0.6739675", "0.6722856", "0.67019886", "0.6701017", "0.6696451", "0.66916823", "0.6679739", "0.6679739", "0.6679739", "0.667821", "0.667821", "0.667821", "0.667821", "0.667821", "0.667821", "0.667821", "0.66702384", "0.6670163", "0.66570276", "0.6644759", "0.6627977", "0.6627338", "0.66188836", "0.6617718", "0.66142803", "0.66092324", "0.65913755", "0.65902776", "0.6579889", "0.65718615", "0.65622425", "0.6538044", "0.65273887", "0.6523186", "0.6520358", "0.65177184", "0.65150136", "0.65150136", "0.65150136", "0.65143377", "0.65091443", "0.6507829", "0.6505601", "0.64999413", "0.6488509", "0.64877206", "0.648671", "0.6478849", "0.6478075", "0.6476727", "0.6474495", "0.64703935", "0.64685756", "0.64683497", "0.6465181", "0.64621246", "0.6456299", "0.64552265", "0.6455104", "0.6451751", "0.6448085", "0.64293444", "0.6427194", "0.64233106", "0.64203227", "0.64196324", "0.6414099", "0.6407899", "0.6406558", "0.6404729", "0.6404729", "0.6390545", "0.63808584", "0.6380032", "0.6377471", "0.63693184", "0.6368381", "0.6367519", "0.6367357" ]
0.8674077
0
Flushes the stored data and empties the bag. Use the drone ts
def get_data(self): data = { "ts": self.drone.pos[0][0], "drone": self.drone, "subject": self.subject, "peds": self.peds, # can be None "objs": self.objs # can be None } self.empty_bag() return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_data():\n redis_db.flushdb()", "def _flush(self):\n self._d = {}", "def flush(self):\n self.cur_stocks = self.cur_stocks.drop(self.cur_stocks.index)\n # add history that flushed whole stocks", "def reset(self):\n self.temp_data.clear()", "def empty_bag(self):\n if self.peds is not None:\n for _, model in self.peds.items():\n model.reset()\n self.drone.reset()\n self.subject.reset()", "def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()", "def purge(self):\n pass", "def finalize(self):\n self.storage.finalize(basket=self)\n self.uncache()\n self._data = None\n self.dirty = False", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def clear(self):\r\n self._state[\"data\"].clear()\r\n self._state[\"session\"].request_rerun()", "def _flush(self):\n tempbuf = self.databuffer\n self.databuffer = []\n self.database.runInteraction(self._executemany, tempbuf)", "def gc(self):\n if self.verbose:\n t0=time.time()\n self.data = None\n self.data_orig = None\n self.weights = None\n self.weights_orig = None\n if self.verbose:\n t1=time.time()\n print(\"Unload time: %0.2f s\" % (t1-t0))\n g.collect()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._state[\"data\"].clear()\n self._state[\"session\"].request_rerun()", "def clear(self):\n self._grasp_data = None\n self._status = None\n self._cache = dict()\n self._trajectory_result = None", "def flush(self):\n self.table = []", "def flush(self):\n self.genomes = []", "def reset(self):\n self._data = []", "def cleanup(self) -> None:\n self.min_insert_size = -1 # ensure everything gets flushed\n self.insert_data()\n self.db_event_time = sum(self.event_times)", "def removeall(self):\n\n # If there used to be a key, there must exist an old value blob somewhere in the database. It should be deallocated after a successful commit to disk.\n for key in self.keys:\n if self.keys[key] is not None:\n punchat,punchlen = self.keys[key]\n self.awaitingpunch.append((punchat, punchlen))\n \n self.keys = {}\n self.buffered = {}\n self.cache = {}\n \n if self.autocommit:\n commit()", "def clear_data():\n conn = get_connect()\n #conn.execute(\"DELETE from match\")\n #conn.execute(\"DELETE from account\")\n #conn.execute(\"DELETE from championMatchData\")\n conn.execute(\"DELETE from championData\")\n conn.commit()\n conn.close()\n print(\"all data in info.db has been cleared\")\n return", "def reset_bag(self):", "def reset_data(self):\n self.data = []", "def clear(self) -> None:\n # Creates a new, empty bag and assigns self.da to the new, empty bag.\n new_bag = Bag()\n self.da = new_bag.da", "def flush(self):\r\n if stub_manager.active_stubs == 'remote':\r\n import random\r\n import string\r\n code = ''.join([random.choice(string.ascii_letters)\r\n for x in range(4)])\r\n print \"\\n\\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\r\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\r\n print \"Warning! You're about to delete the *production* datastore!\"\r\n print \"Only models defined in your INSTALLED_APPS can be removed!\"\r\n print \"If you want to clear the whole datastore you have to use \" \\\r\n \"the datastore viewer in the dashboard. 
Also, in order to \" \\\r\n \"delete all unneeded indexes you have to run appcfg.py \" \\\r\n \"vacuum_indexes.\"\r\n print \"In order to proceed you have to enter the following code:\"\r\n print code\r\n response = raw_input(\"Repeat: \")\r\n if code == response:\r\n print \"Deleting...\"\r\n delete_all_entities()\r\n print \"Datastore flushed! Please check your dashboard's \" \\\r\n \"datastore viewer for any remaining entities and \" \\\r\n \"remove all unneeded indexes with appcfg.py \" \\\r\n \"vacuum_indexes.\"\r\n else:\r\n print \"Aborting.\"\r\n exit()\r\n elif stub_manager.active_stubs == 'test':\r\n stub_manager.deactivate_test_stubs()\r\n stub_manager.activate_test_stubs(self)\r\n else:\r\n destroy_datastore(get_datastore_paths(self.settings_dict))\r\n stub_manager.setup_local_stubs(self)", "def tearDown(self):\n self.all_ob = storage.all()\n self.all_ob.clear()\n storage.save()", "def clear_mem(self):\n dbe.upload_trials(self.trials, self.name)\n self.trials = []", "def clear(self):\n self._data = []", "def clear(self):\n self._data = []", "def flush(self):\n super().flush()\n self.dists = {}", "def finish(self):\r\n\r\n\t\t# Write any unempty buffers to file\r\n logger.info('Writing un-empty buffers...')\r\n self._writeBuffers()\r\n\r\n\t\t# Compact datasets\r\n logger.info('Compacting datasets...')\r\n for ds in self.datasets.keys():\r\n\r\n self._resizeDataset(self.datasets[ds], ds, finished=self.idxs[ds])\r\n\r\n\t\t# close the database\r\n self.db.close()", "def _purge():\r\n _cache.clear()", "def last_buy(self):\n multi_data = []\n while not self.infoQueue.empty():\n multi_data.append(self.infoQueue.get_nowait())\n self.redisHandle.set_multiple_data(multi_data)\n print(\"flush all data\")", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def flush(self):", "def reset_data(self):\n self.data = None", "def clearStore(self):\n os.remove(self.uid+\".pcl\")\n self.items = []", "def clear():", "def flush(self):\n return", "def flush(self):\n # flush entire inventory\n self.__flushGrids(self.keys())", "def __del__(self):\n self.evaler.db.flush()", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def flush(self):\n pass", "def clear(self):\n try:\n self._load(False)\n except KeyError:\n return\n\n for i in xrange(self.size):\n try:\n del self.db[i]\n except KeyError:\n pass\n del self.db['count']\n del self.db['head']\n del self.db['size']", "def clearData():\n Co8PersistentData.__dataDict.clear()", "def clear(self):\n self._store = {}", "def clear_data(self):\n self.game_list.clear()\n self.game_scores.clear()", "def clear(self):\n self._data.clear()", "def clear_data(cls):\n cls.__data.clear()\n cls.__counters.clear()", "def reset_the_db(_step):\r\n reset_data(None)", "def flush(self) -> None:\n pass", "def reset(self):\n if self.monotonic_energy is not None:\n self.monotonic_energy.reset()\n if self.chunk_energy is not None:\n self.chunk_energy.reset()\n self.bd_L_prev = 0\n self.key_tail = None", "def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0", "def releaseTraingingData(self):\n del(self.documents)\n #del(self.sumsOfVectors)\n self.documents = {}\n #self.sumsOfVectors = {}", "def clear(self):\n self._storage.clear()", "def clear_storage(self):\r\n raise NotImplementedError('override me')", "def flush(self):\n cursor = self.db.cursor()\n cursor.execute(\"DELETE FROM triples\")\n self.db.commit()", "def clear(self):\n for key in self.__data.keys():\n 
del self.__data[key]", "def shush(self):\n cancel_all()", "def reset_storage(self):\n self.set_storage()", "def resetData(self):\n self.alive = 1\n self.fromSystem = self.toSystem\n self.systemGrid = 5\n mySystem = self.myGalaxy.systems[self.toSystem]\n mySystem.availSYC += self.repair", "def resetmodel(self):\n for key, value in self._dentsvertsdata.items():\n value.free()\n self._dentsvertsdata.clear()", "def takedown(self):\n self.data_handler.send_all_to_backlog()\n\n del(self)", "def cleanup():\n redis_client.flushall()", "def cleanup_and_reset(self):\n self.mem.set(self.mem.META_PLAN, None)\n self.mem.set(self.mem.META_GOALS, None)\n self.mem.set(self.mem.META_CURR_GOAL, None)", "def clear(self) -> None:\n self._store.clear()", "def clear(self):\n if self.debug:\n print(\"DIMS cleared\")\n self.sp_dicts.clear()", "def clear_data(self):\n if isinstance(self.data, DataManager):\n self.data._update_keys(clear=True)\n else:\n self.data = {}", "def purge(self):\n self.remaining = 0", "def flush(self):\r\n # this flush method is needed for python 3 compatibility.\r\n # this handles the flush command by doing nothing.\r\n pass", "def flush(self):\n super().flush()\n self._targetEvaluation = None\n self._solutionExport = None", "def flush():\n for k in cache._thecache.keys():\n del cache._thecache[k]", "def clear():\n\t\tModel.counter = 0", "def _data_reset(self):\n conn = self.get_connection()\n\n elements = {\n **self.domain.registry.aggregates,\n **self.domain.registry.entities,\n **self.domain.registry.views,\n }\n for _, element_record in elements.items():\n provider = current_domain.providers[element_record.cls.meta_.provider]\n repo = self.domain.repository_for(element_record.cls)\n\n model_cls = repo._model\n if provider.conn_info[\n \"DATABASE\"\n ] == Database.ELASTICSEARCH.value and conn.indices.exists(\n model_cls._index._name\n ):\n conn.delete_by_query(\n refresh=True,\n index=model_cls._index._name,\n body={\"query\": {\"match_all\": {}}},\n )", "def __del__(self): \n \n self.flush()", "def flush(self):\n self._getMemcacheClient().flush_all()", "def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis Sections instances\")\n AnalysisCategorySection.objects.all().delete()", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def _flush(self):\n pass", "def flush_cache(cls, ):\n cls.Lock.acquire()\n cls.UsbDevices.clear()\n cls.Lock.release()", "def clear(self) -> None:", "def reset(self):\n if not self._data_writer.is_ran_last():\n if not self._data_writer.is_ran_ever():\n logger.error(\"Ignoring the reset before the run\")\n else:\n logger.error(\"Ignoring the repeated reset call\")\n return\n for population in self._data_writer.iterate_populations():\n 
population._cache_data() # pylint: disable=protected-access\n\n # Call superclass implementation\n AbstractSpinnakerBase.reset(self)", "def discart(self):\n self.queue.clear()\n self.fetchable = 0", "def clear(self):\r\n\t\tself.free_objects[:] = []", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def flush(self):\n # spectra/*.csv | Age distribution of various subpopulations (e.g. population that died of genetic causes)\n for key, val in self.collection.items():\n with open(self.paths[\"visor_spectra\"] / f\"{key}.csv\", \"ab\") as f:\n array = np.array(val)\n np.savetxt(f, [array], delimiter=\",\", fmt=\"%i\")\n\n # Reinitialize the collection\n self.collection = copy.deepcopy(self._collection)", "def flush_structure(self):\n ...", "async def clear(self):" ]
[ "0.7044794", "0.6967814", "0.68903756", "0.6873136", "0.6852722", "0.68504673", "0.68156374", "0.67802024", "0.6756149", "0.6756149", "0.67459303", "0.67278725", "0.6683704", "0.6683704", "0.6683704", "0.66800135", "0.6664512", "0.66624105", "0.66586465", "0.66405725", "0.6590448", "0.6590231", "0.65853405", "0.6579776", "0.6576952", "0.65745366", "0.6567904", "0.6564967", "0.6553601", "0.6553601", "0.654088", "0.65355086", "0.6525888", "0.6516929", "0.65021837", "0.65021837", "0.65021837", "0.65021837", "0.65021837", "0.65021837", "0.6480298", "0.647572", "0.6473111", "0.64661205", "0.64570177", "0.64500004", "0.64488", "0.64488", "0.64488", "0.64488", "0.6437207", "0.64286035", "0.6421688", "0.64180064", "0.6400853", "0.6378791", "0.6371526", "0.6370279", "0.6366992", "0.63663596", "0.63610494", "0.6359789", "0.63412917", "0.63289505", "0.6315172", "0.6314009", "0.63111216", "0.6300127", "0.6295129", "0.62950665", "0.62898225", "0.6288781", "0.6281203", "0.6275641", "0.62742645", "0.62727374", "0.62663305", "0.62586296", "0.6258362", "0.6244612", "0.62397385", "0.6239727", "0.6230636", "0.62139523", "0.62130773", "0.6206182", "0.61966693", "0.6190101", "0.6187571", "0.6180998", "0.6180977", "0.6179025", "0.6179025", "0.6179025", "0.6179025", "0.6179025", "0.6179025", "0.6179025", "0.61771494", "0.617269", "0.61721224" ]
0.0
-1
Store data samples sent in multiple batches
def store_in_bag(self, data): # timestamp is (s, nanos): data["ts"], data["tnanos"] self.bag.add(data) # Ensure that all data have the same timestamp and are not None # Also there can't be more than a sample per second. if self.bag.is_full(): if random() > 0.99999: print("Telemetry data: ", data["topic"]) print("Bag data: ", self.bag.print_data()) # Then flush the data to process it and empty the bag data = self.bag.get_data() self.on_full(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, num_batches: int):", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def batch_data(data, state_queue):\n while True:\n time.sleep(BATCH_SLEEP_TIME)\n state_queue.put((\"device_values\", [data]))", "def prepare_data(self, train_data, **kwargs):\n data_len = len(train_data[\"done\"])\n for index in range(data_len):\n if self.multi_step == 1:\n self.buff.add(train_data[\"cur_state\"][index],\n train_data[\"action\"][index],\n train_data[\"reward\"][index],\n train_data[\"next_state\"][index],\n float(train_data[\"done\"][index])) # Add replay buffer", "def _defineBatches(self):\n # extract all ids\n all_keys = list(self.data_dict.unique_ids)\n\n # randomly shuffle keys\n if self.random_shuffle_batches:\n random.shuffle(all_keys)\n\n # create batches based on number of batches\n if self.n_big_batches is not None:\n self.n_big_batches += 1\n # define cuts for batches\n cuts = np.linspace(0, self.n_observations,\n self.n_big_batches).round()\n # create batches based on batch size\n elif self.batch_size is not None:\n cuts = [x for x in range(0, self.n_observations,\n int(self.batch_size))]\n if cuts[-1] < self.n_observations:\n cuts.append(self.n_observations)\n\n # convert batch sizes to integers\n cuts = [int(x) for x in cuts]\n\n # save batches into dictionary\n batches = dict()\n for i in range(0, (len(cuts) - 1)):\n # create DataBatch object\n current_batch = DataBatch(ids=all_keys[cuts[i]:cuts[i+1]],\n batch_id=i)\n current_batch.setDiskStoragePath(self.disk_scratch)\n batches[i] = current_batch\n\n # save batches\n self.n_batches = len(batches.keys())\n self.batches = batches", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n 
model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break", "def train(self, batch):\n pass", "def batch_size(self) -> int:\n ...", "def _local_train(self, dataloader_with_memory, num_updates):\n # Local train\n _size = len(dataloader_with_memory)\n self.model = self.model.train()\n for _batch in range(num_updates):\n X, y = dataloader_with_memory.get_samples()\n X, y = X.to(self._device), y.to(self._device)\n if _batch == 0:\n # Initialize the batch-size using the first batch to avoid\n # edge cases with drop_last=False\n _batch_size = X.shape[0]\n _num_batches_per_epoch = (_size // _batch_size) + int(\n (_size % _batch_size) != 0\n )\n # Compute prediction and loss\n _pred = self.model(X)\n _loss = self._loss(_pred, y)\n\n # Backpropagation\n _loss.backward()\n self._optimizer.step()\n 
self._optimizer.zero_grad()\n self.num_batches_seen += 1\n _loss, _current_epoch = (\n _loss.item(),\n self.num_batches_seen // _num_batches_per_epoch,\n )\n\n if self.log:\n if _batch % self.log_period == 0:\n print(\n f\"loss: {_loss:>7f} after {self.num_batches_seen:>5d}\"\n f\" batches of data amounting to {_current_epoch:>5d}\"\n \" epochs.\"\n )\n self.writer.add_scalar(\n f\"client{self.client_id}/train/Loss\",\n _loss,\n self.num_batches_seen,\n )\n\n if _current_epoch > self.current_epoch:\n # At each epoch we look at the histograms of all the\n # network's parameters\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(\n f\"client{self.client_id}/{name}\", p, _current_epoch\n )\n\n self.current_epoch = _current_epoch", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def _upload_samples(self, samples):\n # Iterate over the full set of provided samples, uploading them in chunks.\n for offset in range(0, len(samples), self.upload_chunk_size):\n chunk = samples[offset:offset + self.upload_chunk_size]\n self.api.upload_samples(offset, chunk)", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def _batch(self, X, y, batch_size=16):\n X, y = self._shuffle(X, y) # shuffle the data\n self.batches = []\n idx = 0\n while idx < len(X):\n batch = (X[idx:idx+batch_size], y[idx:idx+batch_size])\n self.batches.append(batch)\n idx += batch_size", "def static_batch(data, batch_size=16):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= batch_size:\n yield buf\n buf = []\n if len(buf) > 0:\n yield buf", "def train_next_batch(self, batch_size=None):", "def write_batch(self, batch):\n for item in batch:\n self.write_buffer.buffer(item)\n key = self.write_buffer.get_key_from_item(item)\n if self.write_buffer.should_write_buffer(key):\n self._write_current_buffer_for_group_key(key)\n self.increment_written_items()\n self._check_items_limit()", "def storeBatch(self, storage, X_data, y_data):\n # data to store\n data_dict = {'X_data': X_data,\n 'y_data': y_data}\n # store in memory\n if storage == \"memory\":\n # dict with data\n self.raw_data = data_dict\n # store on disk\n elif storage == \"disk\":\n # check batch directory\n batch_file = 'batch' + str(self.batch_id) + '.pkl'\n if batch_file in 
os.listdir(self.disk_storage_path):\n print(\"Overwriting: %s\" % batch_file)\n else:\n pkl.dump(data_dict,\n open(self.disk_storage_path + batch_file, \"wb\"))\n # save in native numpy format\n elif storage == \"numpy\":\n batch_file = 'batch' + str(self.batch_id) + '.npy'\n if batch_file in os.listdir(self.disk_storage_path):\n print(\"Overwriting: %s\" % batch_file)\n else:\n np.save(self.disk_storage_path + \"X_\" + batch_file, X_data)\n np.save(self.disk_storage_path + \"y_\" + batch_file, y_data)\n # save to disk as images\n elif storage == \"disk_raw\":\n # create new directory for batch\n batch_dir = self.disk_storage_path + \"batch\" + str(self.batch_id)\n os.mkdir(batch_dir)\n # loop through all images and store\n for i in range(0, X_data.shape[0]):\n img = array_to_img(X_data[i, :, :, :])\n img.save(batch_dir + \"/\" + i + \".jpeg\")\n else:\n # do nothing\n return None\n\n # set flags and parameters\n self.is_stored = True\n self.storage = storage", "def on_train_batch_begin(self, step, logs=None):", "def hook_store_batch_size(module):\n if self._batch_size == {}:\n batch_axis = 0\n batch_size = module.input0.shape[batch_axis]\n\n for group in param_groups:\n group_id = id(group)\n\n if self._verbose:\n print(f\"Group {group_id}: Store 'batch_size'\")\n\n self._batch_size[group_id] = batch_size", "def batches(set_name):\n global num_batches, args, ds_sizes \n # num_batches = how many batches in each dataset(train, valid, test)\n # ds_sizes = dataset_sizes \n for b in range(num_batches[set_name]):\n bi = b * args.batch_size # one batch mul batch_size \n bj = (b + 1) * args.batch_size \n if b == num_batches[set_name] - 1:\n bj = ds_sizes[set_name] # maybe only remainer set\n yield bi, bj", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def next_batch(self, batch_size):\n batch_data = np.zeros([batch_size,] + list(self.example_shape))\n for i in range(batch_size):\n index = self.q.pop()\n batch_data[i,...] 
= self.data[index]\n if len(self.q)==0:\n self.__new_epoch()\n\n return batch_data", "def sample_batch(pid, args, batch_queue, port_dict, device, actor_id_to_ip_dataport, local_size, cache_array):\n def recv_data(k, data_stream, actor_set, real_data_tasks_i):\n for real_data in data_stream:\n tmp = []\n tmp.append(real_data.state)\n tmp.append(real_data.action)\n tmp.append(real_data.reward)\n tmp.append(real_data.next_state)\n tmp.append(real_data.done)\n tmp.append(actor_set[k]['w'][real_data.idx])\n tmp.append(actor_set[k]['i'][real_data.idx])\n tmp.append(actor_set[k]['t'][real_data.idx])\n tmp.append(real_data.timestamp)\n local_dict[actor_set[k]['i'][real_data.idx]] = tmp\n cache_array[actor_set[k]['i'][real_data.idx]] |= 2**pid\n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['states'].append(decom_state) #.to(device))\n real_data_tasks_i['actions'].append(torch.LongTensor([real_data.action])) #.to(device))\n real_data_tasks_i['rewards'].append(torch.FloatTensor([real_data.reward])) #.to(device))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.next_state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['next_states'].append(decom_next_state) #.to(device))\n real_data_tasks_i['dones'].append(torch.FloatTensor([real_data.done])) #.to(device))\n real_data_tasks_i['batch_weights'].append(torch.FloatTensor([actor_set[k]['w'][real_data.idx]])) #.to(device))\n real_data_tasks_i['batch_idxes'].append(actor_set[k]['i'][real_data.idx])\n # is the data overwrited?\n real_data_tasks_i['batch_timestamp_store'].append(actor_set[k]['t'][real_data.idx])\n real_data_tasks_i['batch_timestamp_real'].append(real_data.timestamp)\n conn = grpc.insecure_channel(port_dict['replay_ip'] + ':' + port_dict['sampleDataPort'])\n client = apex_data_pb2_grpc.SampleDataStub(channel=conn)\n local_dict = {}\n while True:\n batch_timestamp_real = []\n batch_timestamp_store = []\n batch_weights = []\n batch_idxes = []\n\n states, actions, rewards, next_states, dones = [], [], [], [], []\n\n res_batch = client.Send(apex_data_pb2.SampleDataRequest(batch_size=args.batch_size, beta = args.beta))\n actor_ids, data_ids, timestamps, weights, idxes = res_batch.actor_ids, res_batch.data_ids, res_batch.timestamp, res_batch.weights, res_batch.idxes\n actor_set = {}\n cached_value = {'states':{},'actions':{},'rewards':{},'next_states':{},'dones':{},'batch_weights':{},'batch_idxes':{},'batch_timestamp_store':{},'batch_timestamp_real':{}}\n for i in range(len(actor_ids)):\n set_a = actor_set.get(actor_ids[i], False)\n if set_a == False:\n actor_set[actor_ids[i]] = {}\n set_a = actor_set[actor_ids[i]]\n set_a['d'] = []\n set_a['w'] = []\n set_a['i'] = []\n set_a['t'] = []\n cached_value['states'][actor_ids[i]] = []\n cached_value['actions'][actor_ids[i]] = []\n cached_value['rewards'][actor_ids[i]] = []\n cached_value['next_states'][actor_ids[i]] = []\n cached_value['dones'][actor_ids[i]] = []\n cached_value['batch_weights'][actor_ids[i]] = []\n cached_value['batch_idxes'][actor_ids[i]] = []\n cached_value['batch_timestamp_store'][actor_ids[i]] = []\n cached_value['batch_timestamp_real'][actor_ids[i]] = []\n cache_id = actor_ids[i]*local_size+data_ids[i]\n cache_trans = cache_array[cache_id]\n if cache_trans & 2**pid == 0:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n if cache_trans == 0 and 
local_dict.get(cache_id, False) != False:\n del local_dict[cache_id]\n else:\n try:\n state_tmp = local_dict[cache_id][0]\n action_tmp = local_dict[cache_id][1]\n reward_tmp = local_dict[cache_id][2] \n next_state_tmp = local_dict[cache_id][3] \n done_tmp = local_dict[cache_id][4] \n batch_weight_tmp = local_dict[cache_id][5] \n batch_idx_tmp = local_dict[cache_id][6] \n batch_store_tmp = local_dict[cache_id][7] \n batch_real_tmp = local_dict[cache_id][8] \n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['states'][actor_ids[i]].append(decom_state)\n cached_value['actions'][actor_ids[i]].append(torch.LongTensor([action_tmp]))\n cached_value['rewards'][actor_ids[i]].append(torch.FloatTensor([reward_tmp]))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(next_state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['next_states'][actor_ids[i]].append(decom_next_state)\n cached_value['dones'][actor_ids[i]].append(torch.FloatTensor([done_tmp]))\n cached_value['batch_weights'][actor_ids[i]].append(torch.FloatTensor([batch_weight_tmp]))\n cached_value['batch_idxes'][actor_ids[i]].append(batch_idx_tmp)\n cached_value['batch_timestamp_store'][actor_ids[i]].append(batch_store_tmp)\n cached_value['batch_timestamp_real'][actor_ids[i]].append(batch_real_tmp)\n except:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n real_data_links = {}\n real_data_tasks = {}\n for k, v in actor_set.items():\n actor_ip, data_port = actor_id_to_ip_dataport[k]\n conn_actor = grpc.insecure_channel(actor_ip + ':' + data_port)\n client_actor = apex_data_pb2_grpc.SendRealDataStub(channel=conn_actor)\n real_data_links[k] = client_actor.Send(apex_data_pb2.RealBatchRequest(idxes=v['d']))\n real_data_tasks[k] = {}\n real_data_tasks[k]['states'] = cached_value['states'][k]\n real_data_tasks[k]['actions'] = cached_value['actions'][k]\n real_data_tasks[k]['rewards'] = cached_value['rewards'][k]\n real_data_tasks[k]['next_states'] = cached_value['next_states'][k]\n real_data_tasks[k]['dones'] = cached_value['dones'][k]\n real_data_tasks[k]['batch_weights'] = cached_value['batch_weights'][k]\n real_data_tasks[k]['batch_idxes'] = cached_value['batch_idxes'][k]\n real_data_tasks[k]['batch_timestamp_store'] = cached_value['batch_timestamp_store'][k]\n real_data_tasks[k]['batch_timestamp_real'] = cached_value['batch_timestamp_real'][k]\n threads = []\n for k, v in real_data_links.items():\n t = threading.Thread(target=recv_data, args=(k, v, actor_set, real_data_tasks[k],))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n\n for k, v in real_data_tasks.items():\n states += v['states']\n actions += v['actions']\n rewards += v['rewards']\n next_states += v['next_states']\n dones += v['dones']\n batch_weights += v['batch_weights']\n batch_idxes += v['batch_idxes']\n batch_timestamp_real += v['batch_timestamp_real']\n batch_timestamp_store += v['batch_timestamp_store']\n\n states = torch.cat(states,0).to(device)\n actions = torch.cat(actions,0).to(device)\n rewards = torch.cat(rewards,0).to(device)\n next_states = torch.cat(next_states,0).to(device)\n dones = torch.cat(dones,0).to(device)\n batch_weights = torch.cat(batch_weights,0).to(device)\n\n batch = [states, actions, rewards, next_states, dones, batch_weights, batch_idxes]\n batch_queue.put(batch)\n data, batch = None, None", "def batch_split(self) -> np.array:\n pass", "def 
_one_mini_batch(self, data, indices, pad_id):\n batch_data = {'raw_data': [data[i] for i in indices],\n 'question_token_ids': [],\n 'question_length': [],\n 'passage_token_ids': [],\n 'passage_length': [],\n 'start_id': [],\n 'end_id': []}\n max_passage_num = max([len(sample['passages']) for sample in batch_data['raw_data']])\n max_passage_num = min(self.max_p_num, max_passage_num)\n for sidx, sample in enumerate(batch_data['raw_data']):\n for pidx in range(max_passage_num):\n if pidx < len(sample['passages']):\n batch_data['question_token_ids'].append(sample['question_token_ids'])\n batch_data['question_length'].append(len(sample['question_token_ids']))\n passage_token_ids = sample['passages'][pidx]['passage_token_ids']\n batch_data['passage_token_ids'].append(passage_token_ids)\n batch_data['passage_length'].append(min(len(passage_token_ids), self.max_p_len))\n else:\n batch_data['question_token_ids'].append([])\n batch_data['question_length'].append(0)\n batch_data['passage_token_ids'].append([])\n batch_data['passage_length'].append(0)\n batch_data, padded_p_len, padded_q_len = self._dynamic_padding(batch_data, pad_id)\n for sample in batch_data['raw_data']:\n if 'answer_passages' in sample and len(sample['answer_passages']):\n gold_passage_offset = padded_p_len * sample['answer_passages'][0]\n batch_data['start_id'].append(gold_passage_offset + sample['answer_spans'][0][0])\n batch_data['end_id'].append(gold_passage_offset + sample['answer_spans'][0][1])\n else:\n # fake span for some samples, only valid for testing\n batch_data['start_id'].append(0)\n batch_data['end_id'].append(0)\n return batch_data", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n if self.mode == tf.estimator.ModeKeys.PREDICT and self.imagenet_train_predict_partial:\n # Sort and shuffle with seed to randomize deterministically.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n random.shuffle(filenames)\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=50000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n if self.imagenet_train_predict_partial:\n MAX_EXAMPLES = 50000\n # Skip to start at a random spot in the first TFRecord.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n skip_examples = random.randint(0, 1251)\n dataset = dataset.skip(skip_examples)\n # Continue shuffling amongst at least as many examples\n # as it could see in 3 cross validations.\n dataset.shuffle(buffer_size=3 * MAX_EXAMPLES,\n seed=self.imagenet_train_predict_shuffle_seed)\n num_examples = 
MAX_EXAMPLES\n else:\n # Take whole training set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n # Take whole validation set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n #print(\"T is \",T)\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n #print(\"sampled data \", s, \" \",data, end=\" \")\n batch.append((*data, idx))\n\n idx = np.array([i[2] for i in batch])\n #idx in the offline buffer\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n names_batch = np.array([i[1] for i in batch])\n\n return names_batch, idx", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def on_train_batch_end(self, step, logs=None):", "def dynamic_batch(data, max_frames_in_batch=12000):\n buf = []\n longest_frames = 0\n for sample in data:\n assert \"feat\" in sample\n assert isinstance(sample[\"feat\"], torch.Tensor)\n new_sample_frames = sample[\"feat\"].size(0)\n longest_frames = max(longest_frames, new_sample_frames)\n frames_after_padding = longest_frames * (len(buf) + 1)\n if frames_after_padding > max_frames_in_batch:\n yield buf\n buf = [sample]\n longest_frames = new_sample_frames\n else:\n buf.append(sample)\n if len(buf) > 0:\n yield buf", "def on_batch_begin(self, batch, logs=None):", "def _send_data(self):\n \n # Do not send more than 100 datasets each time (totally arbitrary)\n MAX_DATA_SETS_PER_POST = 100\n data_to_send = self._data_buffer[0:MAX_DATA_SETS_PER_POST]\n data_to_keep = self._data_buffer[MAX_DATA_SETS_PER_POST:]\n\n # Prepare data string with the values in data buffer\n now = time.time()\n data_string = '[' \n for (timestamp, data) in data_to_send:\n data_string += '['\n data_string += str(round(timestamp-now,2))\n for sample in data:\n data_string += ','\n data_string += str(sample)\n data_string += '],'\n # Remove trailing comma and close bracket\n data_string = data_string[0:-1]+']'\n\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/bulk.json?apikey=\n # 12345&data=[[-10,10,1806],[-5,10,1806],[0,10,1806]]'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + \"/input/bulk_json?apikey=\" + \\\n self._settings['apikey'] + \"&data=\" + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = 
urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n # Send ok -> empty buffer\n self._data_buffer = data_to_keep\n return True\n else:\n self._log.warning(\"Send failure\")", "def load_batch(self):\r\n\r\n #if we've seen all the data, start again with them in a new random order\r\n if self.batchcounter+self.batchsize > self.num_data:\r\n self.batchcounter = 0\r\n self.epochs += 1\r\n self._permutation = np.random.permutation(self.num_data)\r\n\r\n this_perm = self._permutation[self.batchcounter:self.batchcounter+self.batchsize]\r\n\r\n self.X_batch = self.X[this_perm]\r\n self.likelihood.set_data(self.Y[this_perm])\r\n if self.has_uncertain_inputs:\r\n self.X_variance_batch = self.X_variance[this_perm]\r\n\r\n self.batchcounter += self.batchsize\r\n\r\n self.data_prop = float(self.batchsize)/self.num_data\r\n\r\n self._compute_kernel_matrices()\r\n self._computations()", "def __iter__(self):\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0\n\n while True:\n\n # Randomizing wav lists\n random.shuffle(self._lst_spk_files)\n random.shuffle(self._lst_noise_files)\n\n for spk_file, noise_file in zip(self._lst_spk_files, self._lst_noise_files):\n\n # Read wav files\n sig_spk = self.__read_wav_file(spk_file)\n sig_noise = self.__read_wav_file(noise_file)\n\n # Align signal\n min_length = min(sig_spk.shape[0], sig_noise.shape[0])\n\n if min_length < self._fftsize:\n raise Exception(\"ERROR: Too short signals in dataset\")\n\n sig_spk = sig_spk[:min_length]\n sig_noise = sig_noise[:min_length]\n\n # Generate need SNR\n need_snr = random.uniform(self._min_snr, self._max_snr)\n\n # Calc scaled signals\n sig_spk, sig_noise = self.__mix_with_snr(sig_spk, sig_noise, need_snr)\n\n # Calc STFT\n stft_spk = stft(sig_spk, fftsize=self._fftsize, overlap=self._overlap)\n stft_noise = stft(sig_noise, fftsize=self._fftsize, overlap=self._overlap)\n stft_mix = stft_spk + stft_noise\n\n # Skip small segments\n frames, bin = stft_mix.shape\n if frames <= self._context_size:\n continue\n\n # Collect batch\n i = 0\n while i + self._context_size < frames:\n\n batch_sp.append(stft_spk[i:i + self._context_size, :])\n batch_noise.append(stft_noise[i:i + self._context_size, :])\n batch_mix.append(stft_mix[i:i + self._context_size, :])\n\n i += self._context_size // 2\n batch_count += 1\n\n if batch_count == self._batch_size:\n sp = np.array(batch_sp).reshape((self._batch_size,\n self._context_size, -1))\n noise = np.array(batch_noise).reshape((self._batch_size,\n self._context_size, -1))\n mix = np.array(batch_mix).reshape((self._batch_size,\n self._context_size, -1))\n yield sp, noise, mix\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0", "def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - 
self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in range(n_workers):\n queue.put(None)", "def batch(self, batch):\n\n self._batch = batch", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def sample_batch(self) -> List:\n return self.buffer.sample(self.batch_size)", "def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])", "def batches(self):\n return [self.get_batch(i) for i in range(self.num_batches)]", "def save_batch(self):\n self._batch_counter += 1\n write_to_disk(\n self._batch_cases,\n os.path.join(\n self.crop.location,\n \"batches\",\n BTCH_NM.format(self._batch_counter),\n ),\n )\n self._batch_cases = []\n self._counter = 0", "def process_batch(self, data):\n [embedding_batch] = self._sess.run([self._embedding_tensor],\n feed_dict={self._features_tensor: data})\n return embedding_batch", "def append(self, batch: Batch):", "def instantiate_batch(self, inputs):\n return inputs", "def data(self, train=True, batch_size=2):\n if train:\n elements = 
self.prepare_batch(self.training_albums)\n else:\n elements = self.prepare_batch(self.validation_albums)\n\n while len(elements) > 0:\n # Collect the batch\n batch = []\n for _ in range(min(batch_size, len(elements))):\n batch.append(elements.pop())\n\n # Get same sequence size for all elements of the batch\n albums, labels = self.batchify(batch)\n yield albums, labels", "def init_batch(self):\n pass", "def train_minibatches(self):\n batch_size = self.params['batch_size']\n start_index = 0\n while start_index + batch_size < 500:\n end_index = start_index + batch_size\n yield self.input[start_index:end_index], self.y[start_index:end_index]\n start_index = end_index", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "def create_batch(client, generator: DataGenerator):\n try:\n event_data_batch = client.create_batch()\n for device in generator.devices:\n # event_data_batch.add(EventData(gen.generate_payload(device)))\n event_data_batch.add(EventData(generator.generate_payload(device)))\n return event_data_batch\n except Exception as e:\n print(str(e))", "def multiple_batch(self, handler, dataset):\n iterator = tf.data.Iterator.from_string_handle(handler, dataset.output_types, dataset.output_shapes)\n (src_ids, tgt_input_ids, tgt_output_ids, src_seq_len, tgt_seq_len) = (iterator.get_next())\n return BatchedInput(iterator=None,\n batched_dataset=None,\n handle=None,\n initializer=None,\n source=src_ids,\n target_input=tgt_input_ids,\n target_output=tgt_output_ids,\n source_sequence_length=src_seq_len,\n target_sequence_length=tgt_seq_len)", "def data_gen(\n v: int, batch: int, nbatches: int, device: torch.device = torch.device(\"cpu\")\n) -> Iterator[Batch]: # TODO bad name\n for i in range(nbatches):\n data = np.random.randint(1, v, size=(batch, 10))\n data[:, 0] = 1\n src: LongTensorType = torch.from_numpy(data)\n tgt: LongTensorType = torch.from_numpy(data)\n src, tgt = src.to(device), tgt.to(device)\n yield Batch(src, tgt, 0)", "def save_feature(self):\n import scipy.io as sio\n testdp = self.test_data_provider\n num_batches = len(testdp.batch_range)\n print 'There are ' + str(testdp.get_num_batches(self.data_path)) + ' in directory'\n if self.test_data_provider.batch_size > 0:\n num_batches = (num_batches - 1)/ self.test_data_provider.batch_size + 1\n if self.test_one:\n num_batches = min(num_batches, 1)\n print 'There are ' + str( num_batches ) + ' in range'\n iu.ensure_dir(self.save_feature_path)\n feature_name = self.op.get_value('save_feature_name')\n feature_dim = self.model_state['layers'][self.feature_idx]['outputs']\n print 'Feature dim is %d' % feature_dim\n for b in range(num_batches):\n epoch, b_num, data = self.get_next_batch(train=False)\n print ' Start writing batch......\\t' + str(b_num)\n num_data = data[0].shape[-1]\n data += [n.zeros((num_data, feature_dim), dtype=n.single)]\n save_name = 'batch_feature_' + str(b_num) + '_' + feature_name \n save_path = iu.fullfile(self.save_feature_path, save_name)\n self.libmodel.startFeatureWriter(data, self.feature_idx)\n self.finish_batch()\n d = dict()\n d['X'] = data[-1].transpose()\n d['batch_num'] = b_num\n d['Y'] = data[1]\n cur_batch_indexes = 
self.test_data_provider.data_dic['cur_batch_indexes']\n # d['Y_other'] = data[2:-1] if len(data) > 3 else []\n ####### WARN BEGIN ################\n # for human eva fake experiments\n # d['images_path'] = [self.test_data_provider.images_path[x] for x in cur_batch_indexes]\n # d['Y'] = np.concatenate(map(lambda x:self.test_data_provider.batch_meta['RelativeSkel_Y3d_mono_body_backup'][...,x].reshape((-1,1),order='F'), cur_batch_indexes),axis=1)\n print d['Y'].shape\n d['cur_batch_indexes'] = cur_batch_indexes\n ####### WARN END ################\n print 'The len of data is ' + str(len(data))\n print 'The shape of X is' + str(d['X'].shape)\n print 'The shape of Y is' + str(d['Y'].shape)\n ##sio.savemat(save_path, d)\n pickle(save_path, d)", "def collect_samples(self):\n self.replay_buffer = self.collect_initial_batch(\n self.replay_buffer, self.acm_pre_train_samples\n )", "def _get_batch(self):\n # index = self._index[self._current]\n # im_path = self._imdb.image_path_from_index(0)\n # im_path = 'data/demo/dog.jpg'\n # with open(im_path, 'rb') as fp:\n # img_content = fp.read()\n\n batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))\n batch_label = [] \n global imgi\n # img = mx.nd.array(imgi)\n # imgr = mx.img.imdecode(img_content)\n data = self._data_augmentation(imgi)\n batch_data[0] = data\n \n self._data = {'data': batch_data}\n self._label = {'label': None}", "def save_and_upload_batch_sample_sets(batch_samples, batch_tumors, batch_normals, tsca_id, namespace, workspace):\n # Save to file\n os.system('mkdir -p %s'%tsca_id)\n batch_samples_filename = './%s/fc_upload_sample_set_tsca_%s.txt' % (tsca_id, tsca_id)\n batch_tumors_filename = './%s/fc_upload_sample_set_tsca_%s_tumors.txt' % (tsca_id, tsca_id)\n batch_normals_filename = './%s/fc_upload_sample_set_tsca_%s_normals.txt' % (tsca_id, tsca_id)\n \n batch_samples.to_csv(batch_samples_filename , sep=\"\\t\", index=False )\n batch_tumors.to_csv(batch_tumors_filename , sep=\"\\t\", index=False )\n batch_normals.to_csv(batch_normals_filename , sep=\"\\t\", index=False )\n\n r1 = upload_entities_from_tsv(namespace, workspace, batch_samples_filename)\n r2 = upload_entities_from_tsv(namespace, workspace, batch_tumors_filename)\n r3 = upload_entities_from_tsv(namespace, workspace, batch_normals_filename)\n return (r1, r2, r3)", "def get_batches(self, batch_size):\n if self.data.shape[0] % batch_size != 0:\n raise RuntimeError('num of data tuples is not a multiple of batch size')\n num_batch = self.data.shape[0] // batch_size\n for b in range(num_batch):\n yield self.data[b*batch_size:(b+1)*batch_size, :], \\\n self.target[b*batch_size:(b+1)*batch_size, :]", "def next_batch(self, batch_size, shuffle=True):", "def batches(self, batch_size): \n if self.shuffle:\n idx = np.arange(len(dataset.train_x))\n np.random.shuffle(idx)\n self.train_x = self.train_x[idx]\n \n n_batches = len(self.train_x) // batch_size\n for ii in range(0, len(self.train_x), batch_size):\n x = self.train_x[ii:ii+batch_size]\n \n yield self.scaler(x)", "def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)", "def data_iter(data, vocab, batch_size, shuffle=True, cuda=False):\n\n buckets = defaultdict(list)\n for pair in data:\n src_sent = pair[0]\n buckets[len(src_sent)].append(pair)\n\n batched_data = []\n for src_len in buckets:\n tuples = buckets[src_len]\n if shuffle: np.random.shuffle(tuples)\n batched_data.extend(list(HNCMDataLoader.batch_slice(tuples, batch_size)))\n\n if 
shuffle:\n np.random.shuffle(batched_data)\n for src_sents, trg_sents, fact_sents in batched_data:\n num_trg_word = sum(len(s[:-1]) for s in trg_sents)\n src_lengths = [len(s) for s in src_sents]\n src_seqs_var = to_input_var(src_sents, vocab.src, cuda)\n trg_seqs_var = to_input_var(trg_sents, vocab.trg, cuda)\n fact_lengths = [[len (s) for s in fact_sent] for fact_sent in fact_sents]\n fact_seqs_var = to_input_var_2d(fact_sents, vocab.src, cuda)\n\n yield {\n 'src_seq': src_seqs_var, 'src_lengths': src_lengths,\n 'fact_seq': fact_seqs_var, 'fact_lengths': fact_lengths,\n 'trg_seq': trg_seqs_var[:, :-1],\n 'target': trg_seqs_var[:, 1:],\n 'num_trg_word': num_trg_word, 'num_trg_seq': len(trg_sents)\n }", "def train(batch_size, num_sample=128):\n return paddle.batch(_read_creater(num_sample=num_sample), batch_size)", "def on_test_batch_begin(self, batch, logs=None):", "def produce_query_batches(self):\n pass", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def sample_train_batch(self):\r\n batch = []\r\n labels =[]\r\n num_groups = self.batch_size // self.batch_k\r\n sampleed_classes = np.random.choice(self.train_class_ids,num_groups,replace=False)\r\n for class_id in sampleed_classes:\r\n img_fname = np.random.choice(self.train_image_files[class_id],self.batch_k,replace=False)\r\n batch += img_fname.tolist()\r\n labels += [class_id]*self.batch_k\r\n return batch,labels", "def test_batch(self):\n pass", "def next_batch(self, batch_size):\n raise NotImplementedError", "def gen_batches(data, batch_size=2048):\n indices = torch.randperm(len(data))\n indices = indices.cuda()\n\n for idx in range(0, len(data) - batch_size + 1, batch_size):\n sample = indices[idx:idx + batch_size]\n l_words, r_words = data.L_words[sample], data.R_words[sample]\n l_vecs = data.l_vecs[l_words]\n r_vecs = data.r_vecs[r_words]\n l_bias = data.l_biases[l_words]\n r_bias = data.r_biases[r_words]\n weight = data.weights[sample]\n y = data.y[sample]\n yield weight, l_vecs, r_vecs, y, l_bias, r_bias", "def generate_next_batch(self, data): \n \n batch_words = np.array(data[self.batch_lookup[self.batch_index]][0])\n batch_labels = np.array(data[self.batch_lookup[self.batch_index]][1])\n self.batch_index += 1\n if self.batch_index == len(data) - 1:\n self.epoch += 1\n return batch_words, batch_labels", "def __iter__(self):\n batch = []\n for idx in self._sampler:\n batch.append(idx)\n if len(batch) == self._batch_size:\n batch = sum(batch, [])\n yield batch\n batch = []\n if len(batch) > 0:\n batch = sum(batch, [])\n yield batch", "def _generate_and_save_examples(self, epoch):\n\n if self.fn_save_examples:\n z_in = tf.random.uniform((self.batch_size, self.z_dim), -1, 1)\n x_save = self.raw_x_dataset[0:self.batch_size]\n c_save = self.raw_dataset[0:self.batch_size]\n\n generations = tf.squeeze(self.generator(z_in, c_save, training=False))\n self.fn_save_examples(epoch, x_save, generations)", "def upload_data_new_batch(tsca_id, latest_tsca_id, paths_to_batches_info, namespace, workspace, google_bucket_id):\n paths_to_batches_info_df = pd.read_excel(paths_to_batches_info, index_col=0)\n path_to_samples_info = paths_to_batches_info_df.loc[tsca_id, 'path_to_samples_info']\n\n # DF of remote [sample < > 
sample set ]\n remote_sample_sets = pd.read_table('remote_files/sample_set_membership_%s.tsv'%latest_tsca_id)\n # DF of remote [pair < > pair set]\n remote_pair_sets = pd.read_table('remote_files/pair_set_membership_%s.tsv'%latest_tsca_id)\n\n all_samples = get_samples(paths_to_batches_info, google_bucket_id)\n # Add cohorts for older batches\n all_samples = add_cohort_to_old_batches(all_samples)\n\n ##### Remove blacklisted samples ##\n # Blacklisted samples\n blacklisted = pd.read_table(\"samples_blacklist.txt\", header=None, names=[\"entity:sample_id\"])\n blacklisted_sample_ids = blacklisted[\"entity:sample_id\"].values.tolist()\n all_samples = all_samples[~all_samples[\"entity:sample_id\"].isin(blacklisted_sample_ids)]\n\n ########## Participants ##########\n print(\"Uploading participants...\") \n participants = prepare_participants_for_metadata_export(path_to_samples_info, tsca_id)\n r1 = save_and_upload_participants(participants, namespace, workspace, tsca_id)\n ##################################\n\n ########## Samples ############\n print(\"Uploading samples...\")\n batch_samples = prepare_batch_samples_for_metadata_export(path_to_samples_info, tsca_id, google_bucket_id)\n r2 = save_and_upload_samples(batch_samples, namespace, workspace, tsca_id)\n #################################\n\n ########## Pairs #############\n print(\"Uploading pairs...\")\n pairs = create_pairs_list(all_samples)\n r3 = save_and_upload_pairs(namespace, workspace, pairs)\n #################################\n\n ########## Sample Sets #########\n print(\"Uploading sample sets...\")\n batch_sample_set, batch_tumor_set, batch_normal_set = prepare_batch_sample_set_for_metadata_export(path_to_samples_info, tsca_id)\n # Remove the samples that have already been uploaded \n uploaded_sample_ids = remote_sample_sets['sample'].tolist()\n batch_sample_set_clean = batch_sample_set[~batch_sample_set['sample_id'].isin(uploaded_sample_ids)]\n batch_tumor_set_clean = batch_tumor_set[~batch_tumor_set['sample_id'].isin(uploaded_sample_ids)]\n batch_normal_set_clean = batch_normal_set[~batch_normal_set['sample_id'].isin(uploaded_sample_ids)]\n r4a, r4b, r4c = save_and_upload_batch_sample_sets(batch_sample_set_clean, batch_tumor_set_clean, batch_normal_set_clean, tsca_id, namespace, workspace)\n #################################\n\n ########## PoNs ###############\n print(\"Uploading PoNs...\")\n \n # Number of latest tsca id\n latest_tsca_id_int = int(re.findall('\\d+', latest_tsca_id )[0])\n # Array with list of all previous TSCA ids\n previous_tsca_ids = [\"TSCA%s\"%i for i in np.arange(14, latest_tsca_id_int+1)]\n previous_tsca_ids.insert(0, \"TSCA1213\")\n\n pon, name = create_panel_of_normals_advanced(tsca_id, all_samples,\\\n num_normals_per_cohort_involved = 3, \\\n batches_to_pick_from = previous_tsca_ids)\n\n # Only upload PoN if it hasn't been uploaded already\n if not name in remote_sample_sets['membership:sample_set_id'].unique().tolist():\n r5 = upload_pon(pon, name, namespace, workspace) \n else: \n print(\"PoN already exists...\")\n r5 = {}\n #################################\n \n ########## Pair Set ###########\n print(\"Uploading pair sets...\")\n # Upload cumulative pair sets\n tn_cum_pairsets, tp_cum_pairsets = prepare_cumulative_pairsets_for_metadata_export(pairs, tsca_id)\n r6 = upload_pairsets(namespace, workspace, tn_cum_pairsets, \"TN\")\n r7 = upload_pairsets(namespace, workspace, tp_cum_pairsets, \"TP\")\n\n # Batch pair sets\n tn_pairsets, tp_pairsets = 
prepare_batch_pairsets_for_metadata_export(all_samples, pairs, tsca_id)\n uploaded_pair_ids = remote_pair_sets['pair'].tolist()\n tn_pairsets_clean = tn_pairsets[~tn_pairsets['pair_id'].isin(uploaded_pair_ids)]\n tp_pairsets_clean = tp_pairsets[~tp_pairsets['pair_id'].isin(uploaded_pair_ids)]\n\n r8 = upload_pairsets(namespace, workspace, tn_pairsets_clean, \"TN\")\n r9 = upload_pairsets(namespace, workspace, tp_pairsets_clean, \"TP\")\n #################################\n\n return (r1, r2, r3, r4a, r4b, r4c, r5, r6, r7, r8, r9)", "def on_train_begin(self, logs={}):\n self._data = []", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n batch.append((*data, idx))\n idx = np.array([i[5] for i in batch])\n #TD errors are only updated for transitions that are replayed\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n s_batch = np.array([i[0] for i in batch])\n a_batch = np.array([i[1] for i in batch])\n r_batch = np.array([i[2] for i in batch])\n d_batch = np.array([i[3] for i in batch])\n new_s_batch = np.array([i[4] for i in batch])\n\n return s_batch, a_batch, r_batch, d_batch, new_s_batch, idx", "def _train(self, samples):\n for i, sample in enumerate(samples):\n # add extra info to samples\n sample = {\n 'src_tokens': sample[0],\n 'input_tokens': self._right_shifted_ys(sample[1]),\n 'target': sample[1],\n 'id': None\n }\n sample['ntokens'] = sum(len(t) for t in sample['target'])\n sample['src_positions'] = self._positions_for_tokens(\n sample['src_tokens'])\n sample['input_positions'] = self._positions_for_tokens(\n sample['input_tokens'])\n samples[i] = sample\n return self.trainer.train_step(samples)", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 16\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints + 1.0\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def train_dynamic(batch_size=10):\n \n return", "def next_batch(self):\n # Whether an epoch is done.\n done = False\n samples = []\n for _ in range(self.batch_size):\n # Indeed, `>` will not occur.\n if self.ptr >= self.dataset_size:\n done = True\n break\n else:\n self.ptr += 1\n sample = self.enqueuer.queue.get()\n 
samples.append(sample)\n # print 'queue size: {}'.format(self.enqueuer.queue.qsize())\n # Indeed, `>` will not occur.\n if self.ptr >= self.dataset_size:\n done = True\n return samples, done", "def batch_fit(self, data_loader: torch.utils.data.DataLoader, epochs: int):\n pass", "def batch_start(self, batch_idx, batch_data):\n self.batch = batch_idx", "def next_batch(self, batch_size):\r\n raise NotImplementedError", "def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]", "def generate_batch(self):\n\n # sbatch = list()\n # tbatch = list()\n # for i in range(self.dict_paras['batch_size']):\n # sbatch.append(self.lst_triplet_train_map[self.data_index])\n # self.data_index = (self.data_index + 1) % self.triplet_train_size\n\n sbatch = random.sample(self.lst_triplet_train_map, self.dict_paras['batch_size'])\n tbatch = list()\n\n for ele in sbatch:\n corrupted1, corrupted2 = self.get_corrupted_triplet(ele)\n tbatch.append((ele, corrupted1))\n tbatch.append((ele, corrupted2))\n return tbatch", "def prepare_batch(data,BATCH_SIZE, filename):\n ### select the last BATCH_SIZE rows from batch dataset\n batch = data.iloc[-BATCH_SIZE:].values.tolist()\n batch_data = []\n \n ### remove white spaces in the list because predtion server expects no white spaces between elements\n for i in batch:\n str_row = str(i)\n str_row = str_row.replace(' ','')\n batch_data.append(str_row)\n \n ### write values in a file called filename\n with open(filename, 'w') as f:\n f.write(','.join(str(i) for i in batch_data))", "def batch_data(source, target, batch_size):\n for batch_i in range(0, len(source)//batch_size):\n start_i = batch_i * batch_size\n source_batch = source[start_i:start_i + batch_size]\n target_batch = target[start_i:start_i + batch_size]\n yield np.array(pad_sentence_batch(source_batch)), np.array(pad_sentence_batch(target_batch))", "def samples(self, samples):\n\n self._samples = samples", "def next_batch(self, batch_size=8):\n raise NotImplementedError()", "def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n 
self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def next(self):\n\n if self.i_sample < self.n_sample:\n df_batch = self.grouped[self.i_sample:min(self.n_sample, self.i_sample + self.batch_size)]\n # at end of epoch, number of sample remains may be smaller than batch size\n if len(df_batch) < self.batch_size:\n df_sample = random.sample(self.grouped, self.batch_size-len(df_batch))\n df_batch = df_batch + df_sample\n try:\n assert len(df_batch) == self.batch_size\n except AssertionError:\n print(self.i_sample, df_sample, df_batch)\n\n # get random frame_idxs\n if self.train:\n flips = np.random.choice(a=[False, True], size=(self.batch_size,), p=[0.5, 0.5])\n else:\n flips = np.zeros(self.batch_size, dtype=bool)\n\n\n video = sample_clips(df_batch, flips, self.batch_size, self.n_frame,\n self.scale_w, self.scale_h, self.sample_half_time, self.train)\n\n bboxes = np.zeros((self.batch_size, self.n_frame // self.temporal_scale, self.n_bbox, 5))\n labels = np.zeros((self.batch_size, self.n_bbox, self.num_class))\n for i in range(len(df_batch)):\n tmp_bbox, tmp_label = self.get_bbox_and_label(df_batch[i], flips[i], i, self.scale_w, self.scale_h)\n bboxes[i] = tmp_bbox\n labels[i] = tmp_label\n\n if self.debug_dataloader:\n with open('dataset/AVA_v2.1/ava_action_list_v2.1.pbtxt') as fd:\n lines = fd.readlines()\n\n labels_info = []\n for i in range(80):\n name_line = lines[i * 5 + 1]\n label_id_line = lines[i * 5 + 2]\n label_type_line = lines[i * 5 + 3]\n\n name = name_line[name_line.find('\"') + 1:name_line.rfind('\"')]\n label_id = int(label_id_line.strip().split(':')[1].strip())\n label_type = label_type_line.strip().split(':')[1].strip()\n\n assert label_id == i + 1\n labels_info.append({\n 'name': name,\n 'label_type': label_type\n })\n\n for bidx in range(self.batch_size):\n s_video = video[bidx, ...]\n s_bboxes = bboxes[bidx, ...]\n s_labels = labels[bidx, ...]\n\n window_name = 'batch_idx_'+str(bidx)\n if self.train:\n window_name 
+= '_train'\n else:\n window_name += '_val'\n\n\n bbox = s_bboxes[0, 0, 1:].astype(np.int32)\n label_indices = np.where(s_labels[0, :])[0]\n\n for fidx in range(self.n_frame):\n # print('fidx', fidx)\n save_name = window_name + '_' + str(fidx)\n tmp_img = (s_video[:, fidx, :, :].transpose((1,2,0))).astype(np.uint8).copy()\n\n cv2.rectangle(tmp_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=(0,0,255), thickness=2)\n for en_idx, label_index in enumerate(label_indices):\n # print('label_index', label_index, 'len', len(labels_info))\n cv2.putText(tmp_img, labels_info[label_index]['name'], (bbox[0], bbox[1] + en_idx * 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color=(0, 255, 0), thickness=1)\n\n cv2.imwrite(save_name+'.jpg', tmp_img)\n\n\n #print(video.shape, bboxes.shape, labels.shape)\n ret = mx.io.DataBatch(data=[mx.nd.array(video), mx.nd.array(bboxes)],\n label=[mx.nd.array(labels),],\n provide_data=self.provide_data,\n provide_label=self.provide_label)\n\n self.i_sample += self.batch_size\n return ret\n else:\n raise StopIteration" ]
[ "0.67769766", "0.6642692", "0.66230625", "0.6600419", "0.6592506", "0.64439774", "0.64208335", "0.63826704", "0.62283033", "0.619053", "0.61626637", "0.6160936", "0.61458206", "0.6139684", "0.61335284", "0.61245483", "0.61134374", "0.6109564", "0.60928875", "0.60751015", "0.6073456", "0.6071311", "0.606895", "0.6056902", "0.6046535", "0.6035673", "0.6028624", "0.602325", "0.6013657", "0.6011615", "0.60091263", "0.60024095", "0.5989787", "0.59784913", "0.59779084", "0.597138", "0.5968291", "0.59593225", "0.5957691", "0.5952634", "0.59508944", "0.59502244", "0.59386414", "0.59365547", "0.5929014", "0.59207505", "0.59126765", "0.5907854", "0.59069496", "0.5904922", "0.5903771", "0.5895348", "0.58947015", "0.58899456", "0.5882897", "0.5878427", "0.5876755", "0.58759", "0.5873941", "0.5869417", "0.5854498", "0.58533704", "0.5852776", "0.58495307", "0.5846189", "0.58433634", "0.58340675", "0.5822984", "0.5822408", "0.581928", "0.58147705", "0.5813848", "0.5808603", "0.5803609", "0.58030504", "0.5797823", "0.57958287", "0.5794992", "0.5794419", "0.57933116", "0.5790247", "0.57872224", "0.57849574", "0.578335", "0.57809675", "0.57798266", "0.5779718", "0.5773251", "0.57640976", "0.57616305", "0.5751019", "0.57492554", "0.5749143", "0.5742852", "0.5736812", "0.5734309", "0.5734299", "0.5725935", "0.5725935", "0.5724539" ]
0.58928144
53
Do computations when multiple samples with equal timestamps are received
def on_full(self, bag_data):
    # bag_data is a dict {ts, drone, subject, peds, objs}
    # Bear in mind some simulations cannot contain neither peds nor objs
    dr_pos, dr_vel, dr_acc = bag_data["drone"].get_data()
    subj_pos = bag_data["subject"].get_pos()
    peds_poses = []
    if bag_data["peds"] is not None:
        peds_poses = [p.get_pos() for p in bag_data["peds"].values()]
    objs_poses = []
    if bag_data["objs"] is not None:
        objs_poses = [m.pose for m in bag_data["objs"]]
    # Bear in mind some simulations cannot contain neither peds nor objs
    # (ped and obj force would be 0.0)
    forces = [0,0,0]
    if not self.file.closed:
        rowdata = [ bag_data["ts"], dr_pos, dr_vel, dr_acc, subj_pos, forces[0], forces[1], forces[2] ] + peds_poses + objs_poses
        # print("\n\nROWDATA: ", rowdata, "\n\n")
        self.csv_writer.writerow(rowdata)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT audio FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidA,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of silence and voice/noise occurences\n\t\t\tsilence = len([item for item in records if item==0])\n\t\t\tvoice = len([item for item in records if item==1 or item==2])\n\t\t\tnoise.append(len([item for item in records if item==3]))\n\t\t\tif silence>0:\n\t\t\t\tvoiceToSilenceRatio.append(float(voice) / silence)\n\t\t\telse:\n\t\t\t\tvoiceToSilenceRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((voiceToSilenceRatio,var_stats,std_stats,noise))))\n\t\"\"\"\ndef main():\n\tcon = psycopg2.connect(database='dataset', user='tabrianos')\n\tcur = con.cursor()\n\t#warnings.simplefilter(\"error\")\n\t#centers = np.load('visualizations/clustercenters.npy')\n\n# ------------TEST CASE-----------------------------\n\tfor loso in uids1:\n\t\tytest=[]\n\t\taccuracies =[]\n\t\tacc=0\n\t\tmaxminAcc =[]\n\t\tXbig = np.zeros([1,132])\t\n\t\tYbig = np.zeros([1])\n\t\tlabels=[]\n\t\tlabels.append(19)\n\t\t# loso means leave one student out: forest is trained on other users data\n\t\t# then tests are run on 'loso' student \n\t\tuids2.remove(loso)\n\t\tuids2.append(loso)\n\t\tprint('LOSO: {0}'.format(loso))\n\t\tfor testUser in uids2:\n\t\t\tprint(testUser)\n\t\t\t# lists that temporary store features before concatenation\n\t\t\t\n\t\t\tcolocationList =[]\n\t\t\tconversationList =[]\n\t\t\tactivityList=[]\n\t\t\taudioList = []\n\n\t\t\t# loading stress labels from database (currently on 0-5 scale)\n\t\t\trecords = loadSleepLabels(cur,testUser) \n\t\t\n\n\t\t\t\n\t\t\t#X,Y store initially the dataset and the labels accordingly\n\t\t\tY = np.zeros(len(records))\n\t\t\tX = np.array(records)\n\n\t\n\n\n\t\t\tfor i in range(0,len(records)):\n\t\t\t\tcolocationList.append( colocationEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tconversationList.append( convEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tactivityList.append(activityEpochFeats(cur,testUser,X[i][0]))\n\t\t\t#\tScreenList.append( screenStatFeatures(cur,testUser,X[i][0],day) )\n\t\t\t\taudioList.append(audioEpochFeats(cur,testUser,X[i][0]))\n\t\t\n\t\t\t\tif testUser==loso:\n\t\t\t\t\tytest.append(X[i][1])\n\t\t\t\t#labels list holds user ids to be used in LeaveOneOut pipeline\n\t\t\t\tlabels.append(testUser[-2:])\n\t\t\t\tY[i] = X[i][2]\n\n\t\t\t\n\t\t\t#concatenating features in one array \n\n\t\t\tXtt = np.concatenate((np.array(activityList),np.array(conversationList),np.array(colocationList),np.array(audioList)),axis=1)\n\t\t\tprint(Xtt.shape)\n\n\t\t\t#initiating and training forest, n_jobs indicates threads, -1 means all available\n\t\t\t# while the test student is not reached, training data are merged into one big matrix\n\t\t\tXbig = np.concatenate((Xbig,Xtt),axis=0)\n\t\t\tYbig = np.concatenate((Ybig,Y),axis=0)\n\n\t\t\tdel 
colocationList[:]\n\t\t\tdel conversationList[:]\n\t\t\tdel activityList[:]\n\t\t\tdel audioList[:]\n\n\n\n\t\t\tif testUser!=loso:\n\t\t\t\tXbig = Xbig.astype(np.float64)\n\t\t\t\tprint(Xbig.dtype)\n\t\t\t\t\n\n\t\t\t# when loso, tests are run\n\t\t\telif testUser==loso:\n\t\t\t\t#Xbig = preprocessing.scale(Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyX.npy',Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyY.npy',Ybig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyLOO.npy',np.array(labels))\n\t\t\t\tprint(Xbig.shape[0],Ybig.shape[0],len(labels))\n\t\t\t\tprint('train matrix saved')\n\t\t\t\ta = raw_input()\n\t\t\t\tforest = RandomForestClassifier(n_estimators=100, n_jobs = -1)\n\t\t\t\tforest.fit(Xbig,Ybig)\n\t\t\t\tef = forest.score(Xtt,ytest)\n\t\t\t\tprint(ef*100)\n\n\t\t\t\toutput = np.array(forest.predict(Xtt))\n\t\t\t\tscored = output - np.array(ytest)\n\n\t\t\t\t# Counting as correct predictions the ones which fall in +/-1, not only exact\n\t\t\t\t# I call it the 'Tolerance technique'\n\t\t\t\tcorrect=0\n\t\t\t\tc = Counter(scored)\n\t\t\t\tfor k in c.keys():\n\t\t\t\t\tif k<2 and k>-2:\n\t\t\t\t\t\tcorrect += c[k]\n\t\t\t\t\n\t\t\t\tscore = float(correct)/len(scored)\n\t\t\t\tprint(score*100)\n\n\n\n\t\tprint(Xbig.shape)\n\t\n\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\t\"\"\"", "def record_data(self, no_of_samples, interval):\r\n\r\n #tempory storage while function is completing\r\n temp_return_list = []\r\n\r\n #colec\r\n for i in range(0,no_of_samples):\r\n\r\n print(i)\r\n sensor_value = self.sen.get_sensor_value()\r\n\r\n temp_return_list.append([sensor_value,(i*interval)])\r\n\r\n time.sleep(interval)\r\n\r\n \r\n \r\n self.return_data = temp_return_list", "def processData(pipe, event, pulses):\n logging.info(\"Started processData\")\n start_time = time.time()\n for pulse in range(pulses):\n samples = pipe.get()\n# time.sleep(0.001)\n end_time = time.time()\n elapsed = end_time - start_time\n samplesProcessed = (pulses * len(samples[0]) * len(samples))\n logging.info(\"processData processed %d Msamples in %.3f s\",\n samplesProcessed / 1e6,\n elapsed)\n logging.info(\"processData rate: %.3f Msa/s in lumps of %d samples\",\n samplesProcessed / elapsed / 1e6,\n dig.pointsPerCycle)", "def _got_chunk(self, chunk, timestamp):\n log.debug(\"_got_chunk: chunk=%s\", chunk)\n self._extract_sample(D1000TemperatureDataParticle, D1000TemperatureDataParticle.regex_compiled(), chunk,\n timestamp)", "def UTC_times(times, \n trace, \n diff_thres = 30.0):\n # set times values to seconds\n \n #AUTOMATE THIS SECTION!\n #CHECK THAT THIS IS CORRECT\n times = times / trace.stats.sampling_rate\n #remove unwanted parts of times numpy array \n times = times[:,0]\n \n #remove the first instance of time because it is \n #somehow always of the wrong format!\n #times = np.delete(times, 0) \n \n event_times = []\n event = [times[0]]\n \n start_time = trace.stats.starttime\n \n #for item in times:\n # print start_time + item\n\n for i in range(1, len(times)):\n \n # check if two events in times array have a difference < diff_thres, \n #if not, run average of those times, if so append that events to a \n #new events_times list\n \n #time_diff = times[i + 1] - times[i]\n \n time_diff = times[i] - times[i-1]\n\n #save info until events are far enough apart! \n if time_diff < diff_thres:\n\n event.append(times[i])\n \n \n #raise conditional for if events are far enough apart! 
\n else:\n\n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n\n event_times.append([event_start, event_end])\n \n event = [] \n \n event.append(times[i])\n\n #if event still contains something for any reason, add it to event times\n if len(event) > 0: \n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n event_times.append([event_start, event_end])\n event = [] \n \n\n\n #if len(event_times) == 0 and len(event) > 0 or time_diff > diff_thres and len(event) > 0:\n \n #event_times.append(sum(event) / len(event))\n \n # event_start = event[0] - 2 #minus 5 seconds\n # event_end = event[-1] + 2 #add 5 seconds\n \n # event_times.append([event_start, event_end])\n \n # event = []\n \n #event_times.append(times[i])\n \n # else:\n # event.append(times[i])\n \n\n UTC_events = []\n\n #earthquake length threshold is 10 seconds and above!\n eq_len = 0#5.0\n\n for i in event_times:\n estart = start_time + i[0]\n eend = start_time + i[1]\n \n if eend - estart > eq_len:\n UTC_events.append([estart, eend])\n \n #UTC_events = np.unique(np.asarray(UTC_events))\n\n \n return UTC_events", "def _got_chunk(self, chunk, timestamp):\n if self._extract_sample(SBE19DataParticle, SBE19DataParticle.regex_compiled(), chunk, timestamp):\n self._sampling = True\n return\n\n for particle_class in SBE19HardwareParticle, \\\n SBE19CalibrationParticle, \\\n SBE19ConfigurationParticle, \\\n SBE19StatusParticle, \\\n OptodeSettingsParticle:\n if self._extract_sample(particle_class, particle_class.regex_compiled(), chunk, timestamp):\n return", "def sample_run(df, n_epochs = 10, window_size = 500, com = 12, p_anoms = .5):\n import numpy as np\n\n # create arrays that will hold the results of batch AD (y_true) and online AD (y_pred)\n y_true = []\n y_pred = []\n run_times = []\n \n # check which unique machines, sensors, and timestamps we have in the dataset\n machineIDs = df['machineID'].unique()\n sensors = df.columns[2:]\n timestamps = df['datetime'].unique()[window_size:]\n \n # sample n_machines_test random machines and sensors \n random_machines = np.random.choice(machineIDs, n_epochs)\n random_sensors = np.random.choice(sensors, n_epochs)\n\n # we intialize an array with that will later hold a sample of timetamps\n random_timestamps = np.random.choice(timestamps, n_epochs)\n \n for i in range(0, n_epochs):\n # take a slice of the dataframe that only contains the measures of one random machine\n df_s = df[df['machineID'] == random_machines[i]]\n \n # smooth the values of one random sensor, using our run_avg function\n smooth_values = run_avg(df_s[random_sensors[i]].values, com)\n \n # create a data frame with two columns: timestamp, and smoothed values\n df_smooth = pd.DataFrame(data={'timestamp': df_s['datetime'].values, 'value': smooth_values})\n\n # load the results of batch AD for this machine and sensor\n anoms_s = anoms_batch[((anoms_batch['machineID'] == random_machines[i]) & (anoms_batch['errorID'] == random_sensors[i]))]\n \n # find the location of the t'th random timestamp in the data frame\n if np.random.random() < p_anoms:\n anoms_timestamps = anoms_s['datetime'].values\n np.random.shuffle(anoms_timestamps)\n counter = 0\n while anoms_timestamps[0] < timestamps[0]:\n if counter > 100:\n return 0.0, 9999.0\n np.random.shuffle(anoms_timestamps)\n counter += 1\n random_timestamps[i] = anoms_timestamps[0]\n \n # select the test case\n test_case = df_smooth[df_smooth['timestamp'] == random_timestamps[i]]\n test_case_index = 
test_case.index.values[0]\n\n\n # check whether the batch AD found an anomaly at that time stamps and copy into y_true at idx\n y_true_i = random_timestamps[i] in anoms_s['datetime'].values\n\n # perform online AD, and write result to y_pred\n y_pred_i, run_times_i = detect_ts_online(df_smooth, window_size, test_case_index)\n \n y_true.append(y_true_i)\n y_pred.append(y_pred_i)\n run_times.append(run_times_i)\n \n return fbeta_score(y_true, y_pred, beta=2), np.mean(run_times)", "def collect_samples(serialPort,NO_SENSORS,NO_SAMPLES,log):\n run = '1'\n badSamples = 0\n count = 1\n log_temp = []\n temp = [0] * 20\n NO_FIELDS = (NO_SENSORS * 3) + 1\n \n while (run == '1'):\n # If the input buffer is not empty read the data out into rawData using \\n as a delimiter.\n if (serialPort.inWaiting()>0):\n rawData = serialPort.readline()\n print(rawData)\n \n # If invalid data is recieved this prevents program crash\n try:\n # Decode the bytes into a string\n data = rawData.decode()\n \n # Split x, y, z and newline values into a list\n if (count >= (NO_SAMPLES + 1)):\n endTime_temp = data.split(\" \", 2)\n if (len(endTime_temp) == 2 and '' not in endTime_temp):\n endTime = int(endTime_temp[0])\n else:\n endTime = 780\n print('Time not recieved')\n print('Lost Samples: ' + str(badSamples))\n run = '0'\n else:\n data_readings = data.split(\" \", NO_FIELDS)\n print(data_readings)\n \n # A correct sample should contain 16 values and not include null and so this is used\n # to validate the data and record any samples that are discarded in this way\n if (len(data_readings) == NO_FIELDS and '' not in data_readings):\n # Discard newline characters before saving data\n int_data_readings = list(map(int,data_readings[:(NO_FIELDS - 1)]))\n log_temp.append(int_data_readings)\n else:\n badSamples += 1\n except:\n print('Invalid data recieved')\n \n count += 1\n\n samplingPeriod = (endTime/NO_SAMPLES)/NO_SENSORS\n timeStamp = 0.0\n\n for i in range(0,len(log_temp)):\n for j in range(0,NO_SENSORS):\n temp[0+(j*4)] = log_temp[i][0+(j*3)]\n temp[1+(j*4)] = log_temp[i][1+(j*3)]\n temp[2+(j*4)] = log_temp[i][2+(j*3)]\n temp[3+(j*4)] = timeStamp\n timeStamp += samplingPeriod\n log.append(temp.copy())", "def test_data_handlers(timeseries, handler):\n handler = handler(timeseries, HISTORICAL_CONTEXT_NUMBER)\n updates = []\n i = 0\n while True:\n try:\n context, update = handler.next()\n except NoMoreData:\n break\n i += 1\n\n updates.append(update)\n # import pdb; pdb.set_trace()\n assert context[-1].datetime < update.datetime\n\n # total lenght of series(25) - context(5) which aren't provided as updates\n assert i == 20\n updates = [x.datetime.date().isoformat() for x in updates]\n assert updates[10] == \"2018-01-22\"", "def _got_chunk(self, chunk, timestamp):\n new_timestamp_match = _NEW_DATFILE_PARTICLE_TIMESTAMP_MATCHER.match(chunk)\n if new_timestamp_match:\n date = datetime.strptime(new_timestamp_match.group(1), '%Y-%m-%dT%H:%M:%S.%f')\n self.log_timestamp = (date - datetime(1900, 1, 1)).total_seconds()\n return\n\n new_body_match = _NEW_DATFILE_PARTICLE_BODY_MATCHER.match(chunk)\n if new_body_match:\n # Only publish D1000 playback if we have a timestamp.\n if self.log_timestamp is not None:\n self._extract_sample(\n D1000TemperatureDataParticle,\n _NEW_DATFILE_PARTICLE_BODY_MATCHER,\n chunk,\n self.log_timestamp\n )\n\n return\n\n old_timestamp_match = _OLD_DATFILE_PARTICLE_TIMESTAMP_MATCHER.match(chunk)\n if old_timestamp_match:\n date = datetime.strptime(old_timestamp_match.group(1), '%Y-%m-%d %H:%M:%S')\n 
self.log_timestamp = (date - datetime(1900, 1, 1)).total_seconds()\n return\n\n old_body_match = _OLD_DATFILE_PARTICLE_BODY_MATCHER.match(chunk)\n if old_body_match:\n # Only publish D1000 playback if we have a timestamp.\n if self.log_timestamp is not None:\n self._extract_sample(\n D1000TemperatureDataParticle,\n _OLD_DATFILE_PARTICLE_BODY_MATCHER,\n chunk,\n self.log_timestamp\n )", "def process_original_data():\n audioSamples = AudioDataOriginal.query.filter().all()\n\n currentSampleDatetime = None\n maxValueRecord = None\n for sample in audioSamples:\n if currentSampleDatetime is None:\n currentSampleDatetime = sample.datetime\n if maxValueRecord is None:\n maxValueRecord = sample\n # Check if we've reached a new sample batch.\n if currentSampleDatetime != sample.datetime:\n # If we reach a new sample batch, update the `processedValue` of the\n # record held in `maxValueRecord` to be the `audio` value.\n maxValueRecord.processedValue = maxValueRecord.audio\n # Update to the first value of the new sample batch.\n currentSampleDatetime = sample.datetime\n maxValueRecord = sample\n elif sample.audio > maxValueRecord.audio:\n # If not, then check if the value is higher than the current one\n # and overwrite if so.\n maxValueRecord = sample\n # Update the last record, since this won't be caught in the if statement\n # inside the loop (it compares the current.datetime vs a new.datetime after\n # all).\n maxValueRecord.processedValue = maxValueRecord.audio\n db.session.commit()", "def update_live_data(n, last_time, id1, id2, power):\n if power:\n raise PreventUpdate\n\n timer_start = perf_counter()\n # 1 sec delay so server has time to add live data\n end_time = datetime.now(timezone.utc) - timedelta(seconds=1)\n\n # Initialization and lag prevention\n if last_time is None or end_time - strptime_fix(last_time) > timedelta(seconds=3):\n logging.warning('Falling behind! 
Start %s End %s', last_time, end_time)\n return dash.no_update, dash.no_update, end_time.isoformat(), dash.no_update\n\n # Query data from SMIP\n logging.info(f'start_time {last_time} end_time {end_time}')\n timer_query_start = perf_counter()\n r = conn.get_data(last_time, end_time.isoformat(),\n [id1, id2], timeout=1)\n timer_query_end = perf_counter()\n response_json: dict = r.json()\n logging.debug(response_json.keys())\n if 'errors' in response_json:\n logging.error(response_json)\n raise Exception()\n data = response_json['data']['getRawHistoryDataWithSampling']\n logging.info('Got %s responses in %s seconds', len(\n data), timer_query_end - timer_query_start)\n\n # Used for measuring performance\n start_processing = perf_counter()\n\n # Unpack data\n def unpack(id: int):\n \"\"\"Unpacks return data into time and value lists\"\"\"\n id = int(id)\n time_list = [i['ts'] for i in data if int(i['id']) == id]\n val_list = [i['floatvalue'] for i in data if int(i['id']) == id]\n # SMIP always returns one entry before the start time for each ID, we don't need this\n if len(time_list) < 2 or len(val_list) < 2:\n return dash.no_update\n time_list.pop(0)\n val_list.pop(0)\n # Measure sampling rate\n rate = nan\n if len(time_list) > 1:\n rate = (strptime_fix(time_list[1])\n - strptime_fix(time_list[0])).total_seconds()\n return {'time_list': time_list, 'val_list': val_list, 'rate': rate}\n\n # Used for measuring performance\n data_processed = perf_counter()\n logging.info('Total %s Query %s Processing %s', data_processed - timer_start, timer_query_end - timer_query_start,\n data_processed - start_processing)\n\n return unpack(id1), unpack(id2), end_time.isoformat(), \\\n [f'Last updated {end_time.astimezone()},',\n html.Br(),\n f'received {len(data)} samples in {round(data_processed - timer_start, 3)} seconds']", "def process(self, timeframe, timestamp):\n pass", "def predict(self):\n\n global pos\n pos = (pos + 1) % len(ue_data) # iterate through entire list one by one in cycle manner and will be updated when live feed will be coming through KPIMON to influxDB\n sample = ue_data[pos]\n ue_df = pd.DataFrame([sample], columns=db.data.columns)\n val = predict_anomaly(self, ue_df)\n if (val is not None) and (len(val) > 2):\n msg_to_ts(self, val)", "def _compute_samples(self, samples):\n return samples", "def timings(samples):\n groups = samples.groupby(axis=1, level=0)\n return groups.apply(lambda group: group.iloc[:, 1] - group.iloc[:, 0])", "def _query_data(self, index, tag):\n version, datapoints = yield self.quasar.stream_get(self.name, tag, tag+(15*qdf.MINUTE))\n values = np.empty((BLOCK_SIZE,), dtype=(type(datapoints[0])))\n values[:] = None\n \n for point in datapoints:\n time = float(point.time - tag)\n time_index = int(round(time*SAMPLE_RATE/qdf.SECOND))\n values[time_index] = point\n\n self.cache[index][CACHE_INDEX_TAG] = tag\n self.cache[index][CACHE_INDEX_DATA] = values", "def run():\n\n import matplotlib.pyplot as plt\n\n anomalies_t = []\n anomalies_v = []\n anomalies_c = []\n\n all_t = []\n all_v = []\n\n rows = []\n for i, row in dataSet.iterrows():\n\n inputData = row.to_dict()\n\n detectorValues = handleRecord(inputData)\n\n if (detectorValues[0] > 0.65):\n anomalies_t.append(inputData[\"timestamp\"])\n anomalies_v.append(inputData[\"value\"])\n anomalies_c.append(detectorValues[0])\n\n all_t.append(inputData[\"timestamp\"])\n all_v.append(inputData[\"value\"])\n\n outputRow = list(row) + list(detectorValues)\n\n rows.append(outputRow)\n\n # Progress report\n if (i % 1000) == 0:\n 
print \".\",\n sys.stdout.flush()\n\n fig, ax = plt.subplots()\n\n ax.plot(all_t, all_v)\n ax.plot(anomalies_t, anomalies_v, 'ro')\n\n plt.show()\n\n ans = pandas.DataFrame(rows)\n return ans", "def process_point(meta, data):\n point = dfparser.Point()\n point.ParseFromString(data)\n\n sample_freq = meta['params']['sample_freq']\n threshold = meta['process_params']['threshold']\n\n events_all = []\n for channel in point.channels:\n for i, block in enumerate(channel.blocks):\n SOCKETIO.emit('progress',\n {'val': int((i/len(channel.blocks))*100)})\n eventlet.sleep(0)\n events = []\n for event in block.events:\n data = np.frombuffer(event.data, np.int16)\n events.append(extract_amps_approx2(data, event.time,\n threshold,\n sample_freq)[0])\n events = np.hstack(events)[0::2]\n events_all.append(events)\n\n events_all = np.hstack(events_all)\n return events_all", "def hit(self, timestamp):\n idx = timestamp % 300\n if self.counter[idx][1] == timestamp:\n self.counter[idx][0] += 1\n else:\n self.counter[idx] = [1, timestamp]", "def test_time_series_from_file():\r\n\r\n TR = 1.35\r\n ts_ff = io.time_series_from_file\r\n\r\n #File names:\r\n fmri_file1 = os.path.join(data_path,'fmri1.nii.gz')\r\n fmri_file2 = os.path.join(data_path,'fmri2.nii.gz')\r\n\r\n #Spatial coordinates into the volumes:\r\n coords1 = np.array([[5,5,5,5],[5,5,5,5],[1,2,3,4]])\r\n coords2 = np.array([[6,6,6,6],[6,6,6,6],[3,4,5,6]])\r\n\r\n #No averaging, no normalization:\r\n t1 = ts_ff([fmri_file1,fmri_file2],[coords1,coords2],TR)\r\n\r\n npt.assert_equal(t1[0].shape,(4,80)) # 4 coordinates, 80 time-points\r\n\r\n t2 = ts_ff([fmri_file1,fmri_file2],[coords1,coords2],TR,average=True)\r\n\r\n npt.assert_equal(t2[0].shape,(80,)) # collapse coordinates,80 time-points\r\n\r\n t3 = ts_ff(fmri_file1,coords1,TR,normalize='zscore')\r\n\r\n #The mean of each channel should be almost equal to 0:\r\n npt.assert_almost_equal(t3.data[0].mean(),0)\r\n #And the standard deviation should be almost equal to 1:\r\n npt.assert_almost_equal(t3.data[0].std(),1)\r\n\r\n t4 = ts_ff(fmri_file1,coords1,TR,normalize='percent')\r\n\r\n #In this case, the average is almost equal to 0, but no constraint on the\r\n #std:\r\n npt.assert_almost_equal(t4.data[0].mean(),0)\r\n\r\n #Make sure that we didn't mess up the sampling interval:\r\n npt.assert_equal(t4.sampling_interval,nitime.TimeArray(1.35))\r\n\r\n # Test the default behavior:\r\n data = io.load(fmri_file1).get_data()\r\n t5 = ts_ff(fmri_file1)\r\n npt.assert_equal(t5.shape, data.shape)\r\n npt.assert_equal(t5.sampling_interval, ts.TimeArray(1, time_unit='s'))\r\n\r\n # Test initializing TR with a TimeArray:\r\n t6= ts_ff(fmri_file1, TR=ts.TimeArray(1350, time_unit='ms'))\r\n npt.assert_equal(t4.sampling_interval, t6.sampling_interval)\r\n\r\n # Check the concatenation dimensions:\r\n t7 = ts_ff([fmri_file1, fmri_file2])\r\n npt.assert_equal([t7.shape[:3], t7.shape[-1]], [data.shape[:3], data.shape[-1]*2])\r\n\r\n t8 = ts_ff([fmri_file1, fmri_file2], average=True)\r\n npt.assert_equal(t8.shape[0], data.shape[-1]*2)\r\n\r\n t9 = ts_ff([fmri_file1, fmri_file2], average=True, normalize='zscore')\r\n npt.assert_almost_equal(t9.data.mean(), 0)", "def newChunk(self, data, sample_period):\n added = False\n found = False\n for r in self._receiving:\n if r[0] == data.timestamp:\n r[1][data.sensor - 1] = data\n found = True\n break\n if found is False:\n self._receiving.append((data.timestamp, [None] * self._sensors))\n self._receiving[-1][1][data.sensor - 1] = data\n self._receiving.sort(key=lambda x: 
x[0])\n\n # all data for given timestamp received\n while len(self._receiving) > 0:\n r = self._receiving[0]\n if r[1].count(None) > 0:\n break\n dl = len(data.accelerationX)\n timestamps = np.arange(r[0], r[0] + sample_period * dl, sample_period)\n\n def copy_data(start, l):\n self.data[\"timestamp\"][\n self.current_index : self.current_index + l\n ] = timestamps[start : start + l]\n for s in range(1, self._sensors + 1):\n self.data[f\"{s} X\"][\n self.current_index : self.current_index + l\n ] = r[1][s - 1].accelerationX[start : start + l]\n self.data[f\"{s} Y\"][\n self.current_index : self.current_index + l\n ] = r[1][s - 1].accelerationY[start : start + l]\n self.data[f\"{s} Z\"][\n self.current_index : self.current_index + l\n ] = r[1][s - 1].accelerationZ[start : start + l]\n\n l = min(dl, self._size - self.current_index)\n if l > 0:\n copy_data(0, l)\n self.current_index += l\n dl -= l\n\n if dl > 0:\n self.current_index = 0\n self.filled = True\n copy_data(l, dl)\n self.current_index = dl\n\n self._receiving.remove(r)\n added = True\n\n chunk_removed = False\n if len(self._receiving) > self._window:\n self._receiving = self._receiving[1:]\n chunk_removed = True\n return (added, chunk_removed)", "def _compute_epochs(events, rel_start_ms, rel_stop_ms, timestamps, sr):\n\n # THIS IS SO MUCH FASTER THAN NP.WHERE, CRAZY\n offsets = events.stTime.apply(lambda x: np.searchsorted(timestamps, x))\n # offsets = events.stTime.apply(lambda x: np.where(timestamps >= x)[0][0])\n rel_start_micro = int(rel_start_ms * sr / 1e3)\n rel_stop_micro = int(rel_stop_ms * sr / 1e3)\n epochs = np.array([(offset + rel_start_micro, offset + rel_stop_micro) for offset in offsets])\n return epochs", "def aggregated_second(self, data):\n if self.results:\n if self.results[-1][\"ts\"] >= data[\"ts\"]:\n raise AssertionError(\"TS sequence wrong: %s>=%s\" % (self.results[-1][\"ts\"], data[\"ts\"]))\n self.results.append(data)", "def _init_sample(self):\n self.timestamps = np.zeros(5)\n self.data = np.zeros((5, 12))", "def _sample_from_null_frm_dist(mean_spike_count, total_baseline_time, total_effect_time, sample_size=10 ** 6):\n total_time = total_baseline_time + total_effect_time\n\n samples = (\n st.poisson(mean_spike_count * total_effect_time / total_time).rvs(sample_size) / total_effect_time\n -\n st.poisson(mean_spike_count * total_baseline_time / total_time).rvs(sample_size) / total_baseline_time\n )\n\n # convert 1/ms to 1/s (Hz)\n samples = samples / MS_TO_S\n\n return samples", "def testQueryWithTimestamp(self):\n for i in range(5):\n row_name = \"aff4:/row:query_with_ts\"\n data_store.DB.Set(row_name, \"metadata:5\", \"test\", timestamp=i + 10,\n replace=False, token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", \"test\", timestamp=i + 10,\n replace=False, token=self.token)\n\n # Read all timestamps.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=data_store.DB.ALL_TIMESTAMPS, token=self.token)]\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n self.assertEqual(len(attributes[\"aff4:type\"]), 5)\n\n # Read latest timestamp.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=data_store.DB.NEWEST_TIMESTAMP, token=self.token)]\n\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], 
\"aff4:/row:query_with_ts\")\n self.assertEqual(len(attributes[\"aff4:type\"]), 1)\n self.assertEqual(attributes[\"aff4:type\"][0][0], \"test\")\n\n # Newest timestamp is 4.\n self.assertEqual(attributes[\"aff4:type\"][0][1], 14)\n\n # Now query for a timestamp range.\n rows = [row for row in data_store.DB.Query(\n [], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:query_with_ts\",\n timestamp=(11, 13), token=self.token)]\n\n attributes = rows[0]\n self.assertEqual(attributes[\"subject\"][0][0], \"aff4:/row:query_with_ts\")\n # Now we should have three timestamps.\n self.assertEqual(len(attributes[\"aff4:type\"]), 3)\n\n timestamps = [attribute[1] for attribute in attributes[\"aff4:type\"]]\n self.assertListEqual(sorted(timestamps), [11, 12, 13])", "def run(self):\n while(not self.stop_event.is_set()):\n # read values until stop is sent\n response1 = _read_once(1,self.serial)\n response2 = _read_once(2,self.serial)\n #print(response)\n self.data1[\"d\"].append(response1) # Push response to the data list for later\n self.data2[\"d\"].append(response2) # Push response to the data list for later\n curTime = time.time()\n self.data1[\"t\"].append(curTime)\n self.data2[\"t\"].append(curTime)\n #sleep(0.0001) # I need to be small enough to capture peaks.\n return", "def statistical_feature_extraction(window_size, signal, axis, device, subject_ID):\n\n start_running = timeit.default_timer()\n try:\n directory = f'data/row_data/{device}_{signal}/S{subject_ID}_{device}_{signal}.csv'\n sampling_rate = 20\n window_size = int(sampling_rate * window_size)\n # print(window_size)\n except:\n print('Error! Can not find such directory.')\n\n raw_signal = pd.read_csv(directory)\n win_count = 0\n total_win_count = 0\n features_for_all_windows_one_activity = []\n features_for_all_windows_all_activities = []\n column_title = f'{axis}_{device}_{signal}'\n for class_label in np.append(range(1, 14), range(15, 20)):\n activity_ID = chr(class_label + 64)\n raw_data_one_activity = np.array(raw_signal.loc[raw_signal['activity_ID'] == activity_ID, [column_title]])\n raw_data_one_activity = pd.DataFrame(raw_data_one_activity)\n\n for data_point in range(0, len(raw_data_one_activity), window_size):\n win_count += 1\n start = data_point\n end = start + window_size\n time_domain_window = raw_data_one_activity[start:end]\n\n time_mean = pd.Series(time_domain_window.mean()).rename(f'{axis}_{signal}_mean')\n time_min = pd.Series(time_domain_window.min()).rename(f'{axis}_{signal}_min')\n time_max = pd.Series(time_domain_window.max()).rename(f'{axis}_{signal}_max')\n time_std = pd.Series(time_domain_window.std()).rename(f'{axis}_{signal}_std')\n time_median = pd.Series(time_domain_window.median()).rename(f'{axis}_{signal}_median')\n time_variance = pd.Series(time_domain_window.var()).rename(f'{axis}_{signal}_variance')\n zero_crossing_rate = pd.Series(zero_crossing(time_domain_window)).rename(\n f'{axis}_{signal}_zero_crossing')\n mean_crossing = pd.Series(mean_crossing_rate(time_domain_window)).rename(\n f'{axis}_{signal}_mean_crossing')\n activity_id_ = pd.Series(activity_ID).rename('Activity_ID')\n\n features_for_one_window_one_activity = pd.concat(\n [time_mean, time_min, time_max, time_std, time_median, time_variance, zero_crossing_rate, mean_crossing,\n activity_id_], axis=1)\n features_for_all_windows_one_activity.append(features_for_one_window_one_activity)\n # print(features_for_all_windows)\n\n print('Window count', win_count)\n total_win_count += win_count\n win_count = 0\n 
features_for_all_windows_all_activities.append(features_for_all_windows_one_activity)\n features = pd.concat(features_for_all_windows_all_activities[0], ignore_index=False)\n print(features)\n save_as_directory = f'feature_label_tables/feature_{device}_{signal}/feature_S{subject_ID}_{axis}_{device}_{signal}.csv'\n features.to_csv(save_as_directory, encoding='utf-8', index=False)\n finish_running = timeit.default_timer()\n print('Total number of windows: ', total_win_count)\n print('Running time: ', finish_running - start_running)", "def _handler_sample(self, *args, **kwargs):\n next_state = None\n timeout = time.time() + SAMPLE_TIMEOUT\n\n for i in self._units:\n self._do_command(Command.READ, i)\n\n particles = self.wait_for_particles([DataParticleType.D1000_PARSED], timeout)\n\n return next_state, (next_state, particles)", "def timestamp_processor(timestamps, with_tz=False, check_delta=False):\n if timestamps.count(timestamps[0]) == len(timestamps):\n unified_timestamp = timestamps[0]\n else:\n average_timestamp = sum([dt.timestamp for dt in timestamps])/len(timestamps)\n unified_timestamp = arrow.get(average_timestamp)\n\n if check_delta:\n for ts in timestamps:\n delta = unified_timestamp - arrow.get(ts)\n second_difference = abs(delta.total_seconds())\n\n if second_difference > 3600:\n # more than 1 hour difference\n raise ValueError(\"\"\"South Korea generation data is more than 1 hour apart,\n saw {} hours difference\"\"\".format(second_difference/3600))\n\n if with_tz:\n unified_timestamp = unified_timestamp.replace(tzinfo='Asia/Seoul')\n\n return unified_timestamp", "def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)", "def get_observation(self, ts: int) -> np.ndarray:\n \n self.connect_db()\n c = self.conn.cursor()\n c.arraysize = self.num_clients * self.ticks_per_observation + 10\n \n # Create np array of size [clients x ticks_per_observation x parameters]\n result = np.zeros((len(self.client_list),\n self.ticks_per_observation,\n len(self.conf['ceph-param'])), dtype=float)\n \n # Get performance indicators within time step\n # sql = '''SELECT clientid, ts, pis from perfs WHERE ts <= ? AND ts > ? ORDER BY clientid ASC, ts ASC'''\n c.execute('''SELECT clientid, ts, pis from perfs WHERE ts <= ? AND \n ts > ? 
ORDER BY clientid ASC, ts ASC''',\n (ts,ts - self.ticks_per_observation))\n \n data = c.fetchall()\n if len(data) < self.num_clients * self.ticks_per_observation - self.missing_entry_tolerance:\n raise NotEnoughDataError\n elif len(data) != self.num_clients * self.ticks_per_observation:\n logger.debug('Observation at ts {0} has {1} missing entries'.format(\n ts, self.num_clients * self.ticks_per_observation - len(data)))\n for row in data:\n client_id = row[0]\n pi = row[2]\n if client_id not in self.client_list:\n # non client MA should send in zero length data for now\n assert len(pickle.loads(pi)) == 0\n continue\n \n client_id_idx = self.client_list.index(client_id)\n ts_idx = row[1] - (ts - self.ticks_per_observation) - 1\n # numpy assignments also check that pi is in right shape\n result[client_id_idx, ts_idx] = pickle.loads(pi)\n \n self.conn.close()\n return result.reshape((self.observation_size,))", "def _set_sample(self, sample, PB_X, t):\n for sensor in PB_X.keys():\n sample.set(sensor, np.array(PB_X[sensor]), t=t+1)", "def testGenerateSamplesRuntimeAndTimestamps(self):\n timer = timing_util.IntervalTimer()\n with timer.Measure('First'):\n pass\n with timer.Measure('Second'):\n pass\n start0 = timer.intervals[0][1]\n stop0 = timer.intervals[0][2]\n start1 = timer.intervals[1][1]\n stop1 = timer.intervals[1][2]\n with mock.patch(\n 'perfkitbenchmarker.timing_util.TimestampMeasurementsEnabled',\n return_value=True):\n samples = timer.GenerateSamples()\n exp_samples = [\n sample.Sample('First Runtime', stop0 - start0, 'seconds'),\n sample.Sample('First Start Timestamp', start0, 'seconds'),\n sample.Sample('First Stop Timestamp', stop0, 'seconds'),\n sample.Sample('Second Runtime', stop1 - start1, 'seconds'),\n sample.Sample('Second Start Timestamp', start1, 'seconds'),\n sample.Sample('Second Stop Timestamp', stop1, 'seconds')]\n self.assertSampleListsEqualUpToTimestamp(samples, exp_samples)", "def create_msgs():\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]", "def test_single_ended_matching_sections_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n 
valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.13 * cable_len, 0.24 * cable_len)],\n \"warm\": [slice(0.26 * cable_len, 0.365 * cable_len)],\n }\n\n matching_sections = [\n (\n slice(0.01 * cable_len, 0.09 * cable_len),\n slice(0.51 * cable_len, 0.59 * cable_len),\n True,\n ),\n (\n slice(0.01 * cable_len, 0.09 * cable_len),\n slice(0.91 * cable_len, 0.99 * cable_len),\n True,\n ),\n ]\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing dalpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma & dalpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n 
fix_gamma=(482.6, 0),\n fix_dalpha=(6.46e-05, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # Test conf. ints. for the combination of everything\n ds_test.conf_int_single_ended(\n p_val=\"p_val\",\n p_cov=\"p_cov\",\n st_var=1.0,\n ast_var=1.0,\n conf_ints=[2.5, 50.0, 97.5],\n mc_sample_size=50,\n )\n\n ds_test_1 = ds_test.isel(time=-1)\n # ds_test_1.tmpf\n # ds_test_1.tmpf_mc.isel(CI=0).values\n # ds_test_1.tmpf_mc.isel(CI=2).values\n\n assert np.all(\n np.less(ds_test_1.tmpf_mc.isel(CI=0).values, ds_test_1.tmpf)\n ), \"Single-ended, trans. att.; 2.5% confidence interval is incorrect\"\n\n assert np.all(\n np.greater(ds_test_1.tmpf_mc.isel(CI=2).values, ds_test_1.tmpf)\n ), \"Single-ended, trans. att.; 97.5% confidence interval is incorrect\"", "def time_record(record, sampling_rate, detector, n_runs):\n start = timer()\n for _ in range(n_runs):\n detector(record, sampling_rate=sampling_rate)\n end = timer()\n avg_time = (end - start) / n_runs * 1000\n\n return avg_time", "def testData(nTimes = 1000, taus = [1.0e-6, 1.0e-4, 5.0e-3], amps = [0.3, 0.3, 0.4], sigma = 0.05, linear=False):\n\n if linear:\n Times = rangeLin(1.0e-7, 1.0e-3, nTimes) # in seconds\n else:\n Times = rangeLog(1.0e-9, 1.0, nTimes) # in seconds\n\n Data = np.zeros( Times.shape )\n for i in range(len(taus)):\n Data += amps[i]*np.exp(-1.*Times/taus[i])\n Data += sigma*np.random.randn( len(Data) )\n\n return Times, Data", "def job_gen(self, time_frame):", "def get_datapoints(self, rid, t0, t1, nmax = 300):\n self.read_curs.execute(\"SELECT COUNT(*) FROM readings WHERE readout_id = ? AND time >= ? AND time <= ?\", (int(rid), t0, t1))\n if self.read_curs.fetchone()[0] > nmax:\n self.read_curs.execute(\"SELECT avg(time),avg(value) FROM readings WHERE readout_id = ? AND time >= ? AND time <= ? GROUP BY round(time/?) ORDER BY time DESC\", (int(rid), t0, t1, (t1-t0)/nmax));\n else:\n self.read_curs.execute(\"SELECT time,value FROM readings WHERE readout_id = ? AND time >= ? AND time <= ? 
ORDER BY time DESC\", (int(rid), t0, t1))\n return self.read_curs.fetchall()", "def get_accumulated_data(self, topic, start_time, end_time, units):\n ignore_start_time = self._get_value('ignore_start_time', topic)\n ignore_end_time = self._get_value('ignore_end_time', topic)\n adjust_start_time = self._get_value('adjust_start_time', topic)\n adjust_end_time = self._get_value('adjust_end_time', topic)\n\n if ignore_start_time:\n self.logger.debug(\"Service ignoring start time.\")\n start_ts = self.peek_datetime(topic) - adjust_start_time\n else:\n start_ts = start_time - adjust_start_time\n\n if ignore_end_time:\n self.logger.debug(\"Service ignoring end time.\")\n end_ts = self.peek_last_datetime(topic) + adjust_end_time\n else:\n end_ts = end_time + adjust_end_time\n\n self.logger.debug(\"Service processing interval: %f %f\" %(start_ts, end_ts))\n accumulator = weewx.accum.Accum(weeutil.weeutil.TimeSpan(start_ts, end_ts))\n\n for data in self.get_data(topic, end_ts):\n if data:\n try:\n self.logger.debug(\"Service data to accumulate: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n accumulator.addRecord(data)\n except weewx.accum.OutOfSpan:\n self.logger.info(\"Service ignoring record outside of interval %f %f %f %s\"\n %(start_ts, end_ts, data['dateTime'], (to_sorted_string(data))))\n else:\n break\n\n target_data = {}\n if not accumulator.isEmpty:\n aggregate_data = accumulator.getRecord()\n self.logger.debug(\"Service data prior to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(aggregate_data['dateTime']), to_sorted_string(aggregate_data)))\n target_data = weewx.units.to_std_system(aggregate_data, units)\n self.logger.debug(\"Service data after to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(target_data['dateTime']), to_sorted_string(target_data)))\n else:\n self.logger.debug(\"Dervice queue was empty\")\n\n # Force dateTime to packet's datetime so that the packet datetime is not updated to the MQTT datetime\n if ignore_end_time:\n target_data['dateTime'] = end_time\n\n return target_data", "def test_aggregate(self):\n\n #10 Minute sampleing\n result = export.processExport(houseId=1,\n aggregate=\"10Min\")\n\n self.assertEqual(result.shape, (1440, 2))\n #And the second sample should be 10 minutes in\n self.assertEqual(result.index[1], datetime.datetime(2013, 01, 01, 0, 10, 00))\n\n #1/2 hourly\n result = export.processExport(houseId=1,\n aggregate=\"30Min\")\n\n #2 * 24 * 10 = 480\n self.assertEqual(result.shape, (480, 2))\n #And the second sample should be 10 minutes in\n self.assertEqual(result.index[1], datetime.datetime(2013, 01, 01, 0, 30, 00))\n\n #Hourly\n result = export.processExport(houseId=1,\n aggregate=\"1H\")\n\n self.assertEqual(result.shape, (240, 2))\n #And the second sample should be 10 minutes in\n self.assertEqual(result.index[1], datetime.datetime(2013, 01, 01, 1, 00, 00))\n\n\n #daily\n result = export.processExport(houseId=1,\n aggregate=\"1D\")\n\n self.assertEqual(result.shape, (10, 2))\n #And the second sample should be 10 minutes in\n self.assertEqual(result.index[1], datetime.datetime(2013, 01, 02, 0, 00, 00))", "def polling_experiment_multiple(dataset_id, times, **kwargs):\n total_yeahs = 0\n total_noes = 0\n for i in range(0, times):\n yeah, noes = polling_experiment(dataset_id, **kwargs)\n total_yeahs += yeah\n total_noes += noes\n logging.info(\"After \"+str(times)+\" \"+ str(dataset_id) + \" experiments:\")\n logging.info(\"\"+ str(total_yeahs) + \" correct 
predictions\")\n logging.info(\"\"+ str(total_noes) + \" wrong predictions\")\n logging.info(\"\"+ str(total_yeahs/(total_yeahs + total_noes)) + \" accuracy\")", "def test_dejitter_timestamps():\n n_steps = 100\n n_tests = 50\n sfreqs = np.linspace(1, 5000, n_tests).astype(int)\n last_times = np.random.randint(-100, 100, size=n_tests)\n test_timestamps = np.random.random((n_tests, n_steps)) + np.arange(n_steps)\n expected_timestamps = [np.arange(n_steps)/sfreq + last_times[i] + 1/sfreq\n for i, sfreq in enumerate(sfreqs)]\n for i, args in enumerate(zip(test_timestamps, sfreqs, last_times)):\n dejittered = acquire.dejitter_timestamps(*args)\n # there may be some floating-point errors, so just make sure the\n # difference is tiny\n assert np.all((dejittered - expected_timestamps[i]) < 1e-14)", "def data_point(inputs: list):\n \n opv = '1'\n \n sample_id = 0\n \n timenow = strftime(\"%#m/%#d/%Y %#H:%M\")\n volts = inputs[0]\n current = inputs[1]\n power = inputs[2]\n \n data_point = [opv, sample_id, timenow, volts, current, power]\n\n if data_point == True:\n sample_id += 1\n \n return data_point", "def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line", "def filter(self, newer_than=None):\r\n if newer_than is None:\r\n newer_than = self._clock.time() - self._window.as_(Time.SECONDS)\r\n self._samples = [sample for sample in self._samples if sample[0] >= newer_than]", "def update_data():\n values = temp_serial_placeholder()\n time = current_time_milli() - __start\n points = [ [time, values[0]], [time, values[1]] ]\n __data.append(points)\n return points", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n 
nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def test_stream_loop(self):\n chans, gains, scans, rate = (10,10,10,10), (1,2,4,5), 1024, 500\n v = [v[0] for v in self.l.stream_sync(\n channels=chans, gains=gains,\n num_scans=scans, rate=rate)]\n for vi in v:\n for r in vi:\n self.assertTrue(abs(r-2.5) < .1,\n \"%s should be cal, 2.5v\" % vi[0])", "def get_peaks(\n self,\n new_sample: np.ndarray,\n threshold: float,\n min_peaks_interval=None,\n ) -> tuple:\n tic = time.time()\n nb_peaks = []\n if len(new_sample.shape) == 1:\n new_sample = np.expand_dims(new_sample, 0)\n sample_proc = np.copy(new_sample)\n if not self._is_one:\n self._is_one = [False] * new_sample.shape[0]\n\n for i in range(new_sample.shape[0]):\n for j in range(new_sample.shape[1]):\n if new_sample[i, j] < threshold:\n sample_proc[i, j] = 0\n self._is_one[i] = False\n elif new_sample[i, j] >= threshold:\n if not self._is_one[i]:\n sample_proc[i, j] = 1\n self._is_one[i] = True\n else:\n sample_proc[i, j] = 0\n\n if len(self.raw_data_buffer) == 0:\n self.raw_data_buffer = new_sample\n self.processed_data_buffer = sample_proc\n nb_peaks = None\n\n elif self.raw_data_buffer.shape[1] < self.processing_window:\n self.raw_data_buffer = np.append(self.raw_data_buffer, new_sample, axis=1)\n self.processed_data_buffer = np.append(self.processed_data_buffer, sample_proc, axis=1)\n nb_peaks = None\n\n else:\n self.raw_data_buffer = np.append(self.raw_data_buffer[:, new_sample.shape[1] :], new_sample, axis=1)\n self.processed_data_buffer = np.append(\n self.processed_data_buffer[:, new_sample.shape[1] :], sample_proc, axis=1\n )\n\n if min_peaks_interval:\n self.processed_data_buffer = RealTimeProcessing._check_and_adjust_interval(\n self.processed_data_buffer, min_peaks_interval\n )\n if isinstance(nb_peaks, list):\n for i in range(self.processed_data_buffer.shape[0]):\n nb_peaks.append(np.count_nonzero(self.processed_data_buffer[i, :]))\n self.process_time.append(time.time() - tic)\n return nb_peaks, self.processed_data_buffer", "def 
test_TimeSeries():\r\n\r\n #Test initialization with duration:\r\n tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], duration=10)\r\n tseries2 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_interval=1)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n\r\n #downsampling:\r\n t1 = ts.UniformTime(length=8, sampling_rate=2)\r\n #duration is the same, but we're downsampling to 1Hz\r\n tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)\r\n #If you didn't explicitely provide the rate you want to downsample to, that\r\n #is an error:\r\n npt.assert_raises(ValueError, ts.TimeSeries, dict(data=[1, 2, 3, 4],\r\n time=t1))\r\n\r\n tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)\r\n tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,\r\n time_unit='ms')\r\n #you can specify the sampling_rate or the sampling_interval, to the same\r\n #effect, where specificying the sampling_interval is in the units of that\r\n #time-series:\r\n tseries4 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_interval=1,\r\n time_unit='ms')\r\n npt.assert_equal(tseries4.time, tseries3.time)\r\n\r\n #The units you use shouldn't matter - time is time:\r\n tseries6 = ts.TimeSeries(data=[1, 2, 3, 4],\r\n sampling_interval=0.001,\r\n time_unit='s')\r\n npt.assert_equal(tseries6.time, tseries3.time)\r\n\r\n #And this too - perverse, but should be possible:\r\n tseries5 = ts.TimeSeries(data=[1, 2, 3, 4],\r\n sampling_interval=ts.TimeArray(0.001,\r\n time_unit='s'),\r\n time_unit='ms')\r\n\r\n npt.assert_equal(tseries5.time, tseries3.time)\r\n\r\n #initializing with a UniformTime object:\r\n t = ts.UniformTime(length=3, sampling_rate=3)\r\n\r\n data = [1, 2, 3]\r\n\r\n tseries7 = ts.TimeSeries(data=data, time=t)\r\n\r\n npt.assert_equal(tseries7.data, data)\r\n\r\n data = [1, 2, 3, 4]\r\n #If the data is not the right length, that should throw an error:\r\n npt.assert_raises(ValueError,\r\n ts.TimeSeries, dict(data=data, time=t))\r\n\r\n # test basic arithmetics wiht TimeSeries\r\n tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)\r\n tseries2 = tseries1 + 1\r\n npt.assert_equal(tseries1.data + 1, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n tseries2 -= 1\r\n npt.assert_equal(tseries1.data, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n tseries2 = tseries1 * 2\r\n npt.assert_equal(tseries1.data * 2, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n tseries2 /= 2\r\n npt.assert_equal(tseries1.data, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)", "def process(self,timeStamp,values,queueNo):\n\t\tdatain = values[0]\n\t\tcurValue = float(datain)\n\t\tif self.isBaselineRunning:\n\t\t\tself.testMinMax(curValue)\n\t\t\n\t\tif self.hasBaselineEnded:\n\t\t\tscaledValue = self.scale(curValue)\n\t\t\t#print scaledValue\n\t\t\tself.addProcessedValues(scaledValue)", "def AddSample(self, machine, timestamp, value):\n self.machine_data.setdefault(machine, list()).append([timestamp, value])\n if len(self.cluster_total) == 0 or timestamp > self.cluster_total[-1][0]:\n self.cluster_total.append([timestamp, 0])\n self.cluster_avg.append([timestamp, 0])\n self.cluster_total[-1][1] += value\n self.cluster_avg[-1][1] = self.cluster_total[-1][1] / float(len(self.machine_data))", "def get_preprocessed_from_raw(sess_no, raw_path, align_on, from_time, to_time) :\n \n #params\n sess = '01'\n \n trial_length = abs(from_time - to_time)\n\n # Paths\n #raw_path = base_path + 'data/raw/' + 
sess_no + '/session' + sess + '/'\n rinfo_path = raw_path + 'recording_info.mat'\n tinfo_path = raw_path + 'trial_info.mat'\n\n # Define and loop over intervals\n \n srate = io.get_sfreq(rinfo_path) # = 1 000\n n_trials = io.get_number_of_trials(tinfo_path) \n last_trial = int(max(io.get_trial_ids(raw_path)))\n n_chans = io.get_number_of_channels(rinfo_path)\n channels = [ch for ch in range(n_chans)]\n\n # Pre-process data\n filtered = np.empty([n_trials,\n len(channels),\n int(trial_length * srate/1000)])\n\n trial_counter = 0; counter = 0\n while trial_counter < last_trial:\n n_zeros = 4-len(str(trial_counter+1))\n trial_str = '0' * n_zeros + str(trial_counter+1) # fills leading 0s\n if sess == '01' :\n file_in = sess_no + '01.' + trial_str + '.mat'\n else :\n file_in = sess_no + '02.' + trial_str + '.mat'\n \n if align_on == 'sample' : \n onset = io.get_sample_on(tinfo_path)[trial_counter].item()\n elif align_on == 'match' :\n onset = io.get_match_on(tinfo_path)[trial_counter].item()\n else :\n print(\"Petit problème avec align_on : 'sample' ou 'match' \")\n \n\n \n if np.isnan(onset): # drop trials for which there is no onset info\n print('No onset for ' + file_in)\n trial_counter += 1\n if trial_counter == last_trial:\n break\n else:\n counter += 1\n continue\n print(file_in)\n try:\n raw = io.get_data(raw_path + file_in)\n temp = pp.strip_data(raw,\n rinfo_path,\n onset,\n start=from_time,\n length=trial_length)\n \n if temp.shape[1] == trial_length: # drop trials shorter than length\n filtered[counter] = temp\n counter += 1\n except IOError:\n print('No file ' + file_in)\n trial_counter += 1\n\n # Return data\n\n filtered = np.array(filtered)\n return(filtered)", "def calc_observables(samples):\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0], n+n*(n-1)//2))\n \n k = 0\n for i in range(n):\n obs[:,i] = samples[:,i]\n for j in range(i+1,n):\n obs[:,n+k] = samples[:,i] * samples[:,j]\n k += 1\n return obs", "def _process_data_sorted(self):\n n, t, trials = self.raw_data.shape[0], self.raw_data.shape[1], self.raw_data.shape[2]\n if self.PD:\n if self.use_silent_channels:\n clean_channels, red_n = self.raw_data, n\n _, _, invalid_ch = self._discard_channels(self.raw_data)\n else:\n clean_channels, red_n, invalid_ch = self._discard_channels(self.raw_data)\n\n session_onmeds = clean_channels[:, :, :, :, self.pd_ses_order[self.i_sub][0]]\n session_offmeds = clean_channels[:, :, :, :, self.pd_ses_order[self.i_sub][1]]\n subsets = [session_onmeds, session_offmeds]\n else:\n if self.use_silent_channels:\n clean_channels, red_n = self.raw_data, n\n _, _, invalid_ch = self._discard_channels(self.raw_data)\n else:\n clean_channels, red_n, invalid_ch = self._discard_channels(self.raw_data)\n session1 = clean_channels[:, :, :, 0::2]\n session2 = clean_channels[:, :, :, 1::2]\n subsets = [session1, session2]\n\n ts = np.empty(shape=(0, 2, self.N_MOTIV, trials, t, red_n))\n for ds in subsets:\n # session(1/2 or on/off) - rg1/rg2 - motiv - trial - sec - signal\n ts_tmp_new = np.zeros((2, self.N_MOTIV, trials, t, red_n))\n\n # rg1:\n ts_tmp_new[0, :, :, :] = np.array(\n (ds[:, :, :, 0].T, ds[:, :, :, 1].T, ds[:, :, :, 2].T)\n )\n # rg2:\n ts_tmp_new[1, :, :, :] = np.array(\n (ds[:, :, :, 3].T, ds[:, :, :, 4].T, ds[:, :, :, 5].T)\n )\n\n ts = np.concatenate((ts, np.array([ts_tmp_new])))\n\n return ts, invalid_ch", "def generate_sample_data(point_numbers, interval):\n src_names = ['test.example.com', 'salmon.example.com', 'www.example.com']\n sources = []\n for name in src_names:\n 
sources.append(models.Source.objects.get_or_create(name=name)[0])\n sources.append(None)\n metric_names = ['ps.virtual_memory_usage',\n 'ps.disk_io_counters.write_bytes',\n 'ps.disk_io_counters.read_bytes',\n 'ps.disk_usage.percent',\n 'ps.physical_memory_usage.percent']\n\n for source in sources:\n for name in metric_names:\n metric = models.Metric.objects.get_or_create(source=source,\n name=name)[0]\n start = datetime.datetime.now() - datetime.timedelta(\n minutes=interval * point_numbers)\n for i in range(point_numbers):\n metric.latest_value = random.randint(1, 100)\n metric.last_updated = (start +\n datetime.timedelta(minutes=interval * i))\n metric.save()", "def get_beat_data(data,columns,seconds='last',starteqend=False,preloadduration=0.3):\r\n if ispanda(data) and isastr(columns[0]):\r\n time=data.as_matrix(['X'])\r\n data=data.as_matrix(columns)\r\n elif isnparray(data) and isanum(columns[0]):\r\n data=np.array(data)\r\n time=data[:,[0]].copy()\r\n data=data[:,columns].copy()\r\n else:\r\n return None\r\n time=time.transpose()[0]\r\n if str(seconds)=='last':\r\n seconds=[time[-1]-1,time[-1]]\r\n elif isanum(seconds):\r\n if len(np.array([seconds]))>1:\r\n pass\r\n #now check if we wanted a beat#\r\n elif isinteger(seconds):\r\n pld=preloadduration\r\n seconds=[seconds-1+pld,seconds+pld]\r\n else:\r\n return None\r\n #start row(sr) and end row (er)\r\n sr=np.argmin(abs(time-seconds[0]))\r\n er=np.argmin(abs(time-seconds[-1]))\r\n data=data[sr:er+1]\r\n timeabs=time[sr:er+1].copy()\r\n timerel=timeabs-timeabs[0]\r\n if starteqend:\r\n if timerel[-1]<0.05:\r\n return None\r\n #fifty ms row\r\n fiftymsr=np.argmin(abs(timerel-0.05))\r\n #time as a column again, multiplied horizontally to reach the desired dim\r\n t=np.array([timerel[:fiftymsr+1]]).T \r\n t=np.tile(t,(1,data.shape[1]))\r\n #vf/vi (relationship between final value (vf) and initial value (vi))\r\n vf_vi=np.tile(data[-1]/data[0],(fiftymsr+1,1))\r\n adj_mat=(1-vf_vi)/0.05*t+vf_vi\r\n data[:fiftymsr+1,:]=data[:fiftymsr+1,:]*adj_mat\r\n return data,np.array([timeabs]).T,np.array([timerel]).T", "def compare(self):\n samples = self.data[-2:]\n if len(samples) != 2:\n return\n\n timestamp_a, data_a = samples[0]\n timestamp_b, data_b = samples[1]\n LOG.debug(\"%s comparing sample from %s to %s\", self, timestamp_a, timestamp_b)\n changes = dict_compare(data_a, data_b)\n for key in changes:\n OUTPUT.info(\"%s:%s: %s -> %s\", self, key, get_value(data_a, key), get_value(data_b, key))", "def get_sampling_updates(self, k=1):\n # perform actual negative phase\n [nh_samples, nv_samples, nv_mean, beta, mixstat, E, labels, \n swapstat, rtime, avg_rtime, nup, ndown], updates = \\\n theano.scan(self.pt_step, \n outputs_info = \\\n [{'initial': None, 'return_steps': 1}, # h1_sample\n {'initial': self.buffer, 'return_steps': 1}, # v1_sample\n {'initial': None, 'return_steps': 1}, # v1_mean \n {'initial': self.beta, 'return_steps': 1}, # beta\n {'initial': self.mixstat, 'return_steps': 1}, # mixstat\n {'initial': None, 'return_steps': 1}, # E\n {'initial': self.labels, 'return_steps': 1}, # labels\n {'initial': self.swapstat, 'return_steps': 1}, # swapstat\n {'initial': self.rtime, 'return_steps': 1}, # rtime\n {'initial': self.avg_rtime,'return_steps': 1}, # avg_rtime\n {'initial': self.nup, 'return_steps': 1}, # nup\n {'initial': self.ndown, 'return_steps': 1}],# ndown\n n_steps = k)\n\n updates = {}\n\n # update particle states\n updates[self._buffer] = T.set_subtensor(self._buffer[:self.n_chain_total], nv_samples)\n 
updates[self.mf_buffer] = T.set_subtensor(self.mf_buffer[:self.n_chain_total], nv_mean)\n \n # update energy of each particle\n updates[self._E] = T.set_subtensor(self._E[:self.n_chain_total], E)\n\n # update particle<->temperature mappings\n updates[self._mixstat] = T.set_subtensor(self._mixstat[:,:self.n_beta], mixstat)\n updates[self._beta] = T.set_subtensor(self._beta[:self.n_chain], beta)\n\n # updates for beta adaptation\n updates[self.labels] = labels\n updates[self.rtime] = rtime\n updates[self.avg_rtime] = avg_rtime\n updates[self._nup] = T.set_subtensor(self._nup[:self.n_beta], nup)\n updates[self._ndown] = T.set_subtensor(self._ndown[:self.n_beta], ndown)\n\n # updates for chain spawning\n updates[self._swapstat] = T.set_subtensor(self._swapstat[:self.n_beta], swapstat)\n\n return [nh_samples, nv_samples, beta, mixstat, E], updates", "def test_single_ended_trans_att_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"ambient\": [slice(0.52 * cable_len, 0.58 * cable_len)],\n \"cold\": [\n slice(0.125 * cable_len, 0.25 * cable_len),\n slice(0.65 * cable_len, 0.70 * cable_len),\n ],\n \"warm\": [slice(0.25 * cable_len, 0.375 * cable_len)],\n }\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # test `trans_att` related functions\n # Clear out old results\n 
ds_test.set_trans_att([])\n\n assert ds_test.trans_att.size == 0, \"clear out trans_att config\"\n\n del_keys = []\n for k, v in ds_test.data_vars.items():\n if \"trans_att\" in v.dims:\n del_keys.append(k)\n\n assert len(del_keys) == 0, \"clear out trans_att config\"\n\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing alpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )", "def getUnscaledSamples(self, **kwargs) -> TimeData:\n # initialise chans, startSample and endSample with the whole dataset\n options = self.parseGetDataKeywords(kwargs)\n\n # get the files to read and the samples to take from them, in the correct order\n dataFilesToRead, samplesToRead, scalings = self.getDataFilesForSamples(\n options[\"startSample\"], options[\"endSample\"]\n )\n numSamples = options[\"endSample\"] - options[\"startSample\"] + 1\n # set up the dictionary to hold the data\n data = {}\n for chan in options[\"chans\"]:\n data[chan] = np.zeros(shape=(numSamples), dtype=self.dtype)\n\n # loop through chans and get data\n sampleCounter = 0\n for dFile, sToRead, scalar in zip(dataFilesToRead, samplesToRead, scalings):\n # get samples - this is inclusive\n dSamples = sToRead[1] - sToRead[0] + 1\n # spam files always record 5 channels\n dSamplesRead = dSamples * self.recChannels[dFile]\n # read the data\n byteOff = (\n self.dataByteOffset[dFile]\n + sToRead[0] * self.recChannels[dFile] * self.dataByteSize\n )\n dFilePath = os.path.join(self.dataPath, dFile)\n dataRead = np.memmap(\n dFilePath,\n dtype=self.dtype,\n mode=\"r\",\n offset=byteOff,\n shape=(dSamplesRead),\n )\n # now need to unpack this\n for chan in options[\"chans\"]:\n # check to make sure channel exists\n self.checkChan(chan)\n # get the channel index - the chanIndex should give the right order in the data file\n # as it is the same order as in the header file\n chanIndex = self.chanMap[chan]\n # use the range sampleCounter -> sampleCounter + dSamples, 
because this actually means sampleCounter + dSamples - 1 as python ranges are not inclusive of the end value\n # scale by the lsb scalar here - note that these can be different for each file in the run\n data[chan][sampleCounter : sampleCounter + dSamples] = (\n dataRead[chanIndex : dSamplesRead : self.recChannels[dFile]]\n * scalar[chan]\n )\n # increment sample counter\n sampleCounter = sampleCounter + dSamples # get ready for the next data read\n\n # return data\n startTime, stopTime = self.sample2time(\n options[\"startSample\"], options[\"endSample\"]\n )\n comments = []\n comments.append(\n \"Unscaled data {} to {} read in from measurement {}, samples {} to {}\".format(\n startTime,\n stopTime,\n self.dataPath,\n options[\"startSample\"],\n options[\"endSample\"],\n )\n )\n comments.append(\"Data read from {} files in total\".format(len(dataFilesToRead)))\n comments.append(\n \"Data scaled to mV for all channels using scalings in header files\"\n )\n comments.append(\"Sampling frequency {}\".format(self.getSampleFreq()))\n return TimeData(\n sampleFreq=self.getSampleFreq(),\n startTime=startTime,\n stopTime=stopTime,\n data=data,\n comments=comments,\n )", "def test_callback_multiple_entries(self):\n TestStorage.set_timeout(20)\n TestStorage.set_time(100)\n store = RatedStatisticStorage()\n entity_c = TestStorage._gen_entity(\n \"ram_usage_mean\", [\"20\", \"40\"],\n [chr(Outcome.HIGH), chr(Outcome.LOW)])\n msg = TestStorage._gen_msg(\"n!node\", 100, [entity_c])\n store.callback_rated_statistic(msg)\n\n self.assertEqual(\n store.get_outcome(\"n!node\", \"ram_usage_mean_0\"), Outcome.HIGH)\n self.assertEqual(\n store.get_outcome(\"n!node\", \"ram_usage_mean_1\"), Outcome.LOW)", "def minutes_of_new_data(symbol, kline_size, data, source, client):\n if len(data) > 0:\n old = parser.parse(data[\"timestamp\"].iloc[-1])\n elif source == \"binance\":\n old = datetime.strptime('1 Jan 2017', '%d %b %Y')\n elif source == \"bitmex\":\n old = client.Trade.Trade_getBucketed(symbol=symbol, binSize=kline_size, count=1, reverse=False).result()[0][0][\n 'timestamp'] \n if source == \"binance\": new = pd.to_datetime(client.get_klines(symbol=symbol, interval=kline_size)[-1][0],\n unit='ms')\n if source == \"bitmex\": new = \\\n client.Trade.Trade_getBucketed(symbol=symbol, binSize=kline_size, count=1, reverse=True).result()[0][0]['timestamp']\n return old, new", "def _compute_observations(self):\n observations = {}\n for ts in self.ts_ids:\n if self.traffic_signals[ts].time_to_act() or self.traffic_signals[ts].regular_obs() :\n observations[ts] = self.traffic_signals[ts]._compute_observation()\n return observations", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def micsample(listentime):\n frames, sampling_rate = record_audio(listentime)\n samples = np.hstack([np.frombuffer(i, np.int16) for i in frames])\n times = np.arange(samples.size) / sampling_rate\n return samples, times", "def callback(in_data, frame_count, time_info, status):\n assert frame_count == window\n try:\n samples = outq.get(block=False)\n except queue.Empty:\n print(\"underrun\", file=sys.stderr)\n samples = zeros\n\n # Return samples and continue signal.\n return (samples, pa.paContinue)", "def add_running_values(audioSamples, step):\n if (step + 1) * groupSize > 2400:\n print(\"We've reached the end of the run!\")\n return None\n samples = audioSamples[step * groupSize: (step + 1) * groupSize]\n noiseValues = [-1, 0, 1]\n for sample in samples:\n # Add noise to the sample.\n noiseValue = 
random.choice(noiseValues)\n value = sample.audio + noiseValue\n if value < 0:\n value = 0\n newSample = AudioData(\n value,\n \"W6\",\n 1,\n \"Washing Room\"\n )\n newSample.processedValue = value\n db.session.add(newSample)\n db.session.commit()", "def test_double_ended_wls_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.4 * cable_len)],\n \"warm\": [slice(0.65 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method=\"wls\",\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=8)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)", "def _update(self, datapoints):\r\n if len(datapoints) == 1:\r\n timestamp, value = datapoints[0]\r\n whisper.update(self.path, value, timestamp)\r\n else:\r\n whisper.update_many(self.path, datapoints)", "def addSample(self, time, x, y, z):\n\t\tself.numSamples += 1\n\t\tif self.prevTime != None:\n\t\t\tdt = abs(time - self.prevTime)\n\t\t\tself.timeDifferences[dt] += 1\n\t\t\tif dt > self.highDT:\n\t\t\t\tif getTimeDifference(self.rawData[self.currIdx]) >= self.minSampleTime:\n\t\t\t\t\tself.currIdx += 1\n\t\t\t\t\tself.rawData.append(list())\n\t\t\t\telse:\n\t\t\t\t\tself.rawData[self.currIdx] = list()\n\t\t\t\t\n\t\t\telse: \n\t\t\t\tself.rawData[self.currIdx].append(preProcess.resultantAcceleration(time, x, y, z))\n\n\t\tself.prevTime = time", "def monitor(data_feeder):\n _total_time = 0.\n _costs = []\n _data_feeder = data_feeder(BATCH_SIZE,\n SEQ_LEN,\n OVERLAP,\n Q_LEVELS,\n Q_ZERO,\n Q_TYPE)\n\n for _seqs, _reset, _mask in _data_feeder:\n _start_time = time.time()\n _cost = test_fn(_seqs, 
_mask)\n _total_time += time.time() - _start_time\n\n _costs.append(_cost)\n\n return numpy.mean(_costs), _total_time", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def simulate_queued_data(event_times: typing.Union[list, np.ndarray],\n processing_time: typing.Union[list, np.ndarray],\n num_queues: int = 1) -> np.ndarray:\n num_points = len(event_times)\n thread_available_times = np.ones(num_queues) * event_times[0]\n thread_available_times[0] += processing_time[0]\n\n event_total_times = np.ndarray(num_points)\n\n event_total_times[0] = processing_time[0]\n\n for i in range(1, num_points):\n current_time = event_times[i]\n current_process_time = processing_time[i]\n oldest_thread = thread_available_times.argmin()\n\n # If there is a thread that is empty at the current time, latency and processing time are the same\n if thread_available_times[oldest_thread] < current_time:\n event_total_times[i] = current_process_time\n # Thread will be busy from current time until process finishes\n thread_available_times[oldest_thread] = current_time + current_process_time\n # If there is not a thread empty, the 
requests latency will be the shortest wait time plus the processing time\n else:\n event_total_times[i] = thread_available_times[oldest_thread] - current_time + current_process_time\n # Thread will continue to be busy this amount of additional time\n thread_available_times[oldest_thread] += current_process_time\n\n return event_total_times", "def _copy(source, track, filter_f=lambda x: True, coef=1000):\n for msg in source:\n if filter_f(msg):\n track.append(msg.copy(time=int(msg.time*coef)))", "def read_timed_multi(\n adcs: tuple[ADC, ...], bufs: tuple[AnyWritableBuf, ...], timer: Timer, /\n ) -> bool:", "def _filterTimes(self):\n print(self.tRange)\n idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & \n (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]\n #print(self.rawD['Epoch'][:100])\n print(idT)\n # Filter data\n for key in filter(lambda x: ('Epoch' in x or \n ('Counts' in x and x[-1] == 's')), self.rawD.keys()):\n self.d[key] = self.rawD[key].copy()[idT]\n return", "def process_sample_val(self):\n raise NotImplementedError", "def pre_process(self):\n t1_start = perf_counter()\n wav_arr_raw = np.array(self.raw_data['spectrum_0'].attrs['wavelengths'])\n self.wavelengths = wav_arr_raw\n self.back_spectra_arr = np.array(self.raw_data['spectrum_0'].attrs['background'])\n\n corr_data = []\n times_proc = []\n\n # extract reference point for 0 seconds\n time_ref = str(self.raw_data['spectrum_0'].attrs['creation_timestamp'])\n\n # spectrometer adds 'b' and quotation marks to timestamps that must be removed\n # some spectra are taken on X.000000s which does not have a .%f component - use try and except\n try:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n\n print('Measurement was started at {}, \\n normalising times and applying a background correction \\n'.format(time_ref))\n\n # applies background correction\n for counter, spectra in enumerate(self.raw_data.keys()):\n corr_data.append(self.raw_data[spectra]-self.back_spectra_arr)\n time = str(self.raw_data[spectra].attrs['creation_timestamp'])\n try:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n deltatime = time - time_ref\n times_proc.append(deltatime.total_seconds())\n\n self.times = np.array(times_proc)\n print('Measurement contains {} spectra with {} wavelengths \\n'.format(len(self.times),len(self.wavelengths)))\n\n # data is stored as a pd Dataframe with elapsed times as indices and wavelengths as columns\n pre_proc_data = pd.DataFrame(corr_data, index = self.times, columns = self.wavelengths)\n\n # data may be disordered in time when iterated through\n # sort the data by elapsed time\n self.pre_proc_data = pre_proc_data.sort_index(axis=0)\n self.times = np.sort(self.times)\n\n t1_stop = perf_counter()\n print(\"Elapsed time for pre-processing:\", t1_stop-t1_start)\n\n return self.pre_proc_data", "def test_make_sampled_format(self):\n for num_inputs in [1, 3]:\n for num_outputs in [1, 2, 4]:\n for num_time_steps in [4, 10, 12]:\n # Generate data\n # P=2 format [0, 1, 2, 3, ...]\n sample_interval = 2\n dt_system = np.random.random()\n dt_sample = sample_interval * dt_system\n outputs = np.random.random(\n (num_time_steps, num_outputs, num_inputs))\n time_steps = make_time_steps(\n 
num_time_steps, sample_interval)\n time_values = time_steps * dt_system\n\n # Compute using modred\n my_ERA = era.ERA()\n time_steps_computed, outputs_computed =\\\n era.make_sampled_format(time_values, outputs)\n #self.assertEqual(dt_system_computed, dt_system)\n\n # Reference values\n num_time_steps_true = (num_time_steps - 1) * 2\n time_steps_true = make_time_steps(num_time_steps_true, 1)\n outputs_true = np.zeros(\n (num_time_steps_true, num_outputs, num_inputs))\n outputs_true[::2] = outputs[:-1]\n outputs_true[1::2] = outputs[1:]\n\n # Compare values\n np.testing.assert_equal(\n time_steps_computed, time_steps_true)\n np.testing.assert_equal(outputs_computed, outputs_true)\n\n # Test that if there is a wrong time value, get an error\n time_values[num_time_steps // 2] = -1\n self.assertRaises(\n ValueError, era.make_sampled_format, time_values,\n outputs)", "def convEpochFeats(cur,uid,timestamp):\n\n\thour = 3600\n\ttotalConvTime = []\n\ttotalConvs = []\n\tfor i in range(1,8):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour3\n\t\the_timestamp = timestamp-86400+i*hour3\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\t\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT * FROM {0} WHERE start_timestamp >= {1} AND end_timestamp<= {2}'\n\t\t\t\t.format(uid+'con',timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\n\t\t\trecords = cur.fetchall()\n\t\t\t# Sum over the duration of all conversation in that hour\n\t\t\ttotalConvTime.append( sum([(records[i][1]-records[i][0]) for i in range(0,len(records))]))\n\t\t\t# Count their total number\n\t\t\ttotalConvs.append(len(records))\n\t# Concatenate to one row before returning\n\treturn(np.hstack((totalConvTime,totalConvs)))", "def interval_multivariate(inputspikes, outputspikes, samples=1):\n times = []\n krdists = []\n for prv, nxt in zip(outputspikes[:-1], outputspikes[1:]):\n krd = multivariate(inputspikes, prv, nxt, samples)\n times.append(krd[0])\n krdists.append(krd[1])\n return times, krdists", "def get_rate(timestamps):\n return (timestamps[1, 1] - timestamps[0, 1]) / (timestamps[1, 0])", "def variable_time_collate_fn(batch, args, device = torch.device(\"cpu\"), data_type = \"train\", \n\tdata_min = None, data_max = None):\n\tD = batch[0][2].shape[1]\n\tcombined_tt, inverse_indices = torch.unique(torch.cat([ex[1] for ex in batch]), sorted=True, return_inverse=True)\n\tcombined_tt = combined_tt.to(device)\n\n\toffset = 0\n\tcombined_vals = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\tcombined_mask = torch.zeros([len(batch), len(combined_tt), D]).to(device)\n\t\n\tcombined_labels = None\n\tN_labels = 1\n\n\tcombined_labels = torch.zeros(len(batch), N_labels) + torch.tensor(float('nan'))\n\tcombined_labels = combined_labels.to(device = device)\n\t\n\tfor b, (record_id, tt, vals, mask, labels) in enumerate(batch):\n\t\ttt = tt.to(device)\n\t\tvals = vals.to(device)\n\t\tmask = mask.to(device)\n\t\tif labels is not None:\n\t\t\tlabels = labels.to(device)\n\n\t\tindices = inverse_indices[offset:offset + len(tt)]\n\t\toffset += len(tt)\n\n\t\tcombined_vals[b, indices] = vals\n\t\tcombined_mask[b, indices] = mask\n\n\t\tif labels is not None:\n\t\t\tcombined_labels[b] = labels\n\n\tcombined_vals, _, _ = utils.normalize_masked_data(combined_vals, combined_mask, \n\t\tatt_min = data_min, att_max = data_max)\n\n\tif torch.max(combined_tt) != 
0.:\n\t\tcombined_tt = combined_tt / torch.max(combined_tt)\n\t\t\n\tdata_dict = {\n\t\t\"data\": combined_vals, \n\t\t\"time_steps\": combined_tt,\n\t\t\"mask\": combined_mask,\n\t\t\"labels\": combined_labels}\n\n\tdata_dict = utils.split_and_subsample_batch(data_dict, args, data_type = data_type)\n\treturn data_dict", "def get_next_sample(self):", "def test_1d_time():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test.fid\")\n assert data.shape == (1500,)\n assert data.dtype == 'complex64'\n assert round(data[0].real,2) == 91899.24\n assert round(data[0].imag,2) == 1964.70\n assert round(data[1].real,2) == 168844.25\n assert round(data[1].imag,2) == 49503.41\n write_readback(dic,data)", "def calc_observables(samples):\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0],n*(n-1)//2))\n \n k = 0\n for i in range(n):\n for j in range(i+1,n):\n obs[:,k] = samples[:,i]*samples[:,j]\n k += 1\n return obs", "def analyze2(ys, freqs, ts):", "def activityEpochFeats(cur,uid,timestamp):\n\tstatToMovingRatio = []\n\tvar_stats = []\n\tstd_stats = []\n\tuidS = uid +'act'\n\tfor i in range(1,8):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour3\n\t\the_timestamp = timestamp-86400+i*hour3\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\t# Retrieving data in hour intervals spanning over one day\n\t\t\tcur.execute('SELECT activity FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidS,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of stationary and walking/running occurences\n\t\t\tstationary = len([item for item in records if item==0])\n\t\t\tmoving = len([item for item in records if item==1 or item==2])\n\n\t\t\tif moving>0:\n\t\t\t\tstatToMovingRatio.append(float(stationary) / moving)\n\t\t\telse:\n\t\t\t\tstatToMovingRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((statToMovingRatio,var_stats,std_stats))))", "def raw_data(\n n: int = 8, limit: int = 1000, arrival_function: ArrivalF = arrival1\n) -> Counter[int]:\n data = samples(limit, arrival_function(n))\n wait_times = collections.Counter(coupon_collector(n, data))\n return wait_times", "def samples_keep(self,index):\n\n\t\tif isinstance(index, (int, long)): index = range(self.samples)[-index:]\n\n\t\tself.sampled_topics = np.take(self.sampled_topics,index,axis=0)\n\t\tself.tt = np.take(self.tt,index,axis=2)\n\t\tself.dt = np.take(self.dt,index,axis=2)\n\n\t\tself.samples = len(index)", "def test_single_ended_wls_estimate_synthetic():\n\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 500)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * 
np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n print(\"alphaint\", cable_len * (dalpha_p - dalpha_m))\n print(\"alpha\", dalpha_p - dalpha_m)\n print(\"C\", np.log(C_p / C_m))\n print(\"x0\", x.max())\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_single_ended(\n sections=sections, st_var=1.0, ast_var=1.0, method=\"wls\", solver=\"sparse\"\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=6)\n assert_almost_equal_verbose(ds.dalpha.values, dalpha_p - dalpha_m, decimal=8)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=4)\n\n pass", "def sample_next(self, time, samples, errors):\n if self.previous_value is None:\n output = self.start_value\n else:\n time_diff = time - self.previous_time\n noise = np.random.normal(loc=0.0, scale=1.0, size=1)\n output = (np.power(self.ar_param, time_diff))*self.previous_value+\\\n self.sigma*np.sqrt(1-np.power(self.ar_param, time_diff))*noise\n self.previous_time = time\n self.previous_value = output\n return output", "def getData(dig, pipe, event, pulses):\n logging.info(\"Started getData\")\n start_time = time.time()\n for pulse in range(pulses):\n samples = dig.get_data_raw()\n# logging.info(\"GetData retrieved: %d\", len(samples))\n pipe.put(samples)\n end_time = time.time()\n elapsed = end_time - start_time\n samplesProcessed = (pulses * len(samples[0]) * len(samples))\n logging.info(\"getData processed %d Msamples in %.3f s\",\n samplesProcessed / 1e6,\n elapsed)\n logging.info(\"getData rate: %.3f Msa/s in lumps of %d samples\",\n samplesProcessed / elapsed / 1e6,\n dig.pointsPerCycle)", "def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 
1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return (SpikeCount, splitInterval)", "def test_high_delta_sample_stays_the_same(self, sampling_class,\n sampling_method):\n s = private_sampling.ThresholdSample(0.5, sampling_method)\n for i in range(2000):\n s.process(i, 1)\n private_priority_sample = sampling_class.from_non_private(\n s, eps=0.1, delta=1.0)\n self.assertCountEqual(s.elements.keys(), private_priority_sample.elements)", "def test_4d_stream_time():\n fmask = \"common_data/4d_pipe/full4D.fid\"\n dic,data = ng.pipe.read_lowmem(fmask)\n\n fname = \"common_data/4d_pipe/time_2index/test02006.fid\"\n sdic,sdata = ng.pipe.read(fname)\n\n assert data.shape == (8, 12, 16, 1400)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2,3].real,2) == -395.11\n assert round(data[0,1,2,3].imag,2) == 52.72\n assert round(data[5,9,11,987].real,2) == -35.09\n assert round(data[5,9,11,987].imag,2) == 33.07\n\n # check the slice\n assert sdata.shape == (16, 1400)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == 75.93\n assert round(sdata[1,2].imag,2) == 5.55\n assert round(sdata[7,800].real,2) == -8.93\n assert round(sdata[7,800].imag,2) == -10.24\n\n # slice/data matching\n assert_array_equal(data[1,5],sdata)\n\n lowmem_write_readback(dic,data)", "def split_by_interval(array, interval: int):\n\n prev_timestamp = array[0][\"timestamp\"]\n values = 0\n count = 0\n averages = []\n\n for el in array:\n if el[\"timestamp\"] - prev_timestamp >= timedelta(interval):\n prev_timestamp = el[\"timestamp\"]\n average = values / count\n averages.append(average)\n values = count = 0\n else:\n values += el[\"value\"]\n count += 1" ]
[ "0.60448503", "0.5995645", "0.58302253", "0.57981944", "0.5796712", "0.57483286", "0.57325566", "0.57073534", "0.56920207", "0.5675369", "0.56446433", "0.5632045", "0.5621465", "0.5616494", "0.559965", "0.5543054", "0.55349135", "0.55306476", "0.5527352", "0.55071265", "0.54715633", "0.54674643", "0.54631126", "0.5436526", "0.5403363", "0.54024506", "0.53941214", "0.53931826", "0.538854", "0.5364179", "0.5361968", "0.53616434", "0.5337768", "0.5334232", "0.5327028", "0.53251815", "0.5313115", "0.5309612", "0.53082424", "0.5304992", "0.5301247", "0.5299155", "0.5289329", "0.5288964", "0.527963", "0.5279242", "0.52725583", "0.525972", "0.5247218", "0.5247172", "0.5245574", "0.52394915", "0.52296805", "0.52274007", "0.5223142", "0.5220494", "0.52177536", "0.52086973", "0.5203484", "0.51996785", "0.5192449", "0.5187594", "0.5182446", "0.51818377", "0.51754755", "0.51748526", "0.5171783", "0.5170953", "0.51687473", "0.5165193", "0.51621586", "0.51564026", "0.51542616", "0.51521146", "0.514832", "0.51478344", "0.5141622", "0.51376826", "0.51356465", "0.5134733", "0.5129225", "0.5127647", "0.51250935", "0.5124748", "0.5118706", "0.51161355", "0.511333", "0.5109825", "0.5109059", "0.51058", "0.5095116", "0.5094746", "0.5086436", "0.5086009", "0.50813997", "0.5078517", "0.50745046", "0.50730157", "0.50704646", "0.5068577", "0.50618887" ]
0.0
-1
Get the list of pedestrian topics
def get_ped_topics(n_peds):
    ls = []
    for n in range(n_peds):
        ls += [coord.format(n) for coord in PED_TEMPL_TOPICS]
    return ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_topics(self):\r\n return [x[0] for x in get_published_topics()]", "def topics(ctx):\n pass", "def topics(self):\r\n return topics.Topics(self)", "def topics(self, project: str) -> list:\n assert self.exists(project), f'Project {project} inesistente'\n\n cursor = self.projects(\n {\n 'url': project\n }\n )\n try:\n return cursor.next()['topics']\n except StopIteration:\n return []", "def topics(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"topics\")", "def topics(self):\n return topics.Topics(self)", "def get_topics(self):\n try:\n with self.__db_lock:\n sql = \"SELECT * FROM 'topics' ORDER BY 'name' ASC\"\n self.__cursor.execute(sql)\n topics = self.__cursor.fetchall()\n if topics is None or len(topics) == 0:\n return []\n return [topic[1] for topic in topics]\n except Exception as e:\n logging.error(\n \"Exception when trying to get topics list: {}\".format(e))\n return []", "def list_topics(project_id):\n project_path = f\"projects/{project_id}\"\n for topic in PUBLISHER_CLIENT.list_topics(request={\"project\": project_path}):\n print(topic)", "def topics(self):\r\n return contents.Topics(self)", "def get_topics(self):\n return self.client.cluster.topics()", "def get_topics():\n topics, _ = base_query(db_session)\n return jsonify([p.serialize for p in topics])", "def topics(self):\r\n return Topics(self)", "def topics(self):\n # use get_model to avoid circular dependency\n topic_model = apps.get_model('tags', 'Topic')\n return topic_model.objects.filter(tag__in=self.tags.all()).distinct()", "def get_teacher_topic_all():\n topic_data = query_db(\n \"SELECT topics.id, topics.name, classes.name FROM topics JOIN classes \"\n \"ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n topics = []\n for topic in topic_data:\n topic_dict_teacher = {}\n topic_dict_teacher[\"id\"] = topic[0]\n topic_dict_teacher[\"name\"] = flask.escape(str(topic[1]))\n topic_dict_teacher[\"class\"] = flask.escape(str(topic[2]))\n topics.append(topic_dict_teacher)\n return topics", "def topics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"topics\")", "def get_custom_phrases():\n return [x[0] for x in all_topics if x[2] == \"1\"]", "def getGroupTopics(group_id): \r\n return Group.getGroupTopics(group_id)", "def get_topics_for_alt(alt_list, pgm_dict):\n for epv in alt_list:\n name = epv.get('package', {}).get('name', [''])[0]\n if name:\n for pgm_pkg_key, pgm_list in pgm_dict.items():\n for pgm_epv in pgm_list:\n if name == pgm_epv.get('package_name', ''):\n epv['package']['pgm_topics'] = pgm_epv.get('topic_list', [])\n\n return alt_list", "def topics(self):\r\n return ProjectTopics(self)", "def list(self, topic, **options):\n pass", "def my_posttopic_list(request, show_user=False):\n\tif not show_user:\n\t\tshow_user = str(request.user)\n\ttry:\n\t\ttopics = Post.objects.order_by('-post_date').filter(post_author=show_user).values('post_topic').distinct()[:50]\n\t\tposts = []\n\t\tfor i in topics:\n\t\t\tposts.append(int(i['post_topic']))\n\t\ttopics = Topic.objects.order_by('-topic_modification_date').filter(id__in=posts)\n\t\tfor i in topics:\n\t\t\tpmax = i.post_set.all().count()/10\n\t\t\tpmaxten = i.post_set.all().count()%10\n\t\t\tif pmaxten != 0:\n\t\t\t\ti.pagination_max = pmax+1\n\t\t\telse:\n\t\t\t\ti.pagination_max = pmax\n\t\tforum_name = _('User Posts in Latest Topics')\n\texcept:\n\t\treturn render_to_response('myghtyboard/mytopics_list.html', {'perms': list_perms(request)}, 
context_instance=RequestContext(request))\n\treturn render_to_response(\n\t\t'myghtyboard/mytopics_list.html',\n\t\t{'topics': topics, 'forum_name': forum_name, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))", "def get_topics(self, tags=None):\r\n params = {}\r\n if tags:\r\n params['tags'] = ','.join(tags)\r\n resp = self._make_request('get', 'topics', params=params)\r\n return resp.json()", "def getSubscriptionTopics(self) -> List[str]:\n return self.cpp.getSubscriptionTopics()", "def topics(self):\n return set([n.text for n in self.node.xpath('.//domClass')\n if n.text is not None])", "def get_topic_words(self, topics):\n topic_words = []\n for topic, top_n_words in topics.items():\n words = [word for word, c_tf_idf in top_n_words]\n topic_words.append(words)\n return topic_words", "def topic_list(request, forum_id, pagination_id=1):\n\ttry:\n\t\ttopics = Topic.objects.order_by('-is_global', '-is_sticky', '-topic_modification_date').filter(Q(topic_forum=forum_id) | Q(is_global='1'))\n\t\tforum_name = Forum.objects.get(id=forum_id)\n\t\tforum_name = forum_name.forum_name\n\texcept:\n\t\treturn HttpResponseRedirect('/forum/')\n\treturn object_list(\n\t\trequest,\n\t\tTopic.objects.order_by('-is_global', '-is_sticky', '-topic_modification_date').filter(Q(topic_forum=forum_id) | Q(is_global='1')),\n\t\tpaginate_by = 10,\n\t\tallow_empty = True,\n\t\tpage = pagination_id,\n\t\textra_context = {'forum': forum_id, 'perms': list_perms(request), 'forum_name': forum_name},\n\t\ttemplate_name = 'myghtyboard/topics_list.html')", "def resolve_topics(self, info, **kwargs):\n return Topic.objects.all()", "def get_topics(mods):\n output = []\n topics = mods.findall(\"{{{0}}}subject/{{{0}}}topic\".format(common.MODS_NS))\n for topic in topics:\n output.append(topic.text)\n return output", "def get_topics(self):\n topics = self.word_topics\n return topics / topics.sum(axis=1)[:, None]", "def trendingTopics():\n api = twitter.Api()\n\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n util.safe_print(topic.name)", "def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]", "def cmd_TOPICS(self):\r\n return self._ros.get_topics()", "def get_class_topic(class_id):\n topic_data = query_db(\"SELECT id, name FROM topics WHERE class_id=?\", [class_id])\n topics = []\n for topic in topic_data:\n topic_dict_class = {}\n topic_dict_class[\"id\"] = topic[0]\n topic_dict_class[\"name\"] = topic[1]\n topics.append(topic_dict_class)\n return topics", "def parse_topics(self, n=10):\n assert(self.is_trained)\n raw_topics = self._lda_model.print_topics(self._lda_model.num_topics)\n topics = map(lambda x: x.split(' + '), raw_topics)\n top_words = [\n map(\n lambda x: x.split('*')[1], \n topic[:n]\n ) \n for topic in topics]\n self.topics = top_words\n self.has_topics = True\n return top_words", "def get_top_topics(self, model_name, data):\n\n if model_name == 'lda':\n return list(self.lda_model.top_topics(data))\n elif model_name == 'lsa':\n return list(self.lsa_model.print_topics(num_topics= 10))", "def display_topic(self):\n return ', '.join(topic.name for topic in self.topic.all()[:3])", "def my_topic_list(request, show_user=False):\n\tif not show_user:\n\t\tshow_user = str(request.user)\n\tif request.user.is_authenticated():\n\t\ttopics = Topic.objects.order_by('-topic_modification_date').filter(topic_author=show_user)[:50]\n\t\tforum_name = _('User Topics')\n\t\treturn 
render_to_response(\n\t\t\t'myghtyboard/mytopics_list.html',\n\t\t\t{'topics': topics, 'forum_name': forum_name, 'perms': list_perms(request)},\n\t\t\tcontext_instance=RequestContext(request))\n\telse:\n\t\treturn render_to_response('pages/bug.html', {'bug': _('You aren\\'t logged in')}, context_instance=RequestContext(request))", "def _topics_words(self, num_of_words):\n x = self.model.show_topics(-1, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[0] for i in topic[1]] for topic in x]", "def help_topics():\n pass", "def top_level_discussion_topic_ids(self):\r\n topics = self.discussion_topics\r\n return [d[\"id\"] for d in topics.values()]", "def print_topics(self, time=0, top_terms=20):\n return [self.print_topic(topic, time, top_terms) for topic in range(self.num_topics)]", "def test_get_all_topics(mock_send_message_json):\n assert OranDmaap.get_all_topics_url == f\"{BASE_URL}/topics/listAll\"", "def get_exchange_topics(conf):\n return [\n plugin.ExchangeTopics(\n exchange=conf.akanda_notification_exchange,\n topics=set(topic + '.info'\n for topic in conf.akanda_notification_topics)),\n ]", "def get_queryset(self, request):\n qs = super(TopicAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(id__in=request.user.profile.topics.all())", "def check_exists(cls, topics):\n\t\tresult = []\n\t\tfor known_feed in cls.get([cls.create_key(url) for url in set(topics)]):\n\t\t\tif known_feed is not None:\n\t\t\t\tresult.append(known_feed.topic)\n\t\treturn result", "def get_queryset(self):\n return models.ProfileTopic.objects.filter(\n profile__pk=self.kwargs.get(\"pk\")\n )", "def last_topic_list(request):\n\ttopics = Topic.objects.order_by('-topic_modification_date')[:50]\n\tfor i in topics:\n\t\tpmax = i.post_set.all().count()/10\n\t\tpmaxten = i.post_set.all().count()%10\n\t\tif pmaxten != 0:\n\t\t\ti.pagination_max = pmax+1\n\t\telse:\n\t\t\ti.pagination_max = pmax\n\tforum_name = _('Last Active Topics')\n\treturn render_to_response(\n\t\t'myghtyboard/mytopics_list.html',\n\t\t{'topics': topics, 'forum_name': forum_name, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))", "def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)", "def get_topics(category):\n page = requests.get(BASE_URL, verify=False)\n soup = BeautifulSoup(page.content)\n output = []\n get_lesson_id = lambda url: url.split('=')[-1]\n\n if category == 'Top 10 Courses':\n playlist = soup.find(id='featured_playlists')\n for item in playlist.findAll('div', 'item'):\n link = item.find('a', 'featured-playlist-title')\n output.append({\n 'thumbnail': item.find('img').get('src'),\n 'title': link.text.replace('&nbsp;', '').strip(),\n 'lesson_id': get_lesson_id(link['href'])})\n else:\n sidebar = soup.find(id='main_aside')\n for dl in sidebar.findAll('dl'):\n if dl.find('h4').text == category:\n for item in dl.findAll('dd'):\n link = item.find('a', 'category-name')\n output.append({\n 'title': link.getText(' '),\n 'lesson_id': get_lesson_id(link['href'])})\n\n return output", "def get_topics_for_comp(comp_list, pgm_list):\n for epv in comp_list:\n name = epv.get('package', {}).get('name', [''])[0]\n if name:\n for pgm_epv in pgm_list:\n if name == pgm_epv.get('package_name', ''):\n 
epv['package']['pgm_topics'] = pgm_epv.get('topic_list', [])\n epv['package']['cooccurrence_probability'] = pgm_epv.get(\n 'cooccurrence_probability', 0)\n epv['package']['cooccurrence_count'] = pgm_epv.get(\n 'cooccurrence_count', 0)\n\n return comp_list", "def getHelpTopics(self):\n return self.helpTopics.values()", "def print_topics(lda):\n topics = lda.show_topics(num_topics=100, num_words=10, formatted=False)\n for ti, topic in enumerate(topics):\n print('topic %d: %s' % (ti, ' '.join('%s/%.2f' % (t[1], t[0]) for t in topic)))", "def muckrack_trending_topics():\n\tresponse = requests.get('http://muckrack.com')\n\thtml = response.text\n\tdom = pq(html)\n\ttrending_list = dom('.trending')\n\treturn [topic.text for topic in trending_list]", "def __init__(self, topics=None):\n self.topics = topics or []", "def describe_topics(self, topics=None):\n return self._client.describe_topics(topics)", "def get_topics(all_jsons):\n topics = []\n for j in all_jsons:\n if 'GeneralObjectsDetected' in j:\n for obj in j['GeneralObjectsDetected']:\n topics.append(obj.split(':')[0])\n return list(set(topics))", "def get_topics(model, nlp_model, n_top_words):\n\n words = nlp_model.get_feature_names()\n\n return [convert_to_string([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) for topic_idx, topic in enumerate(model.components_)]", "def subscribed_topics(self):\n return self.manager.subscribed_topics", "def print_topic_times(self, topic, top_terms=20):\n topics = []\n for time in range(self.num_time_slices):\n topics.append(self.print_topic(topic, time, top_terms))\n\n return topics", "def get_probable_topic(self, script):\n script_topics = script.topic_probabilities\\\n .filter(topic_model=self)\\\n .only('topic', 'probability')\n\n max_prob = -100000\n probable_topic = None\n for mt in script_topics:\n if mt.probability > max_prob:\n probable_topic = mt.topic\n max_prob = mt.probability\n\n return probable_topic", "def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):\n if num_topics < 0 or num_topics >= self.num_topics:\n num_topics = self.num_topics\n chosen_topics = range(num_topics)\n else:\n num_topics = min(num_topics, self.num_topics)\n # add a little random jitter, to randomize results around the same alpha\n sort_alpha = self.alpha + 0.0001 * numpy.random.rand(len(self.alpha))\n sorted_topics = list(matutils.argsort(sort_alpha))\n chosen_topics = sorted_topics[: num_topics // 2] + sorted_topics[-num_topics // 2:]\n shown = []\n for i in chosen_topics:\n if formatted:\n topic = self.print_topic(i, topn=num_words)\n else:\n topic = self.show_topic(i, topn=num_words)\n shown.append((i, topic))\n if log:\n logger.info(\"topic #%i (%.3f): %s\", i, self.alpha[i], topic)\n return shown", "def scrapeTopic(self, topic, num_articles, sources=list(site.all_sites)): \n pass", "def describe_event_topics(DirectoryId=None, TopicNames=None):\n pass", "def generate_initial_topics(self):\n initial_topics = random.sample(self.remaining_topics, self.num_topics)\n self.remaining_topics = [topic for topic in self.remaining_topics if topic not in initial_topics]\n return initial_topics", "def format_topics(topics):\n return '|'.join([topic.get('title', '') for topic in topics])", "def extract_queries(self, path_topics=\"../topics-rnd5.xml\"):\n \n topic_queries = []\n with open(path_topics, \"r\") as f:\n for line in f:\n match = re.match(\".*<query>([^<]*)<\\/query>.*\", line)\n if match:\n topic_queries.append(match.group(1))\n if len(topic_queries) != 50:\n sys.exit(\"There 
should be 50 topics, found {}\".format(\n len(topic_queries)))\n \n return topic_queries", "def _get_topic_for_response():\n return _get_topic_base() + \"res/\"", "def get_most_relevant_topics(topics_list):\n topics_list.sort(cmp=lambda x, y: 1 if x[1] < y[1] else -1)\n topics_list = topics_list[:3] # ARBITRARY (at most 3 topic names)\n topics_id, _ = zip(*topics_list)\n return [topics_tools.lda_topic_names[tid] for tid in topics_id]", "def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions", "def topicnews(topic):\n urlnews=urltop\n url=urlnews+topic\n urlapi=url+'&'+'apiKey='\n urlcoun=urlapi+apikey\n response=requests.get(urlcoun)\n data=response.json()\n return data", "def suppress_topics ( *topics ) :\n if topics and 1 == len( topics ) :\n t = str ( topics [ 0 ] ).lower()\n if 'config' == t : return suppress_topics() \n\n if not topics :\n newtopics = [] \n import ostap.core.config as CONFIG\n if 'RooFit' in CONFIG.config :\n import string\n ws = string.whitespace \n node = CONFIG.config [ 'RooFit' ]\n data = node.get('RemoveTopics','(,)' )\n topics = tuple ( i.strip ( ws ) for i in data.split ( ',' ) if i.strip ( ws ) ) \n \n if topics : \n svc = ROOT.RooMsgService.instance()\n svc.saveState () \n topic = msg_topic ( *topics ) \n num = svc.numStreams()\n for i in range ( num ) : ok = Ostap.Utils.remove_topic ( i , topic )", "def describe_topic(self, index):\n assert(self.has_topics)\n assert(0 <= index < self.K)\n return self.topics[index]", "def topic_index():\n topic = db.topic(request.args(0)) or redirect(URL('default', 'index'))\n return dict(topic=topic)", "def print_all_topics(model, num_topics=10, num_words=20, try_to_disambiguate=False,\n min_word_probabity_for_disambiguation=0.010):\n print('Print {0} topics'.format(num_topics))\n print('------------')\n for t in model.show_topics(num_topics=num_topics, num_words=num_words, formatted=False):\n if try_to_disambiguate:\n possible_labels = disambiguate_topic(model.show_topic(t[0]), min_word_probability=min_word_probabity_for_disambiguation)[:2]\n print('{0}:\\t{1}\\n'.format(t[0], possible_labels))\n print('{0}\\n'.format(t[1]))\n else:\n print('{0}:\\t{1}\\n'.format(t[0], t[1]))", "def print_topics(model, feature_names, n_top_words, topic_prev):\n\ti = 0\n\tmessage_list =[]\n\tfor topic_idx, topic in enumerate(model.components_):\n\t\tmessage = \"%f Topic #%d: \" % (topic_prev[i],topic_idx)\n\t\ti +=1\n\t\tlist_feat = [feature_names[i]\n\t\t\t\t\t\t\tfor i in topic.argsort()[:-n_top_words - 1:-1]]\n\t\tfeat_freq = sorted(topic, reverse=True)\n\t\tfor j in range(0, len(list_feat)):\n\t\t\tlist_feat[j] += \" \" + str(round(feat_freq[j], 3)) + \",\"\n\n\t\tmessage += \" \".join(list_feat)\n\t\tmessage_list.append(message)\n\t\tprint(message)\n\tprint()\n\treturn message_list", "def get_publishers(self):", "def get_topic(self):\n return self.topic", "def print_top_topics_custom(topic_model, start_year, end_year, n_topics=10, out=sys.stdout, debug=False):\n papers_count = get_paper_count_per_topic(topic_model, start_year, end_year, debug)\n topic_ids = []\n out.write('#\\ttopic id\\t#docs\\ttopic\\n')\n for i in range(min(n_topics, len(papers_count))):\n topic_id = papers_count[i][0]\n topic_ids.append(topic_id)\n out.write(\n '{0}\\t{3}\\t\\t{1}\\t{2}\\n\\n'.format(i, 
papers_count[i][1], topic_model.print_topic(topic_id, 30), topic_id))\n\n return topic_ids", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def __init__(self):\n self.topics = {}", "def test_wiki_topics(self):\n t1 = TopicFactory(slug='doesnotexist')\n t2 = TopicFactory(slug='extant')\n t3 = TopicFactory(slug='tagged')\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n RevisionFactory(document=doc, is_approved=True)\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n doc.topics.add(t3)\n RevisionFactory(document=doc, is_approved=True)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n ([t2.slug, t3.slug], 1),\n )\n\n qs = {'a': 1, 'w': 1, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def get_topics_paged_get(self, categoryFilter, group, locales, page, pageSize, quickDate, sort, tagstring):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Forum/GetTopicsPaged/{page}/{pageSize}/{group}/{sort}/{quickDate}/{categoryFilter}/\"))", "def test_topic_notification_list_show_private_topic(self):\n TopicNotification.objects.all().delete()\n\n topic_a = utils.create_private_topic(user=self.user)\n topic_notif = TopicNotification.objects.create(\n user=self.user, topic=topic_a.topic,\n comment=self.comment, is_active=True, action=COMMENT)\n\n utils.login(self)\n response = self.client.get(reverse('spirit:topic:notification:index'))\n self.assertEqual(\n list(response.context['notifications']),\n [topic_notif, ])\n\n # list unread should behave the same\n response = self.client.get(\n reverse('spirit:topic:notification:index-unread'))\n self.assertEqual(list(response.context['page']), [topic_notif, ])\n\n # ajax list should behave the same\n response = self.client.get(\n reverse('spirit:topic:notification:index-ajax'),\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n res = json.loads(response.content.decode('utf-8'))\n self.assertEqual(len(res['n']), 1)", "def test_topic_list_view_unauthenticated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 3)", "def get_topics_strings(\n topics_words, mu, sigma, vocabulary, topics_to_print=10, words_per_topic=30\n):\n mu = np.squeeze(mu, axis=0)\n sigma = np.squeeze(sigma, axis=0)\n # Use a stable sorting algorithm so that when alpha is fixed\n # we always get the same topics.\n highest_weight_topics = np.argsort(-mu, kind=\"mergesort\")\n top_words = np.argsort(-topics_words, axis=1)\n\n res = []\n # try:\n for topic_idx in highest_weight_topics[:topics_to_print]:\n lst = [\n \"index={} mu={:.2f} sigma={:.2f}\".format(\n topic_idx, mu[topic_idx], sigma[topic_idx]\n )\n 
]\n lst += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]\n res.append(\" \".join(lst))\n # except:\n # res.append('')\n\n return np.array(res)", "def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), \"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])", "def get_related_topics(self,keyword,cut=0.5):\n \n ret = []\n\n if type(keyword) == str:\n if keyword in self.topic_map.keys():\n ret = [(keyword,1.0)]\n keyword = \"\"\n else:\n _keyword = []\n for k in keyword:\n if k in self.topic_map.keys():\n ret.append((k,1.0))\n else:\n _keyword.append(k) \n keyword = _keyword\n\n keyword_rels = set(self.get_related_keywords(keyword,self.keyword_map_rel,_score=False))\n\n if len(keyword_rels) > 0:\n for topic,topic_rels in self.topic_map.items():\n alike = keyword_rels.intersection(topic_rels)\n score = (len(alike) * (100/len(keyword_rels)))/100\n ret.append((topic,round(score,3)))\n ret.sort(key=lambda x : x[1], reverse=True)\n ret = [t for t in ret if t[1] >= cut]\n \n return ret", "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n 
tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def getMessages(self, topic=False):\n ret = []\n catalog = getToolByName(self.context, 'portal_catalog')\n theme = ''\n if topic:\n theme = getTheme(self.context)\n query = {\n 'portal_type': 'KeyMessage',\n 'review_state': 'published'\n }\n if theme:\n query['getThemes'] = theme\n brains = catalog.searchResults(query)\n for brain in brains:\n text = self._prepareText(brain)\n obj = brain.getObject()\n parent = obj.aq_parent\n ret.append({\n 'text': text,\n 'url': brain.getURL,\n 'parent_url': parent.absolute_url(),\n 'parent_title': parent.Title(),\n })\n return ret", "def target_lang_titles(self):\n return self.target_lang_topics.keys()", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def get_topics_articles(topic_id):\n articles = db_session.query(Article).filter_by(topic_id=topic_id)\n return jsonify([p.serialize for p in articles])", "def get_partitions_for_topic(self, topic):\n return self.client.cluster._partitions[topic]", "def explore_topic_nouns(topic_number, topn=25, model=10):\n #\n if model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n if dfff[dfff['nouns']==term].empty: ## dfff is loaded from pilot_path/bow_nouns.csv\n pass\n else:\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)", "def include_topics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"include_topics\")", "def delete_topic():\n return dict()", "def topic(df, num_topics=5):\r\n\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "async def create_unconsumed_topics():\n # ################################################ #\n # TODO: remove these once there is someone consuming the topics\n unconsumed_topics = ['dummy']\n\n logger.warning(\n f'Creating topics on the publisher: {unconsumed_topics} due to lack of consumers. 
'\n 'Remove them once there are consumers'\n )\n for topic in unconsumed_topics:\n await kafka.topic(topic).maybe_declare()\n\n # ################################################ #", "def unread_forum_topics(context):\n request = context.get('request', None)\n\n # Get all topics\n all_forum_topics = Topic.objects.all()\n\n # Retrieve the unread topics\n return TrackingHandler(request=request).get_unread_topics(all_forum_topics, request.user)", "def get_queryset(self, request):\n qs = super(TaskAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs.filter()\n return qs.filter(topic_id__in=request.user.profile.topics.all())" ]
[ "0.7867339", "0.73566186", "0.733255", "0.73222786", "0.72298735", "0.71772873", "0.7127868", "0.7058027", "0.7025698", "0.6943055", "0.68688285", "0.6814466", "0.67832524", "0.6769526", "0.6753593", "0.6670809", "0.660663", "0.6604779", "0.6540701", "0.65293837", "0.650491", "0.647983", "0.6411239", "0.63960874", "0.63816625", "0.6375652", "0.6361237", "0.6351099", "0.6350313", "0.6305629", "0.6255691", "0.6232363", "0.6173439", "0.6145597", "0.614464", "0.61225873", "0.6079282", "0.60713136", "0.60563374", "0.6038992", "0.6029752", "0.6016739", "0.5996351", "0.5983899", "0.5979008", "0.5975755", "0.5968089", "0.5965385", "0.5960553", "0.5954899", "0.5954757", "0.5946697", "0.59444726", "0.5940261", "0.5933061", "0.5926648", "0.59000725", "0.58572507", "0.5856428", "0.5851768", "0.5839545", "0.5829418", "0.5808505", "0.5803146", "0.5782622", "0.57816726", "0.5768138", "0.57652295", "0.5763986", "0.5758385", "0.57409227", "0.57249683", "0.5721607", "0.5715178", "0.5711881", "0.5708401", "0.57020795", "0.5701426", "0.5699075", "0.5693329", "0.56839204", "0.56791806", "0.5662876", "0.56581336", "0.56429994", "0.5624922", "0.56230205", "0.5622304", "0.56129634", "0.5592812", "0.5587868", "0.5577466", "0.5571116", "0.55681175", "0.55617374", "0.5537945", "0.55322146", "0.5528402", "0.5516151", "0.55110574" ]
0.68354625
11
Stop timer when the user enters a non-escape command.
def stop(self): command = input("Enter anything to finish (or 'exit' to cancel)>>>") return command != 'exit'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleKeyboardInterupt():\n System.stopExecution(TERMINATED_BY_USER)", "def on_KeyboardInterrupt(player):\n print(\"paused by KeyboardInterrupt\")\n player.edit()", "def term():\n curses.endwin()\n unicornhathd.off()", "def stop_timer(self):\r\n self.countdownTimer.stop()", "def cb_check_esc(data, remaining_calls):\n global esc_pressed, vi_buffer, cmd_text, catching_keys_data\n if last_signal_time == float(data):\n esc_pressed += 1\n set_mode(\"NORMAL\")\n # Cancel any current partial commands.\n vi_buffer = \"\"\n cmd_text = \"\"\n weechat.command(\"\", \"/bar hide vi_cmd\")\n catching_keys_data = {'amount': 0}\n weechat.bar_item_update(\"vi_buffer\")\n return weechat.WEECHAT_RC_OK", "def prompt_stop(cls):\n\n cls._set_mode_prompt_stop()\n TimeDisplay.stop_time()\n for callback in cls.pause_callback:\n callback()", "def stop_timer(self):\n self.end_time = datetime.now()", "def stop(self):\r\n self.inst.write(':STOP')", "def emergencyStop(self):\n return self.set_command(\"!\")", "def stopLong(self, reject=False):\n if self.longCommand:\n p = self.spawnProc\n #print 'stopLong: sending ctrl-c'\n p.send(CTRL_C)\n match = p.expect([self.prompt,\n pexpect.TIMEOUT], 2)\n if match == 0:\n\t\tif reject: return\n trailingJunk = '\\^C' + '\\r\\n' + self.promptChunk\n output = re.sub(self.longCommand+'\\r\\n', '', p.before)\n output = re.sub(trailingJunk, '', output)\n return output\n else:\n return \"timed out\"", "def end_stimulus(win,end_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show end stimulus\n #if 50 seconds pass, then stop showing end stimulus\n end_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n break\n end_stim.setAutoDraw(False)", "def kill_switch(disable_after, keys):\n watchdog(disable_after, keys)", "def loop_stop(self):\n super(TimerLoop, self).loop_stop()\n self.timer.cancel()\n self.loop_confirm_stopped()", "def stop() -> None:", "def esc_cancel_completion(event):\n event.cli.current_buffer.cancel_completion()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def stop():", "def stop():", "def stop():", "def stop():", "def anti_idle_timer_handler(self):\n if not self.connected(): return\n self.send_nop()\n self.start_anti_idle_timer()", "def _user_hit_escape(b=None):\n if b is None:\n if GLOBAL['GUI']['escape']:\n GLOBAL['GUI']['escape'] = False # toggle it\n return True\n else:\n return False\n else:\n # Set in vipy.gui.using_matplotlib.escape_to_exit()\n assert isinstance(b, bool)\n GLOBAL['GUI']['escape'] = b", "def kill(self):\n self._serial.write('\\x03\\r\\n'.encode('utf-8'))\n self._serial.write('\\x03\\r\\n'.encode('utf-8'))\n sleep(2)\n self._serial.reset_input_buffer()", "def stop(self):\n self.idle = True\n # pass", "def TerminalClientStop(self, exitCode=200):\n pass", "def ctrl_c(signum, frame):\n global shutdown_event\n raise SystemExit('\\nCancelling...')", "def stop_console(self):\n return", "def on_press_escape(self, event):\n del event\n self.destroy()", "def stop(self):\r\n self.terminating = True", "def _stop(self):\n self.display_end_message()", "def set_stop(self):\n self.timer.stop=True\n final_message = \"Time %s: Interrupted %s\"%(self.sim.timestr(),\n self.timer.func.__name__)\n self._close(final_message)", "def pause_handler(term):\n inp = None\n while inp not in (\"p\", \"P\", \"q\", \"Q\"):\n print(term.home + term.clear + 
term.move_y(term.height // 2))\n print(term.black_on_white(term.center(\"press P to continue.\")))\n\n inp = term.inkey(timeout=10)", "def _control_stop(self):\n self.player.stop()", "def stop_recording():\n do_command('PlayStop')\n print('Stopped')", "def __time_key_release_event(self, event):\n\t\tif event.key() == QtCore.Qt.Key_Space:\n\t\t\tself._player.stop() if self._player.is_playing else _player._video.play()", "def stop_timing_no_callback(self) -> None:\n self._is_timing = False", "def stop_timer(self):\n self.log.info(\"{} timer stopped ({} seconds)\".format(self.name, self.interval))\n self.start_event.clear()\n # self.count = self.interval / self.sleep_chunk", "def disable_keypress(self):\r\n self.cursor_visible = True", "def event2600():\n header(2600)\n rune_effect, rune_flag = define_args('ii')\n\n if_player_has_special_effect(0, rune_effect)\n flag.disable_chunk(EVENT.SableRuneActive, EVENT.RhythmRuneActive)\n flag.enable(rune_flag)\n if_player_does_not_have_special_effect(0, rune_effect)\n restart()", "def _cancel_timeout(_, dc):\n if dc.active():\n dc.cancel()\n return _", "def stop(self):\n self.started = False\n self.i.clear_keymap()\n self.i.stop_listen()", "def stop(self, signum, dummy):\n if signum == signal.SIGINT:\n self.writer.lock.acquire()\n self.stopped = True\n logging.info('Ctrl-C issued')\n self.writer.lock.release()", "def do_exit(self, args) :\r\n\r\n self.__Logger.warn(\"stopping the timer loop\")\r\n\r\n self.cmds[\"SimulatorStartup\"] = True\r\n self.cmds[\"SimulatorShutdown\"] = True\r\n\r\n\r\n return True", "def kill(self):\n Character.kill(self)\n cblocals.global_controlsEnabled = False", "def handle_stop(_):\n loop.force_unmute()", "def sigterm(signum, frame):\n loop.stop()", "def stop_shiftr(event):\n mqttc.disconnect()", "def stop(self):\n\n command = [0x00, 0x00, 0x00, 0x00]\n self.send_command(command)", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def power_off(timeout: int = 0) -> None:", "def eStop(self):\n Step(speed=0, coils=1, steps=0, dir=Step.BRAKE)\n # #######################################################\n # Need to blink Stop and wait until Stop is pressed again\n # #######################################################", "def cb_key_pressed(data, signal, signal_data):\n global last_signal_time\n last_signal_time = time.time()\n if signal_data == \"\\x01[\":\n # In 50ms, check if any other keys were pressed. 
If not, it's Esc!\n weechat.hook_timer(50, 0, 1, \"cb_check_esc\",\n \"{:f}\".format(last_signal_time))\n return weechat.WEECHAT_RC_OK", "def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()", "def __call__(self,duration=np.inf):\n\n clock = core.Clock()\n t=0\n while t<duration: #Keep going for the duration\n t=clock.getTime()\n\n self.text.draw()\n self.win.flip()\n\n for key in event.getKeys():\n if key:\n return", "def timer():\r\n\r\n T = 0\r\n while True:\r\n print (term.white + term.move_xy(82,1) + 'TIMER : ', end='')\r\n print(T, end='\\r')\r\n time.sleep(1)\r\n T = T + 1", "def __call__(self,duration=np.inf,key_press=True):\n\n clock = core.Clock()\n t=0\n while t<duration: #Keep going for the duration\n t=clock.getTime()\n\n self.text.draw()\n\n #Keep checking for time:\n if clock.getTime()>=duration:\n break\n\n self.win.flip()\n \n #Keep checking for time:\n if clock.getTime()>=duration:\n break\n\n if key_press:\n for key in event.getKeys():\n if key:\n return", "def _cancel_automation(self) -> None:\n if HANDLE_VACATION_MODE in self.handles:\n handle = self.handles.pop(HANDLE_VACATION_MODE)\n self.cancel_timer(handle)", "def skip_control_z(event):\n pass", "def cancel_main():\n entry1.delete(0, END)\n output_on_display.delete(1.0, END)\n output()", "def stop_scroll():\n send_command(0x2E)", "def signal_handler(self, signal, frame):\n logger.info('CTRL+C pressed')\n self.trigger_stop()", "def clean_exit(self):\n if self.client:\n if self.client.key:\n self.client.send(\"!exit\")\n self.client.cli = None\n self.stdscr.keypad(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def loop_exit_on_q(self, stats_period):\n start_time = time.time() # This is the only way I managed to make a curse application with\n while time.time() - start_time <= stats_period: # screen refreshing exit on key pressed: make window.getch()\n key = self.myscreen.getch() # non blocking with curses.nodelay(1) (otherwise main loop is interrupted)\n if key == ord('q'): # and check it every [10-50]ms to be responsive.\n curses.endwin()\n hacked_print(\"Monitoring ended by user\") # cf hacked_print method\n return 1\n curses.napms(self.GETCH_REFRESH_MS)", "def stopMeasurement_pmt(self):\r\n self.pmtTest.aboutToQuitHandler()", "def timeout_cmd(cmd, timeout):\n return \"timeout -sKILL %us stdbuf -o0 -e0 %s\" % (timeout, cmd)", "def setinterrupt(self, chr: int, /) -> None:", "def stop_auto_quotes(self):\n for AQ in self.auto_quotes_timers:\n self.auto_quotes_timers[AQ].cancel()\n time.sleep(1)\n self.auto_quotes_timers[AQ].cancel()", "def cancel_stop(cls):\n cls._set_mode_running()", "def _stop_bot(_event):\n pass", "def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)", "def ShutDown(self):\n self.stop = True", "def signal_handler(signal, frame): \n import signal\n import sys\n from time import localtime, strftime\n time = strftime(\"%H:%M:%S\", localtime())\n sel = raw_input('\\n\\n%s: Paused. 
Press return to resume, or type exit to quit: \\n' % time)\n if sel.startswith('e') or sel.startswith('E'):\n sys.exit(0)\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation resumed.\\n' % time", "def clean_up_terminal(self) -> None:\n if self.stdscr:\n # Disable the Keypad mode\n self.stdscr.keypad(False)\n # Renable caracters echoing\n curses.echo()\n # Disable the interrupts\n curses.nocbreak()\n # Restore the terimnal to it's orginial operating mode\n curses.endwin()", "def stop():\n set_power(0)", "async def stop_signal():\n global user_exit\n user_exit = True\n await cancel_async_tasks()", "def timer_ffmpeg_process_timeout():\n try:\n if not self.ffmpeg_process_ps.is_alive():\n timer_ffmpeg_process.stop()\n self.w.hide()\n del (self.w)\n self.ffmpeg_process_ps = None\n except:\n pass", "def stop(self):\n self._stop_event.set()", "def pytest_timeout_cancel_timer(item):\n tle.lib.cancel()\n return True", "def timeout(self):\n self.timeout_scan_flag=True\n self.timer.stop()\n self.status_sig.emit([\"Update_Status\",\"Timeout during acquisition\",'log'])\n self.status_sig.emit([\"Timeout\"])", "def command_clearterm():\n subprocess.call(\"reset\")", "def StopTimer(self):\n return time.time() - self._start_time", "def shutdown():\n # command executed after Ctrl+C is pressed\n rospy.loginfo(\"Stop ASRControl\")\n rospy.sleep(1)", "def end():\n curses.endwin()", "def pause(question='PRESS ENTER TO CONTINUE ...'):\n try: input(question)\n except KeyboardInterrupt:\n global shutDown\n shutDown = True\n except: pass", "def stop(self):\n self.active = False", "def stopGame(event):\n if event.action == sense_hat.ACTION_RELEASED:\n global playAgain, alive\n playAgain = False\n alive = False", "def stop_media(self):\n self.stdin_queue.put(\"stop\")", "def on_stop(self):\n self.write_log(\"策略停止\")\n self.cta_engine.event_engine.unregister(EVENT_TIMER, self.process_timer_event)", "def OnStopPress(self, event):\n\t\tself.onOffText.SetLabel('Off')\n\t\tself.isBaselineRunning = False\n\t\tself.hasBaselineEnded = True", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def handler(signum, frame):\n\tglobal stop_flag\n\tstop_flag=True\n\tprint(\"Query timeout!!!!!!!!!!!\")\n\traise Exception(\"Timeout\")", "def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0", "def _D(stdscr):\n curses.nocbreak()\n stdscr.keypad(0)\n curses.echo()\n curses.endwin()\n import pdb; pdb.set_trace()", "def ctrl_d_condition() -> bool:\n app = get_app()\n return (\n app.current_buffer.name == DEFAULT_BUFFER\n and not app.current_buffer.text\n )", "def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1" ]
[ "0.61809236", "0.59877855", "0.590018", "0.5879762", "0.58237314", "0.5802075", "0.57889146", "0.57686067", "0.5766843", "0.57374257", "0.5713238", "0.5673947", "0.5642621", "0.564084", "0.5605514", "0.5603989", "0.55528235", "0.55528235", "0.55528235", "0.55528235", "0.5546439", "0.5542056", "0.5526183", "0.551357", "0.54676926", "0.5462781", "0.5438458", "0.54364985", "0.5430201", "0.54087824", "0.5406931", "0.537861", "0.535127", "0.53455067", "0.53432494", "0.53406966", "0.5318454", "0.5317229", "0.5287121", "0.52847767", "0.52767205", "0.5272566", "0.527218", "0.5270632", "0.5265245", "0.5258437", "0.524834", "0.52459884", "0.5239925", "0.5239925", "0.523847", "0.5213012", "0.5211838", "0.5209146", "0.52053845", "0.5199889", "0.51993465", "0.5193347", "0.5191202", "0.51910985", "0.5183301", "0.5183241", "0.51827437", "0.51755595", "0.5165957", "0.5164364", "0.51598334", "0.5158349", "0.5157558", "0.5157219", "0.5157075", "0.51523584", "0.515105", "0.5149021", "0.51465344", "0.51446164", "0.513314", "0.51254195", "0.5125212", "0.51227665", "0.51190096", "0.51181054", "0.5117727", "0.51126724", "0.5107727", "0.5105046", "0.5102107", "0.5100808", "0.5098018", "0.5093533", "0.50921154", "0.50921154", "0.50921154", "0.50921154", "0.50921154", "0.5091357", "0.5090711", "0.50863403", "0.50855815", "0.50801355" ]
0.5726188
10
Logs an entry for the homework log using a timer
def do_start(self, input): course_name = course.course_name(input) if course_name in config.current_courses: timer = Timer(course_name) timer.start() if self.stop(): timer.stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timer(work_log):\n start = time.time()\n print '\\nyou started working at %s\\n' % time.ctime(int(start))\n\n input = raw_input(\"\\ntype 'stop' to stop timer...\\n\")\n while (input != 'stop'):\n input = raw_input(\"\\ntype 'stop' to stop timer...\\n\")\n work = raw_input(\"\\nwhat'd you work on?\\n\")\n stop = time.time()\n print_to_file(start, stop, (stop-start), work, work_log)", "def internal_event(self):\n # log activity\n self.log_activity(LogEntry(\n sys_time=time(),\n logical_time=self.logical_clock,\n action=\"work\"\n ))", "def entry(entry,fhandle): \n\n import time\n timestamp = time.strftime('%H:%M:%S')\n print entry\n try: fhandle.write('{0}: {1}\\n'.format(timestamp, entry))\n except: print 'Log File Error: entry could not be logged'\n return", "def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))", "def internal_event (self):\n self.clock_time += 1\n self.log()", "def log_tofile(self, inst):\n self._tick += 1\n if self._tick >= self._second:\n self.logger.log(inst)\n self._tick = 0", "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def log_entry(debug, out, text):\n # Format log entry\n monthday = make_time_stamp('%m%d')\n time_stamp = make_time_stamp('%H:%M:%S')\n now = time.time()\n ms = \".\"+str('%06d' % int((now - int(now)) * 1000000))\n line_form = \"I{monthday} {time_stamp} 0000 main.py:00] {text}\\n\"\n entry = line_form.format(monthday=monthday, time_stamp=time_stamp+ms, text=text)\n\n # Log entry to stderr\n sys.stderr.write(entry)\n pass", "def logFile(self):\n\n event = 'stim'\n mStr = '{:013}'.format(self.mouse.tag) + '\\t'\n outPutStr = mStr + \\\n datetime.fromtimestamp(int(time())).isoformat(' ') + '\\t' + event\n print (outPutStr)\n if self.textfp != None:\n outPutStr = mStr + '{:.2f}'.format(time()) + '\\t' + event\n self.textfp.write(outPutStr + '\\n')\n self.textfp.flush()", "def log(self, game: str, outcome: str):\n current_time = datetime.now()\n self.user.record.append([current_time.strftime(\"%c\"), game, outcome, self.user.balance])", "def log(exercise):\n global logfile\n msg = raw_input(\"Enter your message. 
\")\n logfile.write(exercise + \" >>> \" + msg + \"\\n\")", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def task_display_funny_time():\n print(\"funny time is %s\" % datetime.datetime.now())\n logger.info(\"Hurray its working\")", "def Log(self, times):\n\n print '--'\n print times.PrettyPrintLog()\n\n return", "def log_schedule(self):\n self.logger.log_schedule(self.params.schedule)", "def logWork(self, id, logData):\n\t\tnow = datetime.datetime.now()\n\t\ttimeText = now.strftime(LOG_DATE_FORMAT)\n\n\t\tlogToEdit = self.LOGS.get(id).get(\"log\")\n\t\t#If inside this case and there is already a log entry for this time\n\t\talreadyEntryForThisTime = False\n\t\tfor entry in logToEdit:\n\t\t\tif timeText == entry[0]:\n\t\t\t\tentry[1] += logData\n\t\t\t\talreadyEntryForThisTime = True\n\n\t\tif not alreadyEntryForThisTime:\n\t\t\tlogToEdit.append([timeText, logData])\n\t\t\tself.logByDate(now.strftime(CASE_DATE_FORMAT), id)\n\n\t\tself.save_logs(\"c\")\n\t\tself.autoCommit()\n\t\treturn 0", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def log(text):\n print \"%s: %s\" % (str(datetime.datetime.now()), text)", "def log_time(name):\n if DEBUG:\n now = time.time()\n logging.debug('emcc step \"%s\" took %.2f seconds', name, now - TimeLogger.last)\n TimeLogger.update()", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def log(self, event_cls, *args, **kw):\n args = list(args)\n args.append(self.time)\n args.append(self)\n args.append(self.worldview.locality_copy())\n 
self.diary.log(self.time, event_cls(*args, **kw))", "def _log(self, runtime, extra):\n\t\tif extra is None:\n\t\t\tdebug(\"Timer - %s took %d ms\" % (self._item, 1000 * runtime))\n\t\telse:\n\t\t\tdebug(\"Timer - %s [%s] took %d ms\" % (self._item, str(extra), 1000 * runtime))\n\t\treturn self", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def logentry(self, string=None):\n if (self._OIFlogging):\n oiflogfile = open(self._commslogfilename, \"a\")\n oiflogfile.write(\"# \" + \"%04.6fs: \" % (self._gettime() - self._logstarttime) + string + \"\\n\")\n oiflogfile.flush()\n else:\n# if self._print_once:\n# self._print_once = 0\n# print self.hilite(\"Warning: Not logging OIF transactions. Use\\n it.logfile(<filename>) to set log filename and\\n it.logging(True) to enable logging\", False, True)\n print 'Unable to write log entry', string\n return", "def log_time(label: str) -> None:\n print(label, datetime.now())", "def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)", "def log(self, extra=None):\n\t\tself.stop()\n\t\treturn self._log(self.time(), extra)", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def timer_callback(self):\n # There're 5 logger-level in ROS 2 get_logger() System.\n # Try out and watch whats difference.\n self.get_logger().debug(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().info(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().warn(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().error(f'==== Hello ROS 2 : {self.count}====')\n self.get_logger().fatal(f'==== Hello ROS 2 : {self.count}====')\n\n self.count += 1", "def log_entry(self, timestamp, entry):\n if timestamp in self.log:\n self.log[timestamp].update(entry)\n else:\n self.log[timestamp] = entry", "def logger(start_time, file):\n with open(file, \"a\") as text:\n text.write(\"\"\"\n\n Current date and time: {}\n Program ran in {} seconds.\n \"\"\".format(datetime.datetime.now(), time.process_time() - start_time))\n\n return 'hello'", "def log(self, *args, **kwargs):\n self.game_view.log(*args, **kwargs)", "def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))", "def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))", "def add_log(conn, task, start_time):\n cursor = conn.cursor()\n cursor.execute('INSERT INTO timelogs (task, start_time) VALUES (?, ?);', (task, start_time))", "async def log_time(self, event):\n sender = await event.get_sender()\n user = utils.get_display_name(sender)\n\n message = event.message\n\n time = message.date.astimezone(self.__to_zone).time().hour\n\n logging.debug(\"Got the following message: \\\"\" + event.raw_text + \"\\\" at time \" + str(time))\n\n self.__contact_times.labels(user).observe(time)", "def on_start(self):\r\n self.log()", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? 
Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def on_a(self):\r\n self.log()", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def print_to_file(start, stop, time_worked, work_text, work_log):\n today = datetime.date.today()\n\n record = ' || %.2f || %.2f || %.4f hours || %s\\n' % (start, stop, time_worked/3600, work_text)\n\n #if it is a new file you have the option to set a start time for the project\n # and how many hours a week you want to work\n if not os.path.isfile(work_log):\n while True:\n option = raw_input('\\nThis is a new log, would you like to specify a start date and a hours per week goal for the project? (y/n): ').lower()\n if option == 'y':\n date = raw_input('\\nplease enter the start date of the project (dd-mm-yyyy): ')\n hours_per_week = raw_input('\\nplease enter the number of hours you intend to work on the project per week: ')\n try:\n datetime.datetime.strptime(date, '%d-%m-%Y')\n if hours_per_week.isdigit():\n f = open(work_log, 'a')\n f.write('#! || ' + date + ':' + hours_per_week + '\\n')\n f.close()\n break\n else:\n print \"\\nPlease enter a valid number for hours to work!\\n\"\n except ValueError:\n print \"\\nPlease enter a valid date!\\n\"\n\n else:\n break\n\n\n f = open(work_log, 'a')\n print '\\n\\n' + today.strftime('%b-%d-%Y') + record\n f.write(today.strftime('%b-%d-%Y') + record)\n f.close()", "def main():\n\n # check database for tracking options\n # if empty prompt to add subject\n\n # present tracking options\n\n # calculate timedelta\n\n # printing/updating the time", "def main_log(logfile, entry, print_tag=False):\n if logfile != None:\n with open(logfile, 'a') as lf:\n lf.write('{}\\n'.format(entry))\n\n if print_tag:\n print entry", "def InsertLog():", "def log_time(self, batch_idx, duration, loss):\n samples_per_sec = self.opt.batch_size / duration\n time_so_far = time.time() - self.start_time\n print_string = \"exp_name {} \\n| dataset: {:>5} | epoch {:>3} | batch {:>6}/{:>6} | \" \\\n \"examples/s: {:5.1f} | loss: {:.5f}\"\n print(print_string.format(self.log_path.split('/')[-1], self.syn_or_real, self.epoch, batch_idx,\n self.num_total_batch, samples_per_sec, loss))", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def after_epoch(self):\n line = ' '.join([str(k) + ': ' + str(v) for k, v in self.trainer.status.items()])\n with open(os.path.join(self.root_path, 'log.txt'), 'a+') as fout:\n fout.write(line + '\\n')", "def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()", "def _addLogEntry(request, action, pagename, filename):\n from MoinMoin.logfile import editlog\n t = wikiutil.timestamp2version(time.time())\n fname = wikiutil.url_quote(filename)\n\n # Write to global log\n log = editlog.EditLog(request)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)\n\n # Write to local log\n log = editlog.EditLog(request, 
rootpagename=pagename)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)", "def logStarted(build, step, log):", "def time_automation_listener(now):\n action()", "def time_in(self):\n if self.is_logged():\n self.time_out()\n else:\n TaskLog.objects.create(task=self)", "def log_message(self, srcid, msg, t = None):\n t = time.time() if t is None else t\n self.insert(\"textlog\", {\"readout_id\":srcid, \"time\":t, \"msg\":msg})", "def _log_results(self, results):\n log.new_entry(results)\n self.new_entry = 2", "def QueueLog(self):\n # Check every 100ms if there is a new message in the queue to display\n while True:\n try:\n record = self.log_queue.get(block=False)\n\n except queue.Empty:\n\n break\n else:\n self.display(record)\n self.fram.after(100, self.QueueLog)", "def _log(self, action: types.NestedArray) -> None:\n if self._logger is None:\n return\n self._logger.info('{}, {}, {}, {}, {}, {}, {}'.format(\n self._last_timestep.observation['STAGE'],\n self._last_timestep.observation['CHIPS'],\n self._last_timestep.observation['PLAYER_TOTAL'],\n self._last_timestep.observation['PLAYER_ACES'],\n self._last_timestep.observation['DEALER_TOTAL'],\n action,\n self._deck_distribution))", "def __log_trial__(self, trial_data):\n from klibs.KLDatabase import EntryTemplate\n\n trial_template = EntryTemplate('trials')\n trial_template.log(P.id_field_name, P.participant_id)\n for attr in trial_data:\n trial_template.log(attr, trial_data[attr])\n\n return self.database.insert(trial_template)", "def log(self, message):", "def _log_progress(self, t):\n\n # Run the update only 2 step before the actual logging happens in order to\n # make sure that the most recent possible values will be stored in\n # self.summary. This is a hacky workaround in order to support OffPolicyAgent\n # which runs 2 threads without coordination\n if (t+2) % self.log_freq == 0 and self.learn_started:\n episode_rewards = self.env_monitor.get_episode_rewards()\n self.episode_rewards = np.asarray(episode_rewards)\n if self.episode_rewards.size > 0:\n self.mean_ep_rew = np.mean(episode_rewards[-self.stats_n:])\n self.best_mean_ep_rew = max(self.best_mean_ep_rew, self.mean_ep_rew)\n\n if t % self.log_freq == 0 and self.learn_started:\n stats_logger.info(\"\")\n for s, lambda_v in self.log_info:\n stats_logger.info(s.format(lambda_v(t)))\n stats_logger.info(\"\")\n\n if self.summary:\n # Log with TensorBoard\n self.tb_writer.add_summary(self.summary, global_step=t)", "def writeToLog(self, type, line):\r\n self._log.append((type, time(), line))\r\n if len(self._log[self._logIndex:]) >= self.config.get('logging', 'loginterval'):\r\n self.logFlush()\r\n return True\r\n return False", "def timer_callback(*args):\n logging.debug(\"timer callback at %s\" % datetime.now())", "def log(self, txt, dt=None):\n try:\n dt = dt or self.datas[0].datetime.datetime(0)\n print('%s, %s' % (dt.isoformat(), txt))\n except IndexError:\n print('%s' % txt)", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def record_task_attempt(task_name: str):\n\n from common.models import InvenTreeSetting\n\n logger.info(f\"Logging task attempt for '{task_name}'\")\n\n InvenTreeSetting.set_setting(f'_{task_name}_ATTEMPT', datetime.now().isoformat(), None)", "def run_game_log(player, game_log):\n\n\tgame_log.update_room_type()\n\tcurrent_msg = game_log.get_current_message()\n\n\tprint('HP: {} GOLD: {}'.format(player.stats['HP'], player.stats['GOLD']))\n\tprint('CURRENT ROOM: 
{}\\n'.format(game_log.current_room))\n\tprint(\"{}'s LOG: {}\".format(player.info['Name'].upper(), current_msg))", "def add_log(self,txt):\n try:\n now=datetime.datetime.now()\n new_item=QtWidgets.QListWidgetItem(now.strftime('%Y/%m/%d %H:%M:%S')+\": \"+txt)\n self.ui.logger_list.addItem(new_item)\n if self.h5saver.h5_file.isopen:\n self.h5saver.append(self.h5saver.logger_array, now.strftime('%Y/%m/%d %H:%M:%S')+\": \"+txt)\n\n except:\n pass", "def log(self, obj, action):\n action_dict = {'time': time.time(),\n 'action': action}\n self.log_data[obj.get_obj_id()]['actions'].append(action_dict)", "def listener(q, output_file):\n\n with open(output_file, 'w') as time_code_log:\n while True:\n m = q.get()\n if m == 'kill':\n time_code_log.write('killed')\n break\n\n time_code_log.write(m[0] + '\\n')\n time_code_log.write(m[1] + '\\n')\n time_code_log.write(str(m[2]) + '\\n')\n time_code_log.flush()", "def on_L1(self):\r\n self.log()", "def log(msg=\"\"):\n print(msg)\n sys.stdout.flush()\n f = open(\"/target/testdriver.log\", \"a\")\n f.write('{:%Y-%m-%d %H:%M:%S.%s} :: '.format(datetime.datetime.now()))\n f.write(f\"{msg}\\n\")\n f.close()", "def record_event(self, description, time=None, additional=None):\n if time is None:\n time = datetime.datetime.now()\n if additional is not None:\n self.history.append((time, (description, additional)))\n else:\n self.history.append((time, description))", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def log(self, client_addr, request):\n with codecs.open(self.log_path, \"a\", 'UTF-8') as fh_out:\n print >> fh_out, (time.strftime('%Y-%m-%d %H:%M:%S') + \"\\t\" +\n ':'.join([str(i) for i in client_addr]) + \"\\t\" +\n request)", "def simulate(self, path):\n prev_time = 0\n for entry in self.read_log(path):\n event = Event(entry)\n if event.time != prev_time: # Show frame at time i\n self.show()\n prev_time = event.time\n self.process_event(event)", "def log_time(self, batch_idx, duration, loss):\n samples_per_sec = self.batch_size / duration\n time_sofar = time.time() - self.start_time\n training_time_left = (\n self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0\n print_string = \"epoch {:>3} | batch {:>6} | examples/s: {:5.1f}\" + \\\n \" | loss: {:.5f} | time elapsed: {} | time left: {}\"\n print(print_string.format(self.epoch, batch_idx, samples_per_sec, loss,\n sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left)))", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def log_time(self, batch_idx, duration, loss):\n samples_per_sec = self.opt.batch_size / duration\n time_sofar = time.time() - self.start_time\n training_time_left = (\n self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0\n print_string = \"epoch {:>3} | batch {:>6} | examples/s: {:5.1f}\" + \\\n \" | loss: {:.5f} | time elapsed: {} | time left: {}\"\n print(print_string.format(self.epoch, batch_idx, samples_per_sec, loss,\n 
sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left)))", "def log_every(self, identifier, message, seconds=1):\r\n if self.has_been_n_seconds_since_last(identifier, seconds):\r\n logger.info(message)\r\n else:\r\n logger.debug(message)", "def run_timing():\n time_log = []\n while True:\n one_run = input(\"Enter your time for this 10 km: \")\n if not one_run:\n break\n try:\n time_log.append(float(one_run))\n except ValueError:\n print(\n \"Hey, you enter something strange, \"\n \"please enter a valid number\")\n avg_time = sum(time_log) / len(time_log)\n return f\"Your average time is about: {avg_time:.1f} \" \\\n f\"over {len(time_log)} runs\"", "def log_completed_run(self, log_file_path):\n with open(log_file_path, \"r\") as f:\n stats = f.read().splitlines()\n\n self._parse_log_entry(stats)\n self.experiment.log_other(\"log_file_path\", log_file_path)", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? 
(S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def log(self, level, *msg_elements):\r\n self.report.log(self._threadlocal.current_workunit, level, *msg_elements)", "def print_log(*content):\n now = datetime.datetime.now().strftime(\"%y-%m-%d %H:%M:%S\")\n print(\"MODEL INFO: \" + str(now)+ \" \", end='')\n print(*content)", "def log(self, txt):\n if self.logfile:\n self.logfile.write(txt)", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def logByDate(self, timeText, id):\n\t\t# Array of IDs for a day\n\t\tdayLog = self.LOGS.get(\"by_date\").get(timeText)\n\n\t\t# If day already made\n\t\tif dayLog:\n\t\t\tif not id in dayLog:\n\t\t\t\tdayLog.append(id)\n\t\telse:\n\t\t\tself.LOGS.get(\"by_date\")[timeText] = [id]", "def record_time(t):\n\n f = open('time.out', 'w')\n f.write(str(t))\n f.close()", "def debug_started(self, command):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n lines = [\n \"%s\\n\" % (\"*\" * self.line_lenght),\n \"Command: %s\\n\" % command,\n \"DateTime: %s\\n\" % now,\n \"%s\\n\" % (\"*\" * self.line_lenght)\n ]\n\n with open(self.debug_log, 'a+') as logfile:\n logfile.writelines(lines)", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" \" + message)", "def check_timer():\n end = time.time()\n time_elapsed = end - target_time[0]\n durationMSG = fg.cyan + f\"Scans Completed for {args.target} in: \" + fg.rs\n print(durationMSG, display_time(time_elapsed))", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def write(self, txt): \n self.log.appendtext(txt)\n self.log.update_idletasks()\n return", "def log(self, msg):\n\n\t\tself.eyetribe.log_message(msg)", "def log(self, msg=None):\n f = open(self.logbook, 'a')\n # if send or receive, write message\n if msg: \n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + \n \" \" + 
str(msg) + '\\n')\n # if it is an internal event just write the system time and current\n # logical clock time\n else:\n f.write(\" System time: \" + str(datetime.now()) + \n \" Logical clock time: \" + str(self.clock_time) + '\\n')\n f.close()", "def log(self, loginfo):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s:%(message)s',\n datefmt='%d %b %Y %H:%M:%S',\n filename=self.logfilepath,\n filemode='w')\n filelog = logging.FileHandler(self.logfilepath)\n logging.getLogger('Functest').addHandler(filelog)\n logging.info(loginfo)", "def _print_logs_info(job_id, project_id):\n print(\"\\nJob submitted successfully.\")\n print(\"Your job ID is: \", job_id)\n print(\"\\nPlease access your training job information here:\")\n print(\n \"https://console.cloud.google.com/mlengine/jobs/{}?project={}\".format(\n job_id, project_id))\n print(\"\\nPlease access your training job logs here: \"\n \"https://console.cloud.google.com/logs/viewer?resource=ml_job%2F\"\n \"job_id%2F{}&interval=NO_LIMIT&project={}\\n\".format(\n job_id, project_id))", "def log(message):\n path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n with open(os.path.join(path, logfile_name), 'a+') as f:\n t = strftime(\"%d %b %Y %H:%M:%S\", gmtime())\n f.write(\"\\n\" + t + \" %s\" % message)" ]
[ "0.70600265", "0.64849997", "0.64838594", "0.6340308", "0.626186", "0.62565213", "0.6244451", "0.61845344", "0.6172261", "0.61576736", "0.61439645", "0.6137266", "0.61355144", "0.61291295", "0.60836816", "0.60411006", "0.60167503", "0.6007121", "0.59954494", "0.5931569", "0.59120244", "0.59064573", "0.58965003", "0.58921224", "0.58653826", "0.5857319", "0.5856026", "0.5855597", "0.58551645", "0.58194077", "0.5807463", "0.580267", "0.5792795", "0.5787527", "0.5776082", "0.57709527", "0.5751121", "0.5742101", "0.5717202", "0.5714032", "0.57138205", "0.56932104", "0.56879073", "0.5687035", "0.56595445", "0.5649528", "0.5640544", "0.5619968", "0.5614741", "0.5593052", "0.55851716", "0.558189", "0.55804276", "0.5566729", "0.5558903", "0.5541059", "0.5522113", "0.55192804", "0.5498454", "0.54628766", "0.54506767", "0.5441389", "0.5440478", "0.54370886", "0.5436044", "0.54190946", "0.5410207", "0.5405395", "0.5404787", "0.5389913", "0.5389582", "0.5382065", "0.5377675", "0.5365362", "0.5357453", "0.5353378", "0.5348371", "0.53434265", "0.53412867", "0.5336479", "0.5334991", "0.5334269", "0.53336316", "0.53226274", "0.5322209", "0.5318494", "0.53139496", "0.5312586", "0.53125644", "0.53110665", "0.5302052", "0.530113", "0.5299679", "0.5298444", "0.5286686", "0.5283248", "0.52771425", "0.527601", "0.5273054", "0.5267496", "0.52626884" ]
0.0
-1
Determines if an input is a float.
def is_float(self, input): try: float(input) return True except ValueError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True", "def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False", "def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False", "def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_float(self, value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False", "def is_float(input_string):\r\n try:\r\n float(input_string)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(input_string):\r\n try:\r\n float(input_string)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(value):\n try:\n float(value)\n except ValueError:\n return False\n else:\n return True", "def is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def isit_float(s):\r\n try:\r\n int(s)\r\n return False\r\n except ValueError:\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def could_be_float(val):\n if val == None:\n return False\n\n if isinstance(val, float):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n f = float(val)\n if not isinstance(f, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_floatable(value):\n\n try:\n float(value)\n return True\n except:\n return False", "def is_float(string):\n try:\n return float(string)\n except ValueError:\n return False", "def isfloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isFloat(string):\n return (True)", "def is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isFloat(string):\n try: float(string)\n except ValueError: return 0\n else: return 1", "def is_valid_float(input_string):\n assert input_string is not None\n try:\n float(input_string)\n return True\n except ValueError:\n return False", "def is_float(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_float(string: str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isfloat(string:str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_float(self, string):\n try:\n return decimal.Decimal(string)\n except decimal.DecimalException:\n return False", "def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False", "def is_float_or_int(value):\n if type(value) is float:\n return True\n elif type(value) is int:\n return True\n else:\n return False", "def check_for_float(check):", "def isfloat(str):\n\n try:\n float(str)\n return True\t\t\t#Returns true if the string is a floating point number\n except (ValueError, TypeError):\n return False\t\t\t#Returns false otherwise", "def is_float(value):\n if isinstance(value, float):\n return True\n\n if isinstance(value, np.ndarray):\n return value.dtype == np.float64\n\n return False", "def is_float(word):\n try:\n float(word)\n return True\n except ValueError:\n return False", "def is_float(space, w_obj):\n return 
space.wrap(w_obj.tp == space.tp_float)", "def is_float(self, size=None):\n return False", "def check_type_force_float(x, name):\n if type(x) is int:\n return float(x)\n elif type(x) is not float and type(x) is not numpy.float64:\n raise TypeError(\"%r should be a float\" % (name,))\n else:\n return x", "def is_float(possible_number):\r\n try:\r\n float(possible_number)\r\n return True\r\n except ValueError:\r\n return False", "def is_number_parse_float(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isFloat(val):\n\tvalFloat = True\n\ttry:\n\t\ttVal = float(val)\n\texcept ValueError:\n\t\tvalFloat = False\n\ttVal = None\n\tr = (valFloat, tVal)\n\treturn r", "def isFloat(string):\n return all(c in \"0123456789.\" for c in string)", "def _is_positive_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item > 0", "def is_float_like(val):\n try:\n return str(float(val)) == str(val)\n except Exception:\n return False", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def value_is_float_not_int(value):\n # this is klugy and only needed to display deprecation warnings\n try:\n int(value)\n return False\n except ValueError:\n try:\n float(value)\n return True\n except ValueError:\n return False\n except TypeError:\n return False", "def is_floating_point(type):\n float_def = (\n create_cv_types(cpptypes.float_t()) +\n create_cv_types(cpptypes.double_t()) +\n create_cv_types(cpptypes.long_double_t()))\n\n return remove_alias(type) in float_def", "def isFloat(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.float32 or col.dtype == np.float64", "def is_floatscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n float,\r\n np.float16,\r\n np.float32,\r\n np.float64,\r\n ))", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_floating_point(self, size=None):\n return False", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def convertFloat(s):\n try:\n float(s)\n return \"FLOAT\"\n except:\n return s", "def str_is_float(value):\n return all(\n [[any([i.isnumeric(), i in ['.', 'e']]) for i in value],\n len(value.split('.')) == 2])", "def _is_non_negative_float(item):\n if not isinstance(item, (int, float)):\n return False\n return item >= 0", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def _float(data):\n try:\n return float(data)\n except ValueError as err:\n if data in ('None', 'NA', 'nan'):\n return nan\n else:\n raise ValueError(err)", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def tryFloat(value):\n try:\n return float(value)\n except:\n return value", "def possible_float(arg):\n try:\n return float(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as a float, treating it as a string')\n return arg", "def isNumber(x):\n return isinstance(x, (int, float))", "def is_number(value):\n try:\n float(value)\n return True\n except 
ValueError:\n return False", "def isNumber(self, s):\n try:\n tmp = float(s)\n return True\n except:\n return False", "def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def float_or_none(s):\n if s:\n return float(s)", "def test_wiki_toc_isfloat_true(self):\n from .wiki_toc import isfloat\n value = isfloat(value='40.22222')\n self.assertTrue(value is True)", "def operand_present(input_str): # HELPER\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def is_sequence_of_float(items):\n return all(isinstance(item, float) for item in items)", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def checkNumberFloat(value):\n try:\n value = float(value)\n return value\n except ValueError:\n print(\"You did not enter a number\")\n newNum = input(\"Please enter a number: \")\n return checkNumberFloat(newNum)", "def check_for_float_and_int(check):", "def char_float(inp_char):\n try:\n nFloat = float(inp_char)\n except:\n nFloat = 0.0\n return nFloat", "def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))", "def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_float(self, env: yaenv.Env):\n _val = env.float('FLOAT_VAR')\n assert _val == 10.0 and type(_val) == float\n _val = env.float('MISSING', -3.1)\n assert _val == -3.1 and type(_val) == float\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.float('LIST_VAR')\n assert 'Invalid numerical' in str(err.value)\n assert env.float('MISSING') is None", "def isNumber(x):\n\treturn type(x) in [int, float]", "def check_pos_float(v):\n status = True\n try:\n val = float(v)\n if val <= 0:\n status = False\n except ValueError:\n status = False\n return status", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def float(x) -> float:\n pass", "def _assert_float_dtype(dtype):\n if not dtype.is_floating:\n raise ValueError(\"Expected floating point type, got %s.\" % dtype)\n return dtype", "def ffloat(string):\n try:\n return float(string.strip())\n except:\n return 0", "def convert_to_float(s):\n try:\n return float(s)\n except TypeError:\n return None", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def _is_real(symbol):\n return isa(symbol, float) or is_int(symbol)", "def test_float(self):\n htype = h5t.py_create('f')\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def test_wiki_toc_isfloat_false(self):\n from .wiki_toc import isfloat\n value = isfloat(value='test_float')\n self.assertTrue(value is False)", "def is_float3(items):\n return len(items) == 3 and all(isinstance(item, float) for item in items)", "def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n 
float(s)\n return True\n except ValueError:\n return False", "def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False", "def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False", "def validate_float(self, p_str):\n # may validate only '[+-].' which needs to be handled later\n float_pattern = r\"^[\\+\\-]?([0-9]*[.])?[0-9]*$\"\n if re.search(float_pattern, p_str) or p_str == \"\":\n return True\n self.frame.bell()\n return False", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def IsFloatable(self):\r\n\r\n return self.HasFlag(self.optionFloatable)", "def safe_float(float_string: str = \"0.0\") -> float:\n float_things = [None, \"\", \"-\", \"0\"]\n\n if float_string in float_things:\n return 0.0\n else:\n return float(float_string)", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def isrealnum(variable):\n return bool(math.isfinite(variable))" ]
[ "0.82458997", "0.8190943", "0.81595534", "0.81558645", "0.81085587", "0.8093256", "0.807079", "0.807079", "0.80703425", "0.8061959", "0.8060793", "0.8011374", "0.7838578", "0.7823464", "0.77867234", "0.77559304", "0.7752695", "0.7747797", "0.7746936", "0.77243704", "0.7696662", "0.7658992", "0.7640902", "0.76312405", "0.76285326", "0.76165867", "0.7559674", "0.7531927", "0.75238013", "0.7520814", "0.74827254", "0.7369767", "0.7336208", "0.7332111", "0.7326844", "0.71557266", "0.7097585", "0.7084261", "0.7077825", "0.70132476", "0.69466156", "0.6942867", "0.693276", "0.69027615", "0.6882085", "0.6837131", "0.68317866", "0.6823439", "0.68204474", "0.68148977", "0.681379", "0.67892045", "0.678092", "0.678092", "0.678092", "0.67802936", "0.6779412", "0.67675513", "0.67405176", "0.6728251", "0.6716626", "0.6710665", "0.6708011", "0.67048496", "0.6698156", "0.6694513", "0.66868407", "0.6673555", "0.66717637", "0.6667786", "0.66517895", "0.6631107", "0.66027755", "0.65864027", "0.6576154", "0.6573517", "0.65604794", "0.6555999", "0.655375", "0.6524634", "0.65217745", "0.65208524", "0.6518815", "0.6515827", "0.6509773", "0.6499663", "0.6494604", "0.6492649", "0.64817905", "0.6477013", "0.6460381", "0.645624", "0.6451185", "0.64445364", "0.6443786", "0.6433912", "0.64223486", "0.6420339", "0.6412904", "0.64071983" ]
0.884747
0
Takes a valid float from the user or the escape command
def hours_studied(self):
    value = input("Enter value (or 'exit')>>>")
    while not self.is_float(value):
        value = input("Enter value (or 'exit')>>>")
        # Escape command
        if value == 'exit':
            return value
    return float(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prompt_float_input(prompt_name: str, get_user_input: GetInputFunc) -> float:\n try:\n return float(get_user_input(f\"{prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))", "def get_float(self, prompt=\"> \"):\n\t\twhile True:\n\t\t\tans = raw_input(prompt)\n\t\t\ttry: \t\n\t\t\t\tans = float(ans)\n\t\t\t\treturn ans\n\t\t\texcept ValueError:\n\t\t\t\tif ans == \"quit\": quit()\n\t\t\t\telse: print \"Please enter a number using decimal notation.\"", "def check_for_float(check):", "def test_normal_decimal_input(self):\r\n ws_leader = \"S. O'Neal (14.9)\"\r\n res = treat_input(ws_leader, type=\"float\")\r\n assert res == 14.9", "def restricted_float(x):\n try:\n x = float(x)\n except ValueError:\n raise argparse.ArgumentTypeError(\"%r not a floating-point literal\" % (x,))\n return x", "def main():\n\ttry:\n\t\tx = input(\"Type in a number to be converted to a float: \")\n\t\tfloatnum = float(x)\n\t\tprint(floatnum)\n\texcept(ValueError):\n\t\tprint(\"Could not convert the string to a float\")", "def checkNumberFloat(value):\n try:\n value = float(value)\n return value\n except ValueError:\n print(\"You did not enter a number\")\n newNum = input(\"Please enter a number: \")\n return checkNumberFloat(newNum)", "def read_float(message, default=None, help=None):\n while True:\n option = raw_input(message)\n if not option:\n if default is not None:\n return default\n else:\n print(\"Please provide a value.\")\n elif option == \"?\":\n if help:\n print(help)\n else:\n print(\"Please provide a number\")\n else:\n try:\n return float(option)\n except ValueError:\n print(\"Expected a number.\")\n pass", "def sanitizeFloatFromKeyboard(s,range_start=0,range_end=0):\n try:\n\tx = float(s)\n except ValueError:\n\terr = 1\n\treturn err,0\n\n if (x >= range_start) and (x <= range_end):\n\terr = 0\n return err,x\n else:\n\terr = 1\n\treturn err,x", "def _restricted_float(val: float):\n try:\n val = float(val)\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} not a floating-point literal\")\n\n if 0.0 < val > 1.0:\n raise argparse.ArgumentTypeError(f\"{val} not in range [0.0, 1.0]\")\n return val", "def validate_float(self, p_str):\n # may validate only '[+-].' which needs to be handled later\n float_pattern = r\"^[\\+\\-]?([0-9]*[.])?[0-9]*$\"\n if re.search(float_pattern, p_str) or p_str == \"\":\n return True\n self.frame.bell()\n return False", "def char_float(inp_char):\n try:\n nFloat = float(inp_char)\n except:\n nFloat = 0.0\n return nFloat", "def getFloat(fl):\n while True:\n try:\n return float(input(fl))\n except ValueError:\n print(\"Give a proper value please\")", "def possible_float(arg):\n try:\n return float(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as a float, treating it as a string')\n return arg", "def gravedad(frm):\r\n x=int(frm.txtGrav.GetValue())\r\n while type(x)!=float:\r\n try:\r\n x=float(x)\r\n\r\n if type(x)==float:\r\n return -x\r\n\r\n else:\r\n pass\r\n\r\n except:\r\n print \"Ingrese un valor valido para la gravedad\"\r\n print\r\n x=raw_input(\"?Cu?l es la aceleracion de la gravedad?: m/s\")", "def get_user_input():\n return float(input('Your transaction amount please: '))", "def number_format(num):\n while True:\n try:\n user_input = float(input(num))\n return user_input\n except ValueError:\n print(\"Error. Please enter the desired number. 
You may use \"\n \"decimals.\")\n except:\n print(\"Error: unknown.\")", "def test_try():\n numb = input(\"Enter a number\")\n print(type(numb))\n try:\n num = float(numb)\n print(num)\n except Exception as e: # if there is exception,we wont crash,we will catch it\n print(e)\n print(\"Exiting the program\")", "def give_me_a_float():\n return 5.8\n pass", "def handle_commas(x):\n if type(x) != str:\n print (\"Please enter a string\")\n return False\n else:\n x = float(x.replace(\",\",\"\"))\n return x", "def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False", "def setfloat(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_double(value)\n self.lib.AT_SetFloat(self.AT_H, command, value)", "def _ensure_positive_float(f):\n if float(f) < 0:\n raise argparse.ArgumentTypeError(\n 'Argument must be greater than zero')\n return float(f)", "def get_check_fn():\r\n fn = input(\"Enter your favorite number.\\n\\t\")\r\n while True:\r\n try:\r\n fn = float(fn)\r\n except ValueError:\r\n fn = input(\"Must enter a number.\\n\\t\")\r\n else:\r\n return fn", "def isFloat(string):\n return (True)", "def velocidad(frm):\r\n x=int(frm.txtvel.GetValue())\r\n while type(x)!=float:\r\n try:\r\n x=float(x)\r\n\r\n if type(x)==float:\r\n return x\r\n\r\n else:\r\n pass\r\n\r\n except:\r\n print \"Ingrese un valor v?lido para la fuerza\"\r\n print\r\n x=raw_input(\"?Cu?l es la velocidad inicial del objeto? (m/s): \")", "def default(self, line):\n if line == \"EOF\":\n return True\n elif line in self.operations:\n try:\n self.calc.operation(self.operations[line])\n\t\tself._debug_stack()\n except ReversePolishCalcError, errmsg:\n self._debug_calc_error(errmsg)\n elif re.match(\"[0-9.eE]+\", line):\n try:\n self.calc.insert_value(float(line))\n except ValueError:\n debug(\"Invalid float value: %s\" % line)\n else:\n debug(\"Unknown command: %s\" % line)", "def Float(val):\n try:\n return float(val)\n except ValueError:\n return ''", "def clean_input(prompt='Error'): # A special input function that will reject a\r\n # user's input of text when a number is requested -- if no prompt is\r\n # specified in the program, it will display \"Error\"\r\n text = True\r\n phrase = '0'\r\n while text:\r\n phrase = input(prompt + '\\n')\r\n try: # Adapted from an example in the ThinkPython textbook (15.7) -\r\n # Checks whether the input is a number, positive or negative. 
If\r\n # not, rejects the input and user gets to try again\r\n float(phrase)\r\n text = False\r\n except ValueError:\r\n print(\"Error: Non-Numeric Entry Detected\")\r\n # if phrase.isnumeric(): # Checks for a positive number (negative\r\n # rejected as well as text) - replaced with superior form from textbook\r\n # example\r\n # return float(phrase) # Return the number the user entered\r\n # else:\r\n # print(\"Error: Non-Numeric Entry Detected\")\r\n return float(phrase) # Return the number the user entered\r", "def validation_method(input_value):\r\n while True:\r\n try:\r\n valor = float(input(input_value))\r\n return valor\r\n except ValueError:\r\n print(\" ingresa un número\")", "def validate_calibration(prompt,name):\n \n while True:\n temp_value = input(prompt)\n try:\n float(temp_value)\n\n if temp_value > 0:\n return float(temp_value)\n else:\n print(\"invalid {}\".format(name))\n except:\n print(\"invalid {}\".format(name))", "def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))", "def test_float(self, env: yaenv.Env):\n _val = env.float('FLOAT_VAR')\n assert _val == 10.0 and type(_val) == float\n _val = env.float('MISSING', -3.1)\n assert _val == -3.1 and type(_val) == float\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.float('LIST_VAR')\n assert 'Invalid numerical' in str(err.value)\n assert env.float('MISSING') is None", "def get_float(message, high, low=0):\r\n\r\n while True:\r\n try:\r\n floatValue = float(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if floatValue <= low or floatValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". Please try again.\")\r\n continue\r\n break\r\n return floatValue", "def usetf(self, prompt=None, default=None):\n\n i = 0\n abak = copy(default) # Backup our default value\n\n while(i<self.maxTries):\n tmp = self.uset(prompt,default)\n try:\n a = float(tmp)\n i = self.maxTries # preload failure\n except:\n # Print warning\n print\n print \" WARNING: Invalid Entry. 
Please enter a floating point number \"\n print \n # reload the default\n a = abak\n i = i+1\n\n return(a)", "def validate_insert(self, s, internal=True):\n super(FieldNumeric, self).validate_insert(s, internal) # mandatory check\n if s:\n try:\n float(s)\n except:\n raise FilemanError(\"\"\"[%s] is not a valid number\"\"\" % s)", "def operand_present(input_str): # HELPER\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "def is_valid_float(input_string):\n assert input_string is not None\n try:\n float(input_string)\n return True\n except ValueError:\n return False", "def check_arguments(arguments):\n quit = False\n for argument, value in vars(arguments).items():\n try:\n float(value)\n except:\n print(\"{} must be numeric\".format(argument))\n quit = True\n if quit:\n exit(1)", "def is_float(self, input):\n try:\n float(input)\n return True\n except ValueError:\n return False", "def checkfloat(name, val, mn=None, mx=None):\n try:\n\tx = string.atof(val)\n\tif ((mn is not None and x < mn) or\n\t (mx is not None and x > mx)):\n\t\traise ValidationError, \\\n\t\t 'parameter \"%s\", value \"%s\" is out of range' % \\\n\t\t (name, val)\n\treturn\n except ValueError:\n\traise ValidationError, '%s is not a valid floating point number' % val", "def posfloat_p(value):\n # check if the value has the expected type\n if type(value) is not float:\n raise Invalid(\"invalid value type {value}\".format(value=value))\n if value <= 0.0:\n raise Invalid(\"invalid value {value}, positive float expected\".format(value=value))", "def float_or_none(s):\n if s:\n return float(s)", "def value_input(unit):\n print(Fore.CYAN + \"\\n Enter the temperature in \\u00b0\" + unit + \":\\n\" +\n Fore.RESET)\n while True:\n try:\n value = float(input()) # <=== Make sure input is a float\n return value\n break\n except ValueError:\n print(Fore.RED + \"\\n Input must be an integer!\\n\" + Fore.RESET)", "def check_for_float_and_int(check):", "def convert_str_float(x):\n\ttry:\n\t\treturn float(x)\n\texcept ValueError:\n\t\tprint(\"must be a number\")", "def price_product():\n while True:\n price = raw_input(\"Add the price: \")\n try:\n price = float(price)\n return price\n except ValueError: #if the price is a number\n print \"Insert a number\"\n return price", "def handle_value(value):\n\n if value[-1] == 'x':\n return float(value[0:-1])\n\n if value[-1] == '%':\n return float(value[0:-1])\n\n if value[0].isdigit():\n return bytify(value)\n\n raise ValueError", "def read_float(v):\n if v.strip() == '':\n return 0.\n try:\n return float(v)\n except ValueError:\n # ENDF6 may omit the e for exponent\n return float(v[0] + v[1:].replace('+', 'e+').replace('-', 'e-')) # don't replace leading negative sign", "def ffloat(string):\n try:\n return float(string.strip())\n except:\n return 0", "def test_get_value1(monkeypatch):\n mr.initialize_donors()\n monkeypatch.setattr('builtins.input', lambda x: 4)\n value = mr.get_value('Enter a float', float)\n assert value == float('4')", "def tryFloat(value):\n try:\n return float(value)\n except:\n return value", "def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)", "def 
convertFloat(num):\n try:\n num = float(num)\n return num\n except (TypeError, ValueError, ArithmeticError):\n print(\"num cannot convert float\")", "def float_or_auto(val):\n try:\n return float(val)\n except:\n if isinstance(val, basestring) and val == \"auto\":\n return val\n raise TraitError, \"Tick interval must be a number or 'auto'.\"", "def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False", "def parse_float_value(self, value: str):\n value, power = self.parse_value_ending(value)\n try:\n value = float(value)\n return value * 10 ** power\n except:\n raise Exception(\"Failed to parse the __value.\")", "def maybe_float(v):\n try:\n return float(v)\n except ValueError:\n return v", "def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False", "def evaluate(self):\n self.getInput()\n try:\n self.result = eval(self.userInput)\n except ZeroDivisionError:\n self.entry.delete(0, END)\n self.entry.insert(0, \"Not a number\")\n except SyntaxError:\n self.entry.delete(0, END)\n self.entry.insert(0, \"Input error\")\n else:\n self.entry.delete(0, END)\n self.entry.insert(0, self.result)", "def parse_float(val, fn):\n return float(val)", "def test_number_input(self):\r\n easy_eval = lambda x: calc.evaluator({}, {}, x)\r\n\r\n self.assertEqual(easy_eval(\"13\"), 13)\r\n self.assertEqual(easy_eval(\"3.14\"), 3.14)\r\n self.assertEqual(easy_eval(\".618033989\"), 0.618033989)\r\n\r\n self.assertEqual(easy_eval(\"-13\"), -13)\r\n self.assertEqual(easy_eval(\"-3.14\"), -3.14)\r\n self.assertEqual(easy_eval(\"-.618033989\"), -0.618033989)", "def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)", "def validate_answer(answer):\r\n try:\r\n float(answer)\r\n return True\r\n except ValueError:\r\n return False", "def val_parser(parser, inputstring):\n\n inputstring = inputstring.strip()\n\n if float(inputstring) == 9.9e37:\n output = float('inf')\n else:\n output = float(inputstring)\n if parser == int:\n output = parser(output)\n\n return output", "def is_float(input_string):\r\n try:\r\n float(input_string)\r\n return True\r\n except ValueError:\r\n return False", "def is_float(input_string):\r\n try:\r\n float(input_string)\r\n return True\r\n except ValueError:\r\n return False", "def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False", "def set_float(val,default=None):\n if val in (None,''): return default\n try:\n return float(val)\n except:\n return default", "def is_float(word):\n try:\n float(word)\n return True\n except ValueError:\n return False", "def float(x) -> float:\n pass", "def proper(inputnum):\n if isinstance(inputnum, (bool, complex)):\n return inputnum\n else:\n return float(inputnum)", "def expt(e):\n # print(e)\n print(\"An error has occurred\")\n if 'float division by zero' in e:\n print(\"Unable to divide by zero\")\n cls()\n menu()\n elif 'factorial() argument should not exceed 2147483647' in e:\n print('That number is too large, sorry')\n cls()\n menu()\n elif 'generic' in e:\n print(\"Please try again\")\n cls()\n menu()\n elif 'could not convert string' or \"'NoneType' object is not subscriptable\" in e:\n print('Incorrect input, please try again')\n cls()\n menu()", "def negfloat_p(value):\n # check if the value has the expected type\n if type(value) is not float:\n raise Invalid(\"invalid value type 
{value}\".format(value=value))\n if value >= 0.0:\n raise Invalid(\"invalid value {value}, negative float expected\".format(value=value))", "def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect", "def force_float(element, surpress_error = False):\n \n if isinstance(element, float):\n # element is a float, return it\n return element\n else:\n try:\n # try if the element is a number\n return float(element)\n except (ValueError, TypeError):\n # replace all non-digit characters\n element = str(element)\n matches = convert_pattern.match(element)\n \n if matches != None:\n element = matches.group(0)\n \n try:\n return float(element)\n except (ValueError, TypeError):\n if surpress_error:\n return 0\n else:\n raise", "def isfloat(value):\r\n try:\r\n float(value)\r\n return True\r\n except ValueError:\r\n return False", "def _process_numerical_parameter(param, value, allow_neg=False, is_float=False):\n try:\n if is_float:\n value = float(value)\n else:\n value = int(value)\n except:\n raise ValueError(\"The value of parameter {} was set to {}, but must be numerical!\".format(param, value))\n if not allow_neg and value < 0:\n raise ValueError(\"Parameter {} has value {}, but must be >= 0!\".format(param, value))\n return \"{}={}\".format(param, value)", "def isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def convertFloat(s):\n try:\n float(s)\n return \"FLOAT\"\n except:\n return s", "def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False", "def get_employee_hours_Float(message):\n while True:\n user_input = input('{}: '.format(message))\n\n # Type validation\n try:\n number = float(user_input)\n print(\"You entered this hour \", number)\n break\n except ValueError:\n print('You must enter a Float number.')\n continue\n\n #Range Validation\n # if valid_range and number not in valid_range:\n # _min = min(valid_range)\n # _max = max(valid_range)\n # print('You must enter a number from {} to {}.'.format(_min, _max))\n # continue\n return number", "def _float(data):\n try:\n return float(data)\n except ValueError as err:\n if data in ('None', 'NA', 'nan'):\n return nan\n else:\n raise ValueError(err)", "def check_if_number(list):\n for item in list:\n try:\n float(item)\n except ValueError as e:\n print WrongTypePointError(item)\n sys.exit()", "def get_amount():\n while True:\n try:\n amount = input(\"How much did they donate: \")\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print(\"you have made an invalid choice, try again.\")", "def test_get_value3(monkeypatch):\n mr.initialize_donors()\n monkeypatch.setattr('builtins.input', lambda x: 10.2)\n value = mr.get_value('Enter a string', float)\n assert value == 10.2", "def read_endf_float(string):\n if string.strip() == \"\":\n return 0.0\n if \".\" in string:\n strsplit = string.split('.')\n return float(strsplit[0]+\".\"+strsplit[1].replace(\"-\",\"e-\").replace(\"+\",\"e+\"))\n else:\n return float(string)", "def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def get_input():\n\n end_loop = True # Used to stop the loop for user input\n while end_loop:\n try:\n user_input = str(float(input(\"Please enter a number: \")))\n end_loop = False # The loop breaks once the user has entered valid input\n except():\n print(\"Invalid input, please try again.\")\n\n 
return user_input", "def opr1():\n try:\n a = input(\"Enter the number 'a': \")\n b = input(\"Enter the number 'b': \")\n z = (float(a) ** 2) / float(b)\n return z\n except (ValueError, ZeroDivisionError):\n return 'Something is entered incorrectly!'", "def opr():\n try:\n a = input(\"Enter the number 'a': \")\n b = input(\"Enter the number 'b': \")\n z = (float(a) ** 2) / float(b)\n return z\n except ValueError:\n return 'You entered not a number!'\n except ZeroDivisionError:\n return 'Only stupid divide by zero'", "def check_valid_frac(num) -> float:\n if (num < 0) or (num > 1):\n raise argparse.ArgumentTypeError(f\"{num} is an invalid percentage\")\n return num", "def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True", "def try_float(data):\n try:\n return float(data)\n except (ValueError, TypeError ):\n return data", "def isfloat(string:str) -> bool:\n try:\n float(string)\n return True\n except ValueError:\n return False", "def acceleration():\n \"\"\"\n This block allowed the user to choose whether to use m/s^2 or g as the unit of acceleration.\n valid_units = [\"m\", \"M\", \"g\", \"G\"]\n unit = raw_input(\"Would you like to enter burn acceleration in meters per second squared (press M) or in g-forces (press G)? \")\n while unit not in valid_units:\n unit = raw_input(\"Invalid input. Please press M for m/s^2 or G for g-forces. \")\n \"\"\"\n acceleration = raw_input(\"What is the ship's acceleration in g? \")\n while True:\n try:\n float(acceleration)\n except ValueError:\n acceleration = raw_input(\"Invalid input. Please enter a positive number with no other non-decimal \"\n \"characters. \")\n continue\n else:\n break\n while float(acceleration) <= 0:\n acceleration = raw_input(\"Invalid input. Please enter a positive number. \")\n try:\n float(acceleration)\n except ValueError:\n acceleration = raw_input(\"Invalid input. Please enter a positive number with no other non-decimal \"\n \"characters. \")\n continue\n else:\n break\n # These two lines assisted the block above in converting to g if m/s^2 was chosen as the unit of acceleration.\n '''if unit in [\"m\", \"M\"]:\n acceleration = round(float(acceleration) / 9.8, 2)'''\n return float(acceleration)", "def _float_or_nan(ent):\n try:\n return float(ent)\n except ValueError:\n return float('nan')", "def getFloat(string):\n return (0.0)", "def write_endf_float(value):\n if(abs(value) < 1e-9 or abs(value) > 9.999e9):\n raise ValueError(\"value is too small or too big\")\n valstring = \"{:>13.6e}\".format(value).replace('e','').replace('+0','+').replace('-0','-')\n # with AMPX written files we use \"-0\" instead of \"+0\" for some reason\n if( '+0' in valstring ):\n valstring = valstring.replace('+0','-0')\n return valstring", "def parse_float(value):\n try:\n return float(value)\n except (ValueError, TypeError):\n return None" ]
[ "0.71381927", "0.7047657", "0.7025061", "0.69696033", "0.67903113", "0.6774239", "0.67697823", "0.66692364", "0.6616977", "0.6591584", "0.656603", "0.65500516", "0.6470003", "0.64432317", "0.64170504", "0.6386314", "0.6362336", "0.6322216", "0.6319651", "0.63095284", "0.628306", "0.62816256", "0.6269164", "0.6263565", "0.6245321", "0.62437123", "0.61984056", "0.61793274", "0.61704344", "0.6168855", "0.6155719", "0.6142138", "0.61419845", "0.61346006", "0.60798055", "0.60771513", "0.6073596", "0.60633755", "0.6049336", "0.6036392", "0.60126865", "0.6007995", "0.59937626", "0.5987921", "0.5979778", "0.59756565", "0.59702", "0.59663445", "0.5961952", "0.595345", "0.5950798", "0.5935071", "0.5934135", "0.59280664", "0.5885621", "0.5883926", "0.5878518", "0.5866909", "0.5864821", "0.58452237", "0.58417326", "0.583745", "0.5833384", "0.58262163", "0.5820567", "0.5808444", "0.5808444", "0.58062863", "0.580619", "0.57638144", "0.5753542", "0.5748542", "0.5739517", "0.57318246", "0.5706658", "0.56967", "0.569051", "0.5685406", "0.5680785", "0.56797516", "0.56676847", "0.5667651", "0.56305856", "0.5623565", "0.5623404", "0.5620492", "0.56173575", "0.56139874", "0.5613908", "0.56126136", "0.56078464", "0.5603325", "0.5597563", "0.5595402", "0.5593233", "0.5589791", "0.55767083", "0.5570844", "0.5568808", "0.556633" ]
0.6430425
14
Logs an entry for the homework log using a user-entered value
def do_record(self, input):
    course_name = course.course_name(input)
    if course_name in config.current_courses:
        hours_studied = self.hours_studied()
        # Checks for escape command
        if hours_studied != 'exit':
            write.to_csv(course_name, hours_studied)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log(exercise):\n global logfile\n msg = raw_input(\"Enter your message. \")\n logfile.write(exercise + \" >>> \" + msg + \"\\n\")", "def logentry(jobid, label, typ, content=None, path=None):\n ud = str(uuid.uuid4())\n db.logs.save({\"uuid\":ud, \"jobid\":jobid, \"label\":label, \"type\":typ, \"content\":content, \"date\":tstamp(), \"containerurl\":path})", "def log(self, game: str, outcome: str):\n current_time = datetime.now()\n self.user.record.append([current_time.strftime(\"%c\"), game, outcome, self.user.balance])", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def entry(entry,fhandle): \n\n import time\n timestamp = time.strftime('%H:%M:%S')\n print entry\n try: fhandle.write('{0}: {1}\\n'.format(timestamp, entry))\n except: print 'Log File Error: entry could not be logged'\n return", "def log_entry(debug, out, text):\n # Format log entry\n monthday = make_time_stamp('%m%d')\n time_stamp = make_time_stamp('%H:%M:%S')\n now = time.time()\n ms = \".\"+str('%06d' % int((now - int(now)) * 1000000))\n line_form = \"I{monthday} {time_stamp} 0000 main.py:00] {text}\\n\"\n entry = line_form.format(monthday=monthday, time_stamp=time_stamp+ms, text=text)\n\n # Log entry to stderr\n sys.stderr.write(entry)\n pass", "def logentry(self, string=None):\n if (self._OIFlogging):\n oiflogfile = open(self._commslogfilename, \"a\")\n oiflogfile.write(\"# \" + \"%04.6fs: \" % (self._gettime() - self._logstarttime) + string + \"\\n\")\n oiflogfile.flush()\n else:\n# if self._print_once:\n# self._print_once = 0\n# print self.hilite(\"Warning: Not logging OIF transactions. 
Use\\n it.logfile(<filename>) to set log filename and\\n it.logging(True) to enable logging\", False, True)\n print 'Unable to write log entry', string\n return", "def log(self, inp, print_result: bool = None):\n if print_result is None: print_result = self.log_print\n # Parse input\n if type(inp) == str:\n logged_inp = inp\n else:\n raise NotImplementedError(f\"Given type '{type(inp)}' is not supported for logging.\")\n \n # Print if requested\n if print_result: print(logged_inp)\n \n # Append the string to the file\n append_log(logged_inp, f\"population{'_backup' if self.use_backup else ''}/\"\n f\"storage/\"\n f\"{self.folder_name}/\"\n f\"{self}/\"\n f\"logbook.log\")", "def main_log(logfile, entry, print_tag=False):\n if logfile != None:\n with open(logfile, 'a') as lf:\n lf.write('{}\\n'.format(entry))\n\n if print_tag:\n print entry", "def log_entry(self, timestamp, entry):\n if timestamp in self.log:\n self.log[timestamp].update(entry)\n else:\n self.log[timestamp] = entry", "def _addLogEntry(request, action, pagename, filename):\n from MoinMoin.logfile import editlog\n t = wikiutil.timestamp2version(time.time())\n fname = wikiutil.url_quote(filename)\n\n # Write to global log\n log = editlog.EditLog(request)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)\n\n # Write to local log\n log = editlog.EditLog(request, rootpagename=pagename)\n log.add(request, t, 99999999, action, pagename, request.remote_addr, fname)", "def do_log(self, arg):\n arg = \" %s :custom log\" % (arg)\n log(arg)", "def __log__(self, val):\n if lm_settings[\"debug\"]:\n try:\n log_file = open(\"language_manager/info/language_manager.log\", \"a\")\n except FileNotFoundError:\n log_file = open(lm_settings[\"logfile\"], \"w\")\n log_file.write(val)\n log_file.close()", "def InsertLog():", "def log(self, message):", "def log(self, *args, **kwargs):\n self.game_view.log(*args, **kwargs)", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def __log_trial__(self, trial_data):\n from klibs.KLDatabase import EntryTemplate\n\n trial_template = EntryTemplate('trials')\n trial_template.log(P.id_field_name, P.participant_id)\n for attr in trial_data:\n trial_template.log(attr, trial_data[attr])\n\n return self.database.insert(trial_template)", "def log(self, message: str):", "async def add_log(self, value):\n log_string = value\n print(log_string)\n self.embed.title = log_string\n self.embed.timestamp = datetime.datetime.now()\n self.embed.description = \"\"", "def addLog(log_info,seed_keyword=\"\",meta_keyword=\"\"):\n payload = {\n \"user\" : os.getlogin(),\n \"seed_keyword\":seed_keyword,\n \"meta_keyword\":meta_keyword,\n \"log_info\":log_info\n }\n res = requests.post('{}add/issue/'.format(base_url),data=payload)\n return res.status_code", "def record(*args, **kwargs):\n LOG.info(\"args={}, kwargs={}\".format(args, kwargs))", "def logWork(self, id, logData):\n\t\tnow = datetime.datetime.now()\n\t\ttimeText = now.strftime(LOG_DATE_FORMAT)\n\n\t\tlogToEdit = self.LOGS.get(id).get(\"log\")\n\t\t#If inside this case and there is already a log entry for this time\n\t\talreadyEntryForThisTime = False\n\t\tfor entry in logToEdit:\n\t\t\tif timeText == entry[0]:\n\t\t\t\tentry[1] += logData\n\t\t\t\talreadyEntryForThisTime = True\n\n\t\tif not alreadyEntryForThisTime:\n\t\t\tlogToEdit.append([timeText, 
logData])\n\t\t\tself.logByDate(now.strftime(CASE_DATE_FORMAT), id)\n\n\t\tself.save_logs(\"c\")\n\t\tself.autoCommit()\n\t\treturn 0", "def log(info):\n print(f\"[{info}]\")", "def logger(self, value):\n pass", "def log_lesson(request):\n user = User.objects.get(email=request.user.email)\n user_info = UserInformation.objects.get(user=user)\n lesson_set = user_info.current_lesson_set\n lesson = Lesson.objects.get(lesson_index=user_info.current_lesson_index)\n main_set = user_info.current_main_set\n\n print(\"lesson_logged\")\n\n lesson_to_log = LessonLog.objects.create(user=user,\n time_stamp=timezone.now(),\n lesson_set_key=lesson_set,\n lesson_key=lesson,\n lesson_index=lesson.lesson_index,\n main_set_key=main_set)\n lesson_to_log.save()", "def log(self, inputText, saveLog=True):\n\n self.logView.insertPlainText(\">> %s \\n\" % inputText)\n\n self.logView.moveCursor(QtGui.QTextCursor.End)\n\n if saveLog == True:\n f = open(self.logFileName, \"a\")\n\n f.write(inputText + '\\n')\n\n f.close()", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def log(text):\n print \"%s: %s\" % (str(datetime.datetime.now()), text)", "def logtool(self, action, **options):\n pass", "def ENTRY(entry_code):\n\tif check_user(entry_code) ==True:\n\t\t###workday = Workday.objects.filter(date=get_time()).get()\n\t\tenter_workday(entry_code)", "def logFile(self):\n\n event = 'stim'\n mStr = '{:013}'.format(self.mouse.tag) + '\\t'\n outPutStr = mStr + \\\n datetime.fromtimestamp(int(time())).isoformat(' ') + '\\t' + event\n print (outPutStr)\n if self.textfp != None:\n outPutStr = mStr + '{:.2f}'.format(time()) + '\\t' + event\n self.textfp.write(outPutStr + '\\n')\n self.textfp.flush()", "def logline(msg):\n print msg", "def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)", "def create_log_entry(self, i): \r\n fitness_vector = self.get_fitness_vector()\r\n \r\n best = min(fitness_vector)\r\n worst = max(fitness_vector)\r\n mean = rython.mean(fitness_vector)\r\n std = rython.std(fitness_vector)\r\n \r\n return \"{0},{1},{2},{3},{4}\\n\".format(i, 1.0/best, 1.0/worst, 1.0/mean, std)", "def _log(self, format, args, level=None):\n if level is None:\n level = self.log_level\n xbmc.log(\n \"metadata.movie.stupid: %s - - [%s] %s\\n\" % (\n self.client_address[0], self.log_date_time_string(),\n format % args),\n level)", "def scriptlog(entry, subarray=DEFAULT) :\n multiSubarray('log', subarray, \"SCRIPT: \" + entry)", "def on_a(self):\r\n self.log()", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, 
os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def print_to_file(start, stop, time_worked, work_text, work_log):\n today = datetime.date.today()\n\n record = ' || %.2f || %.2f || %.4f hours || %s\\n' % (start, stop, time_worked/3600, work_text)\n\n #if it is a new file you have the option to set a start time for the project\n # and how many hours a week you want to work\n if not os.path.isfile(work_log):\n while True:\n option = raw_input('\\nThis is a new log, would you like to specify a start date and a hours per week goal for the project? (y/n): ').lower()\n if option == 'y':\n date = raw_input('\\nplease enter the start date of the project (dd-mm-yyyy): ')\n hours_per_week = raw_input('\\nplease enter the number of hours you intend to work on the project per week: ')\n try:\n datetime.datetime.strptime(date, '%d-%m-%Y')\n if hours_per_week.isdigit():\n f = open(work_log, 'a')\n f.write('#! || ' + date + ':' + hours_per_week + '\\n')\n f.close()\n break\n else:\n print \"\\nPlease enter a valid number for hours to work!\\n\"\n except ValueError:\n print \"\\nPlease enter a valid date!\\n\"\n\n else:\n break\n\n\n f = open(work_log, 'a')\n print '\\n\\n' + today.strftime('%b-%d-%Y') + record\n f.write(today.strftime('%b-%d-%Y') + record)\n f.close()", "def _log_results(self, results):\n log.new_entry(results)\n self.new_entry = 2", "def logP(self, history, word):", "def make_record(self):\n answer = self.input('enter your name: ')\n if answer:\n print('hi ' + answer + '! Your answer has been recorded.')\n self.record_score(answer, len(self.board.tiles.blacks))\n self.game_on = False\n else:\n self.game_on = False # Canceled dialog will turn off", "def log(self):\n\t\tfilename = '/var/log/postunsuspendacct-%s.log' % self.argv.get('user')\n\t\tfileobj = open(filename, 'w');\n\t\tfileobj.write(self.title)\n\t\tfor (key, value) in self.argv.items():\n\t\t\tfileobj.write('%s: %s\\n' % (key, value))\n\t\tfileobj.close()\n\t\tprint \"[%s] Log saved '%s'\" % (ctime(), filename)", "def log(self, key: str, val: Any, iteration: int = None) -> None:\n assert key is not None and val is not None, \"Please set key and val\"\n\n if self._tb_writer is not None:\n assert (\n iteration is not None\n ), \"Must specify iteration when logging to tensorboard\"\n self._tb_writer.add_scalar(key, val, iteration)\n if self._tqdm_bar is not None:\n # update tqdm bar\n self._tqdm_data[key] = val\n self._tqdm_bar.set_postfix(self._tqdm_data, refresh=True)", "def logOutput(self, line):\r\n self.writeToLog('output', line)", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def log_message(self, format, *args):", "def get_log(request, **kwargs):\n\n #Creating the command for the logs \n try:\n\tprint(kwargs)\n\tprint(request.GET['project_id'])\n\toutputStr = sidecar.events.test_logs(project_id=request.GET['project_id'])\n\tlog_data = outputStr.log_data\n\toutputStr = \" <br>\".join(log_data.split(\"\\n\"))\n except Exception, e:\n outputStr = \"Updating the logs...\"\t\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)", "def log(self, line):\n now = datetime.datetime.now()\n time = datetime.datetime.strftime(now, '(%d %b %Y %H:%M:%S)')\n with open(self.logfile, 'a') as log:\n log.write(time + ' ' + line + '\\n')", "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def 
run_game_log(player, game_log):\n\n\tgame_log.update_room_type()\n\tcurrent_msg = game_log.get_current_message()\n\n\tprint('HP: {} GOLD: {}'.format(player.stats['HP'], player.stats['GOLD']))\n\tprint('CURRENT ROOM: {}\\n'.format(game_log.current_room))\n\tprint(\"{}'s LOG: {}\".format(player.info['Name'].upper(), current_msg))", "def log(entry, subarray=DEFAULT) :\n multiSubarray('log', subarray, \"LOG: \" + entry)", "def plain(self, *args):\n self.mylog.log(logging.INFO + 1, *args)", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )", "def log_it(logdata=None):\n with open(\"bloomhack.log\", \"a\") as fp:\n fp.write(logdata)\n return", "def putlog(self,s):\n if not self.logqueue == None:\n# print s\n self.logqueue.put(\"Spectrum: (\"+time.ctime()+\"):\\n\"+s)", "def print_entry(text):\n print \"Text entered: \\n '%s'\" % text", "def log_dialogue_input(log_dict, dynamodb_table):\n if not isinstance(log_dict, dict):\n raise NameError(f\"Logging information must be dictionary, not type {type(log_dict)}\")\n\n # Log in PST\n log_dict[\"time\"] = pst()\n if dynamodb_table is not None:\n try:\n dynamodb_table.put_item(Item=log_dict)\n app.logger.info(\"DB write successful\")\n except Exception as e:\n app.logger.info(f\"Could not write to database: {e}\")\n # If no db is specified, write logs to info\n app.logger.info(log_dict)", "def view(\n id: int = typer.Argument(\n ...,\n help=\"ID of the log entry\"\n )\n):\n manager = LogBookManager()\n log_entry = manager.get(id)\n\n if log_entry:\n log_entry_id = (\n typer.style(\"Log Entry ID: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n str(log_entry.id)\n )\n typer.echo(log_entry_id)\n\n log_datetime = (\n typer.style(\"Log Date & Time: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.log_datetime.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(log_datetime)\n\n typer.echo(\n typer.style(\"\\nDescription:\\n\", fg=typer.colors.BRIGHT_BLUE, bold=True)\n )\n typer.echo(log_entry.description + '\\n')\n\n created_at = (\n typer.style(\"Created at: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.created_at.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(created_at)\n\n updated_at = (\n typer.style(\"Updated at: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.updated_at.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(updated_at)\n else:\n typer.echo(\n typer.style(\n f'No Log Entry Found with id={id}',\n fg=typer.colors.RED,\n bold=True\n )\n )", "def render_entry_log(self):\n self.render_log(self.selenium_testcase_entry_template)", "def log(self, user: TelegramController.User, input: str = \"None\", output: str = \"None\") -> NONE:\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n if not 
self.has_history(user):\n curs.execute(\"INSERT INTO users VALUES(?, ?)\", (encrypted_id, \"\"))\n\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()[0][1]\n data += f\"input: {input}\\noutput: {output}\\n\" + \"-\" * 30 + \"\\n\"\n curs.execute(\"UPDATE users SET history = (?) WHERE id = (?)\", (data, encrypted_id))\n header.commit()", "def log_info(request, piece_id):\n\n asset = AssetMap.objects.get(piece=piece_id)\n url = '/'.join([FTP_URL, asset.folder, asset.name+'.log',])\n context = {\n 'keyform': KeySearchForm(auto_id=False),\n 'piece' : Piece.objects.get(pk=piece_id),\n 'logfile': requests.get(url),\n }\n return render(request, 'mutopia/piece_log.html', context)", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. 
Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def _Log(self, logf, s):\r\n if logf:\r\n logf(s + '\\n')", "def log_message(self, formate, *args):\n return", "def print_entry(entry):\n print(\"\"\"\nEmployee Name: {name}\nTask Name: {task_name}\nDate: {date}\nTime Spent: {time_spent}\nNotes: {notes}\n\"\"\".format(**entry))", "def log_example(var):\n\n log.info('example code started')\n log.debug('calling settings')\n test_settings()\n log2.error('there is no error this is example ')\n log2.info('finished')", "def log(self, reward, action):\n self.logs.append([reward, action])", "def logStarted(build, step, log):", "def commandlog(entry, subarray=DEFAULT) :\n multiSubarray('log', subarray, \"COMMAND: \" + entry)", "def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)", "def submit_entry(self, entry_type=\"Log\"):\n # Gratitude List\n gratitude_li = []\n for grat_entry in self.grat_entry_li:\n grat_txt = grat_entry.get()\n if grat_txt != \"\":\n gratitude_li.append(grat_txt.replace(\",\", \"\\comma\"))\n\n # Goals List\n goals_li = []\n\n for goal_entry in self.goals_entry_li:\n goal_txt = goal_entry.get()\n if goal_txt != \"\":\n goals_li.append(goal_txt.replace(\",\", \"\\comma\"))\n\n \"\"\"\n Plans List: {\"Description\": description entry box,\n \"Priority\": priority combo box, \"Steps\": list of step entry boxes }\n \n Step list: {\"Description\": new_step_box, \"Status\": check_var}\n \"\"\"\n plans_li = []\n\n for plan_entry in self.plans_entry_li:\n if plan_entry[\"Description\"].get() == \"\" and len(plan_entry[\"Steps\"]) == 0:\n pass\n else:\n plan = {}\n plan[\"Plan_Type\"] = plan_entry[\"Plan_Type\"].get()\n plan[\"Description\"] = plan_entry[\"Description\"].get().replace(\n \",\", \"\\comma\")\n plan[\"Status\"] = plan_entry[\"Status\"].get()\n plan[\"Priority\"] = plan_entry[\"Priority\"].get()\n plan[\"Steps\"] = []\n for step_entry in plan_entry[\"Steps\"]:\n step_txt = step_entry[\"Description\"].get()\n if step_txt == \"\":\n pass\n else:\n full_step = {\"Status\": step_entry[\"Status\"].get(),\n \"Description\": step_txt}\n plan[\"Steps\"].append(full_step)\n plans_li.append(plan)\n\n # Affirmation Entry\n affirmation = self.affirm_entry.get(\"1.0\", 'end-1c')\n\n # Additional Comment Entry\n additional_notes = self.notes_entry.get(\"1.0\", 'end-1c')\n\n add_new_entry(entry_type=entry_type,\n gratitude=gratitude_li,\n goals=goals_li,\n plans=plans_li,\n affirmation=affirmation,\n additional_notes=additional_notes,\n 
test=True)\n\n self.root.switch_page(self.root._HomePage)", "def log3(request):\n if request.method == \"GET\":\n bValid = True\n (usernumber_str, bValid) = getValue(request, u\"uid\", bValid)\n (timelapse_str, bValid) = getValue(request, u\"timelapse\", bValid)\n (propkind_str, bValid) = getValue(request, u\"propkind\", bValid)\n (figure_str, bValid) = getValue(request, u\"figure\", bValid)\n (correctness_str, bValid) = getValue(request, u\"corr\", bValid)\n (term_S_str, bValid) = getValue(request, u\"S\", bValid)\n (term_M_str, bValid) = getValue(request, u\"M\", bValid)\n (term_P_str, bValid) = getValue(request, u\"P\", bValid)\n (term_Q1_str, bValid) = getValue(request, u\"q1\", bValid)\n (term_Q2_str, bValid) = getValue(request, u\"q2\", bValid)\n (term_Q3_str, bValid) = getValue(request, u\"q3\", bValid)\n (conclusion_truth_value_str, bValid) = getValue(request, u\"conclusion\", bValid)\n (validity_answer, bValid) = getValue(request, u\"validity\", bValid)\n\n if not bValid:\n return render(request, 'log.html')\n else:\n if correctness_str == \"true\":\n bCorrect = True\n else:\n bCorrect = False\n\n if conclusion_truth_value_str == \"1\":\n bConclusionTruthValue = True\n else:\n bConclusionTruthValue = False\n\n newobj3 = Answer3(usernumber=int(usernumber_str),\n timelapse=int(timelapse_str),\n figure=int(figure_str),\n quantor_1=term_Q1_str,\n quantor_2=term_Q2_str,\n quantor_3=term_Q3_str,\n term_S=int(term_S_str),\n term_M=int(term_M_str),\n term_P=int(term_P_str),\n propkind=propkind_str,\n student_answer=validity_answer,\n correctness=bCorrect,\n conclusion_truth_value=bConclusionTruthValue)\n\n newobj3.save()\n\n return render(request, 'log.html')\n else:\n return render(request, 'log.html')", "def enter_linking_log_patient(project_path: str, project_name: str):\n # Get Linking Log\n linking_log_path = os.path.join(project_path, 'logs', 'logs_with_phi', 'Master_Linking_Log.xlsx')\n\n while True:\n choice = input(\"Would you like to enter a new patient onto the Linking log log [Y/N]? 
\")\n\n if choice and choice.strip() and choice.lower() == 'y':\n patient_data = rsm.get_data_from_user.get_master_linking_log_data()\n rsm.add_patient_to_excel_file.add_patient(linking_log_path,\n patient_data,\n 'Master_Linking_Log')\n print(\"Successfully added patient to {} Master Linking log\".format(project_name))\n\n elif choice and choice.strip() and choice.lower() == 'n':\n break\n else:\n print(\"Please enter Y or N\")", "def create_log_entry_when_user_logs_in(sender, request, user, **kwargs):\n create_user_log(\n request=request,\n user=user,\n type=_account_const.AUTHENTICATION,\n action=_account_const.LOGIN\n )", "def display_entry(self, entry):\n border = '-' * 50\n print(border)\n print('Employee: {}'.format(entry.employee_name))\n print('Task Name: {}'.format(entry.task_name))\n print(\"Date: {}\".format(entry.date))\n print(\"Time Spent: {}\".format(entry.time_spent))\n if entry.notes != '':\n print(\"Notes:\\n{}\\n{}\".format('----------', entry.notes))\n print(border)", "def log_step(step: int, message: str, stdout: bool = True) -> None:\n log(f\"Step {step:6d}: {message}\", stdout=stdout)", "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def save_result(self, value: Any) -> None:\n self.run_logger.set_tags({self.name: value})", "def on_L1(self):\r\n self.log()", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def log(msg):\n print msg", "def log(self, msg):\n logging.info(\"Logging Message\")\n ml = self.monk_logs\n today = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n ml.insert({today: {\"log\": msg,\n \"sentiment\": self.get_sentiment(msg),\n \"weather\": Weather.get_weather()}})", "def log_successful_login(sender, request, user, **kwargs):\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info(u\"Login success - user.id: {0}\".format(user.id))\r\n else:\r\n AUDIT_LOG.info(u\"Login success - {0} ({1})\".format(user.username, user.email))", "def log_message(self, fmt, *args):\n pass", "def log(self, *args):\n self._check_private_key(\"log data\")\n params = {'private_key': self.privateKey}\n params.update(dict((k, self._encoder.serialize(v))\n for k, v in zip(self.fields, args)))\n response = self._post(self.inputUrl(), params=params)\n\n self._last_headers = response.headers\n self._stats = None", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def log_request(req: 'Flask_Request', results: str) -> None:\n\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL_INSERT = \"\"\"insert into log_table\n (phrase, letters, ip, browser_string, results)\n values\n (?, ?, ?, ?, ?)\"\"\"\n\n cursor.execute(_SQL_INSERT,\n (req.form['phrase'], req.form['letter'], req.remote_addr, req.user_agent.browser,\n results,))", "def log(self, message, *args, newline):\n\n self.current_character.log(message, *args, newline=newline)", 
"def log(self, client_addr, request):\n with codecs.open(self.log_path, \"a\", 'UTF-8') as fh_out:\n print >> fh_out, (time.strftime('%Y-%m-%d %H:%M:%S') + \"\\t\" +\n ':'.join([str(i) for i in client_addr]) + \"\\t\" +\n request)", "def _print_logs_info(job_id, project_id):\n print(\"\\nJob submitted successfully.\")\n print(\"Your job ID is: \", job_id)\n print(\"\\nPlease access your training job information here:\")\n print(\n \"https://console.cloud.google.com/mlengine/jobs/{}?project={}\".format(\n job_id, project_id))\n print(\"\\nPlease access your training job logs here: \"\n \"https://console.cloud.google.com/logs/viewer?resource=ml_job%2F\"\n \"job_id%2F{}&interval=NO_LIMIT&project={}\\n\".format(\n job_id, project_id))", "def history_log(self, user, action=CHANGE, message=''):\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(self).pk,\n object_id=self.pk,\n object_repr=force_text(self),\n action_flag=action,\n change_message=message\n )", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def log(self, obj, action):\n action_dict = {'time': time.time(),\n 'action': action}\n self.log_data[obj.get_obj_id()]['actions'].append(action_dict)", "def log(self, *args, **kwargs):\n return self.event.log(*args, meta={'contribution_id': self.id}, **kwargs)", "def get_action_logfile():\n return \"action\" + get_day() + \".log\"", "def record_task_attempt(task_name: str):\n\n from common.models import InvenTreeSetting\n\n logger.info(f\"Logging task attempt for '{task_name}'\")\n\n InvenTreeSetting.set_setting(f'_{task_name}_ATTEMPT', datetime.now().isoformat(), None)", "def log(data):\n return _make.log(data)" ]
[ "0.68945855", "0.6559437", "0.6485974", "0.62908894", "0.62812376", "0.6237484", "0.6155005", "0.60995555", "0.60548156", "0.6035026", "0.60128146", "0.59510344", "0.5913017", "0.59052706", "0.5875756", "0.58669865", "0.5864817", "0.5801178", "0.5774821", "0.57367194", "0.56955934", "0.56485015", "0.5620402", "0.56164557", "0.55982274", "0.5586754", "0.5570456", "0.5569306", "0.55621547", "0.555175", "0.5551224", "0.554468", "0.5528927", "0.55110824", "0.5506672", "0.5501592", "0.54945815", "0.5494404", "0.54781675", "0.5477084", "0.5475177", "0.5468415", "0.54650044", "0.54586476", "0.5457204", "0.54539627", "0.5453257", "0.54469234", "0.5435364", "0.5420035", "0.54141724", "0.5409191", "0.5406821", "0.5406493", "0.53991663", "0.53926677", "0.53923625", "0.5390717", "0.5389024", "0.5388456", "0.53867424", "0.5385702", "0.5383189", "0.537404", "0.5363159", "0.53628963", "0.53588384", "0.5356544", "0.5352027", "0.53381604", "0.53378874", "0.533786", "0.5332198", "0.5331161", "0.5327184", "0.53262144", "0.53227776", "0.5320269", "0.5319203", "0.53183365", "0.53111476", "0.5308007", "0.53015375", "0.5296755", "0.529649", "0.52934474", "0.5291275", "0.52901256", "0.5289721", "0.5286743", "0.52845395", "0.5283241", "0.5277811", "0.52707493", "0.5251437", "0.52470165", "0.5244757", "0.52435553", "0.5238904", "0.5237906" ]
0.54031944
54
Given a latitude and longitude calculate distance to airplane including altitude, return kilometers
def distance(self, lat: float, long: float) -> float:
    # Initial euclidian formula below
    # diff_lat = self.lat - lat
    # diff_long = self.long - long
    # euclidian = math.sqrt((diff_lat ** 2 + diff_long ** 2 + self.altitude ** 2))
    return self._haversine(lat, long) + self.altitude / 1000
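The distance() snippet above delegates to a _haversine helper that this record does not include. A minimal sketch of such a helper, assuming the attributes self.lat and self.long from the snippet and a mean Earth radius of 6371 km (both the radius constant and the method body are assumptions, not part of the record), might look like this:

import math

def _haversine(self, lat: float, long: float) -> float:
    # Great-circle distance in kilometers between (self.lat, self.long)
    # and (lat, long), using the haversine formula.
    earth_radius_km = 6371.0  # assumed mean Earth radius
    phi1 = math.radians(self.lat)
    phi2 = math.radians(lat)
    dphi = math.radians(lat - self.lat)
    dlam = math.radians(long - self.long)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
    return 2 * earth_radius_km * math.asin(math.sqrt(a))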
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_distance_meters(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n dlong *= math.cos( aLocation2.lat * math.pi / 180.0 )\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat * dlat) + (dlong * dlong)) * 1.113195e5", "def get_distance_metres(aLocation1, aLocation2):\n \n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def get_distance_meters(location1, location2):\n dlat = location2.lat - location1.lat\n dlong = location2.lon - location1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5", "def calculate_distance(long_x, lat_x, long_y, lat_y):\n if not (long_x and lat_x and long_y and lat_y):\n return 0\n\n dlon = long_y - long_x if long_y != long_x else long_y\n dlat = lat_y - lat_x if lat_y != lat_x else lat_y\n\n a = (sin(dlat / 2) ** 2) + cos(lat_x) * cos(lat_y) * (sin(dlon / 2) ** 2)\n c = 2 * asin(sqrt(a))\n r = 6371 # radius of earth in kms.\n return (c * r)", "def calcMetresDistance(lat1, long1, lat2, long2):\n return (abs(lat1 - lat2) + abs(long1 - long2)) * 100\n # no, like, really badly", "def lnglat_to_meters(longitude, latitude):\n if isinstance(longitude, (list, tuple)):\n longitude = numpy.array(longitude)\n if isinstance(latitude, (list, tuple)):\n latitude = numpy.array(latitude)\n\n origin_shift = numpy.pi * 6378137\n easting = longitude * origin_shift / 180.0\n northing = numpy.log(numpy.tan((90 + latitude) * numpy.pi / 360.0)) * origin_shift / numpy.pi\n return (easting, northing)", "def get_distance(latitude, longitude, del_latitude, del_longitude):\n coord = (latitude, longitude)\n del_coord = (del_latitude, del_longitude)\n return distance.geodesic(coord, del_coord).km", "def get_distance_between_point(test_long, test_lat, lab_long, lab_lat):\r\n test = (test_lat, test_long)\r\n lab = (lab_lat, lab_long)\r\n return geodesic(test, lab).miles", "def coord_dist_meters(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = 
math.sin(dlat/2)**2 + math.cos(lat1) * \\\n math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n r = 6371000 # Radius of earth in meters. Use 3956 for miles\n return c * r", "def distance_in_meters(coord1, coord2):\n return vincenty(coord1, coord2).meters", "def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)", "def find_distance_to_store(latitude1, longitude1, latitude2, longitude2):\n lat1 = float(latitude1)\n lon1 = float(longitude1)\n lat2 = float(latitude2)\n lon2 = float(longitude2)\n\n dlat = radians(lat2-lat1)\n dlon = radians(lon2-lon1)\n a = sin(dlat/2) * sin(dlat/2) + cos(radians(lat1)) \\\n * cos(radians(lat2)) * sin(dlon/2) * sin(dlon/2)\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance_in_kilometers = RADIUS_OF_THE_EARTH_KILOMETERS * c\n\n return distance_in_kilometers", "def coord_distance(lat1, lon1, lat2, lon2):\n\tlon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n\tdlon = lon2 - lon1\n\tdlat = lat2 - lat1\n\ta = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n\tc = 2 * math.asin(math.sqrt(a))\n\tkm = 6367 * c \n\treturn km", "def distance(lat0, lng0, lat1, lng1):\n # convert decimal degrees to radians \n lat0, lng0, lat1, lng1 = map(radians, [lat0, lng0, lat1, lng1])\n # haversine formula \n dlng = lng1 - lng0 \n dlat = lat1 - lat0 \n a = sin(dlat/2)**2 + cos(lat0) * cos(lat1) * sin(dlng/2)**2\n c = 2 * asin(sqrt(a)) \n m = 6367000 * c\n return m", "def get_distance(lat1, lon1, lat2, lon2):\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n d_phi = math.radians(lat2 - lat1)\n d_lam = math.radians(lon2 - lon1)\n a = math.sin(d_phi/2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lam/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return 6371000 * c", "def calcDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km * 1000", "def coord_distance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n km = 2 * 6367 * math.asin(math.sqrt(a))\n mi = 0.621371 * km\n return mi", "def distance_between(lat_1, lon_1, lat_2, lon_2):\n lat_1, lon_1 = math.radians(lat_1), math.radians(lon_1)\n lat_2, lon_2 = math.radians(lat_2), math.radians(lon_2)\n theta = lon_1 - lon_2\n dist = math.sin(lat_1)*math.sin(lat_2) + math.cos(lat_1)*math.cos(lat_2)*math.cos(theta)\n dist = math.acos(dist)\n dist = math.degrees(dist)\n dist = dist * 69.06 # 69.09 = circumference of earth in miles / 360 degrees\n return dist", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n km = 6367 * c\n return km", "def get_distance_metres(aLocation1, aLocation2):\n [dNorth, dEast, dDown] = get_position_error(aLocation1, aLocation2)\n \n return math.sqrt((dNorth*dNorth) + (dEast*dEast))", "def test_get_kilometers() -> None:\n kilometers = location_util.vincenty(COORDINATES_PARIS, COORDINATES_NEW_YORK)\n assert 
round(kilometers, 2) == DISTANCE_KM", "def get_euclidian_distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01", "def get_distance(lat1, long1, lat2, long2):\n x = 69.1*(lat2 - lat1)\n y = 69.1*(long2 - long1) * math.cos(lat1/57.3)\n dist = math.sqrt(x*x + y*y)\n return dist", "def haversine_distance_ignore_altitude(loc1_lat, loc1_lon, loc2_lat, loc2_lon):\n R = 6372797.560856 # radius of the earth in meters\n\n lat_arc = math.radians(loc1_lat - loc2_lat)\n lon_arc = math.radians(loc1_lon - loc2_lon)\n\n lat_h = math.sin(lat_arc * 0.5)\n lat_h = lat_h * lat_h\n\n lon_h = math.sin(lon_arc * 0.5)\n lon_h = lon_h * lon_h\n\n tmp = math.cos(math.radians(loc1_lat)) * math.cos(math.radians(loc2_lat))\n rad = 2.0 * math.asin(math.sqrt(lat_h + tmp * lon_h))\n\n return rad * R", "def getAltitudeFromLatLon(self, lat, lon):\r\n # print \"-----\\nFromLatLon\", lon, lat\r\n lat -= self.lat\r\n lon -= self.lon\r\n # print \"lon, lat\", lon, lat\r\n if lat < 0.0 or lat >= 1.0 or lon < 0.0 or lon >= 1.0:\r\n raise WrongTileError(self.lat, self.lon, self.lat+lat, self.lon+lon)\r\n x = lon * (self.size - 1)\r\n y = lat * (self.size - 1)\r\n # print \"x,y\", x, y\r\n x_int = int(x)\r\n x_frac = x - int(x)\r\n y_int = int(y)\r\n y_frac = y - int(y)\r\n # print \"frac\", x_int, x_frac, y_int, y_frac\r\n value00 = self.getPixelValue(x_int, y_int)\r\n value10 = self.getPixelValue(x_int+1, y_int)\r\n value01 = self.getPixelValue(x_int, y_int+1)\r\n value11 = self.getPixelValue(x_int+1, y_int+1)\r\n value1 = self._avg(value00, value10, x_frac)\r\n value2 = self._avg(value01, value11, x_frac)\r\n value = self._avg(value1, value2, y_frac)\r\n # print \"%4d %4d | %4d\\n%4d %4d | %4d\\n-------------\\n%4d\" % (\r\n # value00, value10, value1, value01, value11, value2, value)\r\n return value", "def earth_distance(lat1: float, lon1: float, lat2: float, lon2: float)\\\n -> float:\n # R = 6373.0 # earth radius in km\n R = 3963.0 # earth radius in miles\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n distance = R * c\n return distance", "def calculate_distance(x: float, y: float) -> float:\n # return geopy.distance.vincenty(x, y).km\n R = 6370\n lat1 = radians(x[0]) #insert value\n lon1 = radians(x[1])\n lat2 = radians(y[0])\n lon2 = radians(y[1])\n\n dlon = lon2 - lon1\n dlat = lat2- lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance = R * c\n return distance", "def get_location_meters(original_location, north, east, alt=0):\n # Radius of \"spherical\" earth\n earth_radius = 6378137.0\n # Coordinate offsets in radians\n lat = north / earth_radius\n lon = east / (earth_radius * math.cos(original_location.lat * math.pi/180))\n\n # New position in decimal degrees\n newlat = original_location.lat + (lat * 
180/math.pi)\n newlon = original_location.lon + (lon * 180/math.pi)\n return Location(newlat, newlon, original_location.alt + alt, original_location.is_relative)", "def distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * asin(sqrt(a))\n m = 6367 * c * 1000\n return m", "def road_distance(lat1, lon1, lat2, lon2):\n point1 = lat1, lon1\n point2 = lat2, lon2\n url = \"https://maps.googleapis.com/maps/api/distancematrix/json?origins={0},{1}&destinations={2},{3}&mode=driving&language=en-EN&sensor=false&key={4}\".format(str(lat1),str(lon1),str(lat2),str(lon2), google_api_key)\n response = api_call(url)\n km = response['rows'][0]['elements'][0]['distance']['value']\n return round(km/1000,1)", "def _earth_distance(time='now'):\n return get_earth(time).radius", "def calc_dist(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n mtr = 6371000 * c\n return mtr", "def distance(lat1, lon1, lat2, lon2):\r\n earth_radius=3959.0 #miles\r\n if lat1==lat2 and lon1==lon2:\r\n dst=0\r\n else:\r\n dst = acos(\r\n (sin(radians(lat1)) * sin(radians(lat2))) +\r\n (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lon1) - radians(lon2)))\r\n ) * earth_radius\r\n return dst", "def calculate_distance_based_on_lon_lat(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * np.arcsin(sqrt(a)) \n # Radius of earth in kilometers. 
Use 3956 for miles\n r = 6378137.0\n return c * r", "def distance(latitude_1: float, longitude_1: float, latitude_2: float, longitude_2: float) -> float:\n lat1, lon1, lat2, lon2 = map(radians, (latitude_1, longitude_1, latitude_2, longitude_2))\n return (\n 2\n * EARTH_RADIUS\n * asin(\n sqrt(\n sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * (sin((lon2 - lon1) / 2) ** 2)\n )\n )\n )", "def distance_in_meters(self, latlng):\n return self.distance_to(latlng)", "def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float", "def test_get_distance(self):\n meters = location_util.distance(COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1])\n self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)", "def get_distance(lat1, lon1, lat2, lon2) -> float:\n # Earth radius in meters\n radius = 6371000\n\n # Degress to radian\n lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])\n\n # Deltas\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n\n # Calculate distance\n arch = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n arch_sin = 2 * np.arcsin(np.sqrt(arch))\n\n return radius * arch_sin", "def latlon2distance(lat1, long1, lat2, long2, miles=False):\n global verbose\n\n if lat1 == lat2 and long1 == long2:\n return 0\n\n\n # Convert latitude and longitude to\n # spherical coordinates in radians.\n degrees_to_radians = math.pi / 180.0\n\n # phi = 90 - latitude\n phi1 = (90.0 - lat1) * degrees_to_radians\n phi2 = (90.0 - lat2) * degrees_to_radians\n\n # theta = longitude\n theta1 = long1 * degrees_to_radians\n theta2 = long2 * degrees_to_radians\n\n # Compute spherical distance from spherical coordinates.\n\n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n\n cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) + math.cos(phi1) * math.cos(phi2))\n try:\n arc = math.acos(cos)\n except Exception as err:\n sys.stderr.write(\"There was an err: {} trying to take the acos of ({})\\n\".format(err, cos))\n arc=0\n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n #\n # To convert to miles multiple arc by 3960\n # To convert to kilometers multiply arc by 6373\n\n if miles:\n arc *= 3960\n else:\n arc *= 6373\n\n return arc", "def distance(lat1,lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n miles = earth_radius * c\n return miles", "def get_spherical_distance(lat1,lat2,long1,long2):\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def _calculate_distance(self, passenger, driver):\n 
londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def harversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # harversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2.)**2. + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2.)**2.\n c = 2. * math.asin(math.sqrt(a))\n km = 6371. * c # radius of earth\n return km", "def distance(lat1, lon1, lat2, lon2):\n lon1, lat1 = math.radians(lon1), math.radians(lat1)\n lon2, lat2 = math.radians(lon2), math.radians(lat2)\n a = (math.sin((lat2 - lat1) / 2) ** 2 +\n math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = 6371000 * c\n\n return d", "def calculate_distance(srcLong, srcLat, dstLong, dstLat):\n return math.sqrt( (srcLong-dstLong) ** 2 + (srcLat - dstLat) ** 2)", "def calcDistance(lat1, lon1, lat2, lon2):\n yDistance = (lat2 - lat1) * nauticalMilePerLat\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2) \n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * milesPerNauticalMile", "def topographic_altitude(lat, lon):\n global __model\n type_output = type(lat)\n lat = prepare_input_array(lat)\n lon = prepare_input_array(lon)\n lon = np.mod(lon, 360)\n val = __model.topographic_altitude(lat, lon)\n val = np.maximum(val, 1e-7)\n return prepare_output_array(val, type_output) * u.km", "def calculateDistance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n\n # haversine formula\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n return c * r", "def get_spherical_distance(lat1,lat2,long1,long2):\n lat1,lat2,long1,long2= float(lat1),float(lat2),float(long1),float(long2)\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def calcDistanceOptimized(lat1, lon1, lat2, lon2):\n rad = 0.017453292519943\n yDistance = (lat2 - lat1) * 60.00721\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * 30.053965\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * 1852.00088832", "def get_distance_from_point(long1, lati1, long2, lati2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [long1, lati1, long2, lati2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def dist_between(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a)) \n km = 6367 * c\n return km", "def test_get_kilometers(self):\n kilometers = location_util.vincenty(COORDINATES_PARIS,\n COORDINATES_NEW_YORK)\n self.assertEqual(round(kilometers, 2), DISTANCE_KM)", "def distance(self, lat: float, lon: float) -> float:\n return distance((self.lat, self.lon), (lat, lon))", "def calcDistance(lat1, lon1, lat2, lon2):\n yDistance = (lat2 - lat1) * nauticalMilePerLat\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2)\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n return distance * milesPerNauticalMile", "def compute_distance_to_city_in_km(self, city):\n lat = math.radians(self.latitude - city.latitude)\n long = math.radians(self.longitude - city.longitude)\n a = math.pow(math.sin(lat/2), 2) \\\n + math.cos(math.radians(self.latitude)) * math.cos(math.radians(city.latitude)) * pow(math.sin(long/2), 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n radius_earth = 6378.7 # in kilometers\n return radius_earth * c", "def distance_between_m(lat1, lon1, lat2, lon2):\n phi1 = (90. - lat1) * pi / 180.\n phi2 = (90. 
- lat2) * pi / 180.\n theta1 = lon1 * pi / 180.\n theta2 = lon2 * pi / 180.\n arc_length = acos(sin(phi1) * sin(phi2) * cos(theta1 - theta2) +\n cos(phi1) * cos(phi2))\n return arc_length * EARTH_RADIUS_M", "def get_distance_from_hq(self, lat, lon):\n coords_1 = (40.646860, -8.642999) # Coords to the office\n coords_2 = (lat, lon)\n return distance.distance(coords_1, coords_2).km", "def haversine(lon1, lat1, lon2, lat2): \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \r\n #print 34\r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \r\n c = 2 * atan(sqrt(a)/sqrt(1-a)) \r\n r = 6371 \r\n d=c * r\r\n #print type(d)\r\n return d", "def get_earth_radius(latitude):\n a = R_EARTH_MAX\n b = R_EARTH_MIN\n num = ((a ** 2 * np.cos(latitude)) ** 2 +\n (b ** 2 * np.sin(latitude)) ** 2)\n den = ((a * np.cos(latitude)) ** 2 +\n (b * np.sin(latitude)) ** 2)\n\n earth_radius = np.sqrt(num / den)\n\n return earth_radius", "def distance(s_lat, s_lng, e_lat, e_lng):\n\n # approximate radius of earth in km\n R = 6373.0\n\n# s_lat = s_lat*np.pi/180.0\n s_lat = np.deg2rad(s_lat)\n s_lng = np.deg2rad(s_lng)\n e_lat = np.deg2rad(e_lat)\n e_lng = np.deg2rad(e_lng)\n\n d = (np.sin((e_lat - s_lat)/2)**2 + np.cos(s_lat)*np.cos(e_lat) *\n np.sin((e_lng - s_lng)/2)**2)\n distance = 2 * R * np.arcsin(np.sqrt(d))\n\n return distance", "def haversine_distance(loc1_lat, loc1_lon, loc1_alt, loc2_lat, loc2_lon, loc2_alt):\n R = 6372797.560856 # radius of the earth in meters\n R = R + loc2_alt - loc1_alt\n\n lat_arc = math.radians(loc1_lat - loc2_lat)\n lon_arc = math.radians(loc1_lon - loc2_lon)\n\n lat_h = math.sin(lat_arc * 0.5)\n lat_h = lat_h * lat_h\n\n lon_h = math.sin(lon_arc * 0.5)\n lon_h = lon_h * lon_h\n\n tmp = math.cos(math.radians(loc1_lat)) * math.cos(math.radians(loc2_lat))\n rad = 2.0 * math.asin(math.sqrt(lat_h + tmp * lon_h))\n\n return rad * R", "def gpx_distance(lat1, lon1, lat2, lon2):\n theta = lon1 - lon2\n rads = sin(radians(lat1)) * sin(radians(lat2)) + cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(theta))\n\n # make sure rads is [-1, 1]\n rads = 1 if rads > 1 else rads\n rads = -1 if rads < -1 else rads\n\n rads = acos(rads)\n\n # multiply by radius of the earth to get distance\n return rads * 6367", "def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km", "def distance(lat1, lon1, lat2, lon2):\r\n radius = 6373 * 1000\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2))**2\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n return radius * c", "def aversine(lon1, lat1, lon2, lat2):\n\n lon1 = float(lon1)\n lon2 = float(lon2)\n lat1 = float(lat1)\n lat2 = float(lat2)\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.asin(math.sqrt(a))\n meters = 6356988 * c\n\n return meters", "def get_location_offset_meters(self, dNorth, dEast, alt):\n earth_radius=6378137.0 #Radius of \"spherical\" earth\n #Coordinate offsets in radians\n dLat = dNorth/earth_radius\n dLon = dEast/(earth_radius*math.cos(math.pi*self.home.lat/180))\n\n #New position in decimal degrees\n newlat = self.home.lat + (dLat * 180/math.pi)\n newlon = self.home.lon + (dLon 
* 180/math.pi)\n return LocationGlobal(newlat, newlon,self.home.alt+alt)", "def get_nearest_station(latitude, longitude):\n urlbase = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation?api_key=wX9NwuHnZU2ToO7GmGR9uw&lat=\"\n urlbase += str(latitude)\n urlbase += '&lon='\n urlbase += str(longitude)\n urlbase += '&format=json'\n response_data = get_json(urlbase)\n station_name = response_data[\"stop\"][0][\"stop_name\"]\n distance = response_data[\"stop\"][0][\"distance\"]\n return station_name, distance", "def dist_in_meters(coords, pt, is_geo=False):\n xe = coords[:, 0]\n ye = coords[:, 1]\n xp = pt[0]\n yp = pt[1]\n if is_geo:\n d = _get_dist_geo(xe, ye, xp, yp)\n else:\n d = np.sqrt(np.square(xe - xp) + np.square(ye - yp))\n return d", "def calc_distance(start_lat, start_lng, end_lat, end_lng):\n # approximate radius of earth in km\n r = 6373.0\n\n # Convert to radians\n lat1 = math.radians(start_lat)\n lon1 = math.radians(start_lng)\n lat2 = math.radians(end_lat)\n lon2 = math.radians(end_lng)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n return r * c", "def get_fast_distance(lat1, lon1, lat2, lon2):\n KM = 6371.393\n lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n c = 2 * np.arcsin(np.sqrt(a))\n distance = KM * c\n return distance", "def calculate_H(s_lat,s_lon,e_lat,e_lon):\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist", "def latlong_distance(p1, p2):\n radius = 6371 # km\n\n lat1 = p1[1] * math.pi / 180.0\n lat2 = p2[1] * math.pi / 180.0\n lon1 = p1[0] * math.pi / 180.0\n lon2 = p2[0] * math.pi / 180.0\n\n deltaLat = lat2 - lat1\n deltaLon = lon2 - lon1\n a = (math.sin(deltaLat / 2)**2\n + math.cos(lat1) * math.cos(lat2) * math.sin(deltaLon / 2)**2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n d = d * 1e3 # Return in m\n return d", "def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0", "def get_hike_distance(df1lat, df1long, df2lat, df2long):\n # approximate radius of earth in km\n R = 6373.0\n\n lat1 = radians(df1lat)\n lon1 = radians(df1long)\n lat2 = radians(df2lat)\n lon2 = radians(df2long)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n return distance", "def spherical_distance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6373 * c\n km = '%d' % km\n return float(km)", "def dist(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * 
sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r", "def distance(lat1, lon1, lat2, lon2):\n coord = map(lambda x: float(x) * pi / 180.0, [lat1, lon1, lat2, lon2])\n inverse_arc = sin(coord[0]) * sin(coord[2]) + \\\n cos(coord[0]) * cos(coord[2]) * cos(coord[1] - (coord[3]))\n arc_dist = acos(min(1, max(inverse_arc, -1))) * 6371\n return arc_dist", "def delta_lat_miles(self, delta_lat):\n\n return delta_lat.dist_from_radius(EARTH_RADIUS)", "def measure_gps(lat1, lon1, lat2, lon2):\n R = 6378.137; # Radius of earth in KM\n dLat = radians(lat2) - radians(lat1)\n dLon = radians(lon2) - radians(lon1)\n a = sin(dLat/2) * sin(dLat/2) + cos(radians(lat1)) * cos(radians(lat2)) * sin(dLon/2) * sin(dLon/2)\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n d = R * c\n return d * 1000 # meters", "def calc_distance_two_points(lat_from, long_from, lat_to, long_to):\n distance_in_km = haversine(\n (lat_from, long_from),\n (lat_to, long_to),\n unit='km')\n\n return distance_in_km", "def distance_between_coordinates(current_coordinate, target_coordinate):\n distance_in_km = haversine(current_coordinate, target_coordinate)\n distance_in_metres = math.ceil(distance_in_km * 1000)\n return distance_in_metres", "def get_nearest_station(latitude, longitude):\n url1 = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\n params1 = {'api_key':'lm1M_mXgq0O6dsH9xduPAQ','lat':latitude,'lon':longitude,'format':'json'}\n req1 = requests.get(url1,params=params1)\n stat1 = req1.status_code\n stop_name = req1.json()['stop'][0]['stop_name']\n distance = req1.json()['stop'][0]['distance']\n return stop_name, distance", "def distance_coordinates(lat1: Decimal, lon1: Decimal, lat2: Decimal, lon2: Decimal) -> Decimal:\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = Decimal(R * c)\n\n return distance", "def calcDistance(lat1, lon1, lat2, lon2):\n nauticalMilePerLat = 60.00721\n nauticalMilePerLongitude = 60.10793\n rad = math.pi / 180.0\n milesPerNauticalMile = 1.15078\n \n yDistance = (lat2 - lat1) * nauticalMilePerLat\n xDistance = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2)\n\n distance = math.sqrt( yDistance**2 + xDistance**2 )\n\n return distance * milesPerNauticalMile * 1609.344", "def calc_distance(user_loc, space):\n geocode_result = gmaps.geocode(space['_location'])\n dest_loc = geocode_result[0]['geometry']['location']\n direction = gmaps.distance_matrix(user_loc, dest_loc, mode=\"walking\")\n distance = direction['rows'][0]['elements'][0]['distance']['value']\n # convert to mile\n distance = distance * 0.000621371\n return distance", "def calculate_distance(source,dest):\n\n ### Earth radius in miles\n R = 3960.0\n\n lat1, lon1 = source\n lat2, lon2 = dest\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n ### compute distance in spherical coordinates\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance", "def get_distance_to(self, latitude, longitude):\n\n if not self.latitude or not self.longitude:\n return None\n return 
distance_between_coordinates(self.latitude, latitude, self.longitude, longitude)", "def distance_intermediate_formula(lat1, long1, lat2, long2):\n return pow(sin(radians(col(lat1) - col(lat2)) / 2), 2) + (\n pow(sin(radians(col(long1) - col(long2)) / 2), 2)\n * cos(radians(col(lat1)))\n * cos(radians(col(lat2)))\n )" ]
[ "0.70389205", "0.6926336", "0.6884272", "0.6884272", "0.6884272", "0.6884272", "0.6884272", "0.68767065", "0.68557805", "0.67505145", "0.67494184", "0.67259705", "0.6697738", "0.6696931", "0.66856134", "0.66700953", "0.65975", "0.6567814", "0.6560275", "0.6525618", "0.6513142", "0.6469947", "0.64646846", "0.6464619", "0.6458212", "0.6453903", "0.64422905", "0.64214396", "0.6416447", "0.6408516", "0.6407694", "0.63815165", "0.63763237", "0.6375057", "0.63644433", "0.6357537", "0.6357159", "0.63551754", "0.63334054", "0.6330632", "0.6322481", "0.63093776", "0.6309073", "0.63064146", "0.6305236", "0.628643", "0.628607", "0.62803155", "0.6262236", "0.6252615", "0.6244191", "0.6241631", "0.6241631", "0.62402874", "0.6235937", "0.62281615", "0.6221061", "0.6219821", "0.6207887", "0.6205054", "0.6190647", "0.618942", "0.6187081", "0.6171453", "0.6144029", "0.61433804", "0.61379933", "0.6106372", "0.61038923", "0.60972476", "0.6093409", "0.608716", "0.6085234", "0.6081149", "0.60803646", "0.60400105", "0.6037376", "0.60317636", "0.6027537", "0.60274786", "0.6026757", "0.60231334", "0.60209394", "0.6013335", "0.60113496", "0.6002163", "0.59987", "0.59906363", "0.5984894", "0.5983949", "0.59832454", "0.59773344", "0.5976753", "0.59587574", "0.5951377", "0.5949854", "0.5946117", "0.59437156", "0.5936631", "0.5932907" ]
0.6852247
9
Both paths should be full.
def _put(self, src_fname, dst_fname):
    logging.info('Transferring file %s to %s', src_fname, self._ip_addr)
    sftp_cli = self._get_sftp_client()
    if sftp_cli is None:
        raise Exception('Not supported without ssh.')
    return sftp_cli.put(src_fname, dst_fname)
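The _put() snippet above assumes a _get_sftp_client() helper and an _ip_addr attribute that are not shown in this record. One possible sketch, assuming paramiko as the underlying SSH library (an assumption, not stated in the record), which returns None when no SSH session is available so that the check in _put() still applies:

import logging
import paramiko

class _RemoteHostSketch:
    # Hypothetical holder for the helper assumed by _put() above.

    def __init__(self, ip_addr, username, key_filename=None):
        self._ip_addr = ip_addr
        self._username = username
        self._key_filename = key_filename
        self._ssh = None

    def _get_sftp_client(self):
        # Open (or reuse) an SSH connection and return an SFTP client,
        # or None if the connection cannot be established.
        try:
            if self._ssh is None:
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(self._ip_addr, username=self._username,
                            key_filename=self._key_filename)
                self._ssh = ssh
            return self._ssh.open_sftp()
        except (paramiko.SSHException, OSError):
            logging.exception('Could not open SFTP session to %s', self._ip_addr)
            return None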
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __combine_path(self, other):\n self.path = other.path + self.path", "def join(self, path, *paths):", "def aix_path_join(path_one, path_two):\n if path_one.endswith('/'):\n path_one = path_one.rstrip('/')\n\n if path_two.startswith('/'):\n path_two = path_two.lstrip('/')\n\n final_path = path_one + '/' + path_two\n return final_path", "def samepath(p1, p2):\n # type: (str, str) -> bool\n return pathnormalize(p1) == pathnormalize(p2)", "def join_paths(path_1, path_2):\r\n a = lib_path.join(path_1, path_2)\r\n return a", "def test_verify_path_2(self):\n result = basic.verify_path(str(self.test_filepath1) + \"abcxyz\", \"file\")\n self.assertFalse(result)", "def _issubpath(self, a, b):\n p1 = a.rstrip(os.sep).split(os.sep)\n p2 = b.rstrip(os.sep).split(os.sep)\n return p1[:len(p2)] == p2", "def test_AppendPath(self) -> None:\n p1 = r'C:\\dir\\num\\one;C:\\dir\\num\\two'\n p2 = r'C:\\mydir\\num\\one;C:\\mydir\\num\\two'\n # have to include the pathsep here so that the test will work on UNIX too.\n p1 = AppendPath(p1, r'C:\\dir\\num\\two', sep=';')\n p1 = AppendPath(p1, r'C:\\dir\\num\\three', sep=';')\n assert p1 == r'C:\\dir\\num\\one;C:\\dir\\num\\two;C:\\dir\\num\\three', p1\n\n p2 = AppendPath(p2, r'C:\\mydir\\num\\three', sep=';')\n p2 = AppendPath(p2, r'C:\\mydir\\num\\one', sep=';')\n assert p2 == r'C:\\mydir\\num\\two;C:\\mydir\\num\\three;C:\\mydir\\num\\one', p2\n\n # check (only) last one is kept if there are dupes in new\n p3 = r'C:\\dir\\num\\one'\n p3 = AppendPath(p3, r'C:\\dir\\num\\two;C:\\dir\\num\\three;C:\\dir\\num\\two', sep=';')\n assert p3 == r'C:\\dir\\num\\one;C:\\dir\\num\\three;C:\\dir\\num\\two', p3", "def append_path(path1, path2):\n\n # Get the first absolute path\n abs_path1 = abspath(path1)\n\n # Return the joined paths\n return os.path.join(abs_path1, path2).replace(\"\\\\\", \"/\")", "def joinPath(path, *args):", "def test_set_path_5(self, verify_path2_mock):\n home = Path(\"~\")\n home = home.expanduser()\n test_file = Path(\"~/dir1/dir2/../file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(home, \"dir1/file.txt\")\n self.assertEqual(output, exp)", "def areSamePaths(path1, path2):\n\n path1 = os.path.abspath(os.path.normpath(path1))\n path2 = os.path.abspath(os.path.normpath(path2))\n\n if os.path.exists(path1) and os.path.exists(path2):\n path1 = getExternalUsePath(path1)\n path2 = getExternalUsePath(path2)\n\n path1 = os.path.normcase(path1)\n path2 = os.path.normcase(path2)\n\n return path1 == path2", "def commonpath(a, b):\r\n a = normpath(normcase(a))\r\n b = normpath(normcase(b))\r\n\r\n if a == b:\r\n return a\r\n\r\n while len(a) > 0:\r\n if a == b:\r\n return a\r\n\r\n if len(a) > len(b):\r\n a = dirname(a)\r\n else:\r\n b = dirname(b)\r\n\r\n return None", "def on_same_mount(cls, path1: os.PathLike, path2: os.PathLike) -> bool:\n return cls.get_mount(path1)[0] == cls.get_mount(path2)[0]", "def test_set_path_4(self, verify_path2_mock):\n test_file = Path(\"/dir1/dir2/../file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(\"/dir1/file.txt\")\n self.assertEqual(output, exp)", "def test_verify_path2_2(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_is_compatible_both_dirs(self):\n\n i = Interface('/foo[0:4]')\n 
i['/foo[0:2]', 'interface', 'io'] = [0, 'out']\n i['/foo[2:4]', 'interface', 'io'] = [0, 'in']\n j = Interface('/foo[0:4]')\n j['/foo[0:2]', 'interface', 'io'] = [1, 'in']\n j['/foo[2:4]', 'interface', 'io'] = [1, 'out']\n assert i.is_compatible(0, j, 1)", "def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write", "def test_relativise_sameparent():\n src = pathlib.Path(\"/tmp/foo/bar/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/bar/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"dst.txt\")", "def test_AppendPathPreserveOld(self) -> None:\n p1 = r'C:\\dir\\num\\one;C:\\dir\\num\\two'\n # have to include the pathsep here so that the test will work on UNIX too.\n p1 = AppendPath(p1, r'C:\\dir\\num\\one', sep=';', delete_existing=0)\n p1 = AppendPath(p1, r'C:\\dir\\num\\three', sep=';')\n assert p1 == r'C:\\dir\\num\\one;C:\\dir\\num\\two;C:\\dir\\num\\three', p1", "def test_verify_path2_1(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_set_path_1(self):\n self.file.touch()\n # Since using tempfile, there is an added quirk.\n # the tempfile path may be a symlink, so passing it through set path\n # will resolve the symlink, changing the path, and breaking the test.\n self.file = self.file.resolve()\n output = basic.set_path(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertIsInstance(output, Path)\n with self.subTest():\n self.assertEqual(str(self.file), str(output))", "def test_verify_path2_16(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=None, expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_verify_path2_13(self):\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def areInSamePaths(path1, path2):\n return areSamePaths(os.path.dirname(path1), os.path.dirname(path2))", "def path_join(first: str, second: str) -> str:\n first = first.rstrip('/\\\\')\n second = second.lstrip('/\\\\')\n if not first: return second\n if not second: return first\n return first + '/' + second", "def test_verify_path2_7(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_verify_path2_12(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_verify_path2_5(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_fromFullPath(self):\n log1 = logfile.LogFile(self.name, self.dir, 10, defaultMode=0o777)\n self.addCleanup(log1.close)\n log2 = logfile.LogFile.fromFullPath(self.path, 10, defaultMode=0o777)\n self.addCleanup(log2.close)\n self.assertEqual(log1.name, log2.name)\n self.assertEqual(os.path.abspath(log1.path), log2.path)\n self.assertEqual(log1.rotateLength, log2.rotateLength)\n self.assertEqual(log1.defaultMode, log2.defaultMode)", "def __check_exist_path(self):\n if 'path_out' not in 
self.params:\n raise ValueError('missing \"path_out\" among parameters')\n self.params['path_out'] = update_path(self.params.get('path_out'))\n list_names = [n for n in self.params if any(m in n.lower() for m in ['path', 'dir', 'file'])]\n for n in list_names:\n p = os.path.abspath(os.path.expanduser(self.params[n]))\n if not os.path.exists(p):\n raise FileNotFoundError('given path/file/dir \"%s\" does not exist!' % p)\n self.params[n] = p\n for n in [n for n in self.params if 'exec' in n]:\n # in case you define executable in your home\n if os.path.expanduser(self.params[n]) != self.params[n]:\n self.params[n] = os.path.expanduser(self.params[n])", "def test_fromFullPath(self):\n log1 = logfile.LogFile(self.name, self.dir, 10, defaultMode=0777)\n log2 = logfile.LogFile.fromFullPath(self.path, 10, defaultMode=0777)\n self.assertEquals(log1.name, log2.name)\n self.assertEquals(os.path.abspath(log1.path), log2.path)\n self.assertEquals(log1.rotateLength, log2.rotateLength)\n self.assertEquals(log1.defaultMode, log2.defaultMode)", "def test_relativise_src_under():\n src = pathlib.Path(\"/tmp/foo/bar/baz/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../../dst.txt\")", "def check_relpath(path1, path2, exception=True):\r\n p1 = op.normpath(path1)\r\n p2 = op.normpath(op.join(path1, path2))\r\n if op.relpath(p1, p2).endswith(op.basename(p1)):\r\n if exception:\r\n raise ValueError(\"Invalid path '%s'\" % path2)\r\n return False\r\n return p2", "def file_path_short(self):\r\n if not hasattr(self, '_file_path_short'):\r\n if self.file_path:\r\n result = None\r\n\r\n for path in sys.path:\r\n candidate = os.path.relpath(self.file_path, path)\r\n if not result or (len(candidate.split('/')) < len(result.split('/'))):\r\n result = candidate\r\n\r\n self._file_path_short = result\r\n else: \r\n self._file_path_short = None\r\n\r\n return self._file_path_short", "def test_relativise_dst_under():\n src = pathlib.Path(\"/tmp/foo/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/bar/baz/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"bar/baz/dst.txt\")", "def test_merge_not_fail_draftpath_intersection(self):\n path_a = PathFactory.create(name=\"A\", geom=LineString((0, 0), (10, 0)))\n path_b = PathFactory.create(name=\"B\", geom=LineString((10, 0), (20, 0)))\n PathFactory.create(name=\"C\", geom=LineString((10, 0), (10, 10)), draft=True)\n response = self.client.post(reverse('core:path-drf-merge-path'), {'path[]': [path_a.pk, path_b.pk]})\n self.assertIn('success', response.json())", "def test_is_compatible_both_dirs_types(self):\n\n i = Interface('/foo[0:4]')\n i['/foo[0:2]'] = [0, 'out', 'gpot']\n i['/foo[2:4]'] = [0, 'in', 'spike']\n j = Interface('/foo[0:4]')\n j['/foo[0:2]'] = [1, 'in', 'gpot']\n j['/foo[2:4]'] = [1, 'out', 'spike']\n assert i.is_compatible(0, j, 1)", "def lpath(file0, file1):\n return os.path.abspath(os.path.join(os.path.dirname(file0), file1))", "def test_verify_path2_15(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"file\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_outpath_multi_unequal(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n 
reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths][1:]\n\n with pytest.raises(SystemExit):\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def test_verify_path2_14(self):\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)", "def test_file_empty_dir_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.write_dir(dir1, \"foo\")\n self.sync_all()\n # Directory wins. File is deleted in dir0\n self.assertDirPresent(dir0, \"foo\")\n self.assertDirPresent(dir1, \"foo\")", "def testJoinPath(self):\n test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n path_segments = os.path.split(test_file_path)\n\n path = test_helper.JoinPath(path_segments)\n self.assertEqual(path, test_file_path)", "def samefile(self, other):\n other = os.fspath(other)\n if not isabs(other):\n other = abspath(other)\n if self == other:\n return True\n if not hasattr(os.path, \"samefile\"):\n return False\n return error.checked_call(os.path.samefile, self.strpath, other)", "def test_verify_path2_17(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_relativise_different_parents_shallow():\n src = pathlib.Path(\"/tmp/foo/bar/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/baz/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../baz/dst.txt\")", "def validate_short_path(short_path):", "def test_verify_path2_6(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_verify_path2_4(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def existShortestPath(self):\r\n # s, t = self.findSourceDest(source, dest)\r\n return self.run()", "def __eq__(self, other):\n return type(self) == type(other) and self._full_path == other.full_path", "def test_verify_path2_11(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_filePathNoCommonElements(self):\n self.assertEqual(\n filePathDelta(FilePath(\"/foo/bar\"), FilePath(\"/baz/quux\")),\n [\"..\", \"..\", \"baz\", \"quux\"],\n )", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def test_verify_path2_9(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def testJoin(self):\r\n P=lambda 
p:ufsi.NativeUnixPath(p)\r\n data={\r\n # 1\r\n 'relativePath':\r\n ['/dir1/',P('dir2/fileBase.ext'),'/dir1/dir2/fileBase.ext'],\r\n\r\n # 2\r\n 'absolutePath':\r\n ['/dir1/',P('/dir2/fileBase.ext'),'/dir2/fileBase.ext'],\r\n\r\n # 3\r\n 'notSeparatorTerminatedPath':\r\n ['dir1',P('dir2/fileBase.ext'),'dir1/dir2/fileBase.ext'],\r\n\r\n # 4\r\n 'emptyPath':\r\n ['dir1',P(''),'dir1/'],\r\n\r\n # 5\r\n 'nonNativePath':\r\n ['dir1',ufsi.HttpPath('http://www.google.com.au/'),\r\n 'http://www.google.com.au/']\r\n }\r\n\r\n for k in data.iterkeys():\r\n p1=P(data[k][0])\r\n p2=data[k][1]\r\n r1=str(p1.join(p2))\r\n r2=data[k][2]\r\n self.assertEquals(r1,r2,\r\n '%s: join result was %r but should have been %r'\r\n %(k,r1,r2))", "def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )", "def samefile(path1, path2):\n try:\n return os.path.samefile(path1, path2)\n except OSError as err:\n if err.errno == 2: # ENOENT\n return False\n else:\n raise", "def findShortestPath(self):\r\n pass", "def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def test_set_path_3(self, verify_path2_mock):\n home = Path(\"~\")\n home = home.expanduser()\n test_file = Path(\"~/path/to/file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(home, \"path/to/file.txt\")\n self.assertEqual(output, exp)", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def test_PrependPath(self) -> None:\n p1 = r'C:\\dir\\num\\one;C:\\dir\\num\\two'\n p2 = r'C:\\mydir\\num\\one;C:\\mydir\\num\\two'\n # have to include the pathsep here so that the test will work on UNIX too.\n p1 = PrependPath(p1, r'C:\\dir\\num\\two', sep=';')\n p1 = PrependPath(p1, r'C:\\dir\\num\\three', sep=';')\n assert p1 == r'C:\\dir\\num\\three;C:\\dir\\num\\two;C:\\dir\\num\\one', p1\n\n p2 = PrependPath(p2, r'C:\\mydir\\num\\three', sep=';')\n p2 = PrependPath(p2, r'C:\\mydir\\num\\one', sep=';')\n assert p2 == r'C:\\mydir\\num\\one;C:\\mydir\\num\\three;C:\\mydir\\num\\two', p2\n\n # check (only) first one is kept if there are dupes in new\n p3 = r'C:\\dir\\num\\one'\n p3 = PrependPath(p3, r'C:\\dir\\num\\two;C:\\dir\\num\\three;C:\\dir\\num\\two', sep=';')\n assert p3 == r'C:\\dir\\num\\two;C:\\dir\\num\\three;C:\\dir\\num\\one', p3", "def assertEqualPathsList(first: Iterable[str], second: Iterable[str]) -> None: # pragma: no cover\n if any(isPass(path) for path in first):\n return\n if any(isPass(path) for path in second):\n return\n for fpath in first:\n assert any(fnmatch.fnmatch(fpath, spath) for spath in second)\n for spath in second:\n assert any(fnmatch.fnmatch(fpath, spath) for fpath in first)", "def test_verify_path_1(self):\n result = 
basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)", "def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def make_full_path(self, path, name):\n full_path = (path + \"/\" + name) if path != '' else name\n # remove any duplicate slashes\n full_path = re.sub(r'//+',r'/', full_path)\n self.validate_path(full_path)\n return full_path", "def test_filePathNoCommonElements(self):\n self.assertEquals(filePathDelta(FilePath(\"/foo/bar\"),\n FilePath(\"/baz/quux\")),\n [\"..\", \"..\", \"baz\", \"quux\"])", "def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent\", \"data\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )", "def test_relativise_different_parents_deep():\n src = pathlib.Path(\"/tmp/foo/bar1/bar2/src.txt\")\n dst = pathlib.Path(\"/tmp/foo/baz1/baz2/baz3/dst.txt\")\n rel = relativise(src, dst)\n assert rel == pathlib.Path(\"../../baz1/baz2/baz3/dst.txt\")", "def test_verify_path2_8(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_verify_path2_10(self):\n result, msg = basic.verify_path2(self.file, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_file_dir_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.write_file(dir1, \"foo/bar\", \"baz\")\n self.sync_all()\n # Directory wins. 
File is deleted in dir0\n self.assertFile(dir0, \"foo/bar\", \"baz\")\n self.assertFile(dir1, \"foo/bar\", \"baz\")", "def empty_paths(self):\n self.paths[:]", "def samefile(path1, path2):\n # Handles path-like objects and checks if storage\n path1, path1_is_storage = format_and_is_storage(path1)\n path2, path2_is_storage = format_and_is_storage(path2)\n\n # Local files: Redirects to \"os.path.samefile\"\n if not path1_is_storage and not path2_is_storage:\n return os_path_samefile(path1, path2)\n\n # One path is local, the other storage\n if not path1_is_storage or not path2_is_storage:\n return False\n\n with handle_os_exceptions():\n # Paths don't use same storage\n system = get_instance(path1)\n if system is not get_instance(path2):\n return False\n\n # Relative path are different\n elif system.relpath(path1) != system.relpath(path2):\n return False\n\n # Same files\n return True", "def has_path(self, source, target):\n try:\n sp = nx.shortest_path(self.G, source, target)\n except nx.NetworkXNoPath:\n return False\n return True", "def constructShortestPath(self):", "def check(self):\n super().check()\n\n # scratch directory\n if 'ORTHO' not in PATH:\n setattr(PATH, 'ORTHO', join(PATH.SCRATCH, 'ortho'))", "def test_filePathDeltaSimilarEndElements(self):\n self.assertEqual(\n filePathDelta(FilePath(\"/foo/bar/bar/spam\"), FilePath(\"/foo/bar/baz/spam\")),\n [\"..\", \"..\", \"baz\", \"spam\"],\n )", "def check_paths(self):\n self.data[\"app_path\"] = list(map(\n self.replace_vars_path, self.data[\"app_path\"]))\n self.data[\"icons_path\"] = list(map(\n self.replace_vars_path, self.data[\"icons_path\"]))\n new_app_path = []\n for app_path in self.data[\"app_path\"]:\n if path.isdir(app_path) or path.isfile(app_path):\n new_app_path.append(app_path)\n self.data[\"app_path\"] = new_app_path\n if not len(self.data[\"app_path\"]) == 0:\n new_icons_path = []\n for icon_path in self.data[\"icons_path\"]:\n if (self.data[\"force_create_folder\"] and\n not path.exists(icon_path)):\n log(\"Creating application folder for {0}\".format(self.data[\"name\"]))\n create_dir(icon_path)\n if path.isdir(icon_path):\n if (\"binary\" in self.data.keys()\n and path.isfile(icon_path + self.data[\"binary\"])):\n new_icons_path.append(icon_path)\n elif \"binary\" not in self.data.keys():\n new_icons_path.append(icon_path)\n self.data[\"icons_path\"] = new_icons_path", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def join(cls, *args):\n return AbsolutePath(os.path.join(*(str(piece) for piece in args)))", "def full_path(self):\n fullpath = os.path.join(self.path, self.name)\n if self.path == \"\":\n fullpath = self.name\n return fullpath", "def test_predicates_on_unsanitized_paths(self):\n self.mfs.add_entries({'/just/another/pythonista': ''})\n\n self.assertTrue(os.path.isdir('///just'))\n self.assertTrue(os.path.isdir('///just/////another'))\n self.assertTrue(os.path.exists('///just////another////////pythonista'))\n self.assertTrue(os.path.isfile('///just////another////////pythonista'))", "def build_paths():\n print(\"Creating Paths!\")\n try:\n os.stat(FILES_PATH)\n except:\n os.mkdir(FILES_PATH)\n \n try:\n os.stat(BARRELS_PATH)\n except:\n os.mkdir(BARRELS_PATH)\n\n try:\n os.stat(FORWARD_BARRELS_PATH)\n except:\n os.mkdir(FORWARD_BARRELS_PATH)\n\n try:\n os.stat(INVERTED_BARRELS_PATH)\n except:\n os.mkdir(INVERTED_BARRELS_PATH)\n\n try:\n os.stat(SHORT_BARRELS)\n except:\n os.mkdir(SHORT_BARRELS)\n\n try:\n 
os.stat(SHORT_FORWARD_BARRELS_PATH)\n except:\n os.mkdir(SHORT_FORWARD_BARRELS_PATH)\n\n try:\n os.stat(SHORT_INVERTED_BARRELS_PATH)\n except:\n os.mkdir(SHORT_INVERTED_BARRELS_PATH)\n\n try:\n os.stat(EXTRA_PATH)\n except:\n os.mkdir(EXTRA_PATH)", "def test_getLinkrelToSameDirectory(self):\n linkrel = self.builder.getLinkrel(FilePath(\"/foo/bar\"),\n FilePath(\"/foo/bar\"))\n self.assertEquals(linkrel, \"\")", "def ensure_path(full_path):\n full_path = Path(full_path)\n if not full_path.exists():\n full_path.mkdir(parents=True, exist_ok=True)", "def test_change_non_empty_dir_to_file(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo/bar\", \"baz\")\n self.sync_all()\n self.assertFile(dir0, \"foo/bar\", \"baz\")\n self.assertFile(dir1, \"foo/bar\", \"baz\")\n\n self.delete_file(dir0, \"foo/bar\")\n self.delete_dir(dir0, \"foo\")\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")", "def check_paths(self):\r\n\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t# paths\r\n\t\tsource_img_filename = self.source_img_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_dir_name = self.sink_dir_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_db_name_entry_text = self.sink_db_name_entry.text()\r\n\t\tdb_ext = \".db\" if not sink_db_name_entry_text.lower().endswith(\".db\") else \"\"\r\n\t\tsink_db_filename = os.path.join(sink_dir_name, sink_db_name_entry_text + db_ext).replace(\"\\\\\", \"/\")\r\n\t\tsource_db_filename = \"\"\r\n\r\n\t\t# check validity\r\n\t\tsource_img_filename_valid = self.filestate.is_valid(source_img_filename, SOURCE_IMG)\r\n\t\tsink_dir_name_valid = self.filestate.is_valid(sink_dir_name, SINK_DIR)\r\n\t\tsink_db_filename_valid = self.filestate.is_valid(sink_db_filename, SINK_DB)\r\n\t\tsource_db_filename_valid = True\r\n\r\n\t\tall_paths_valid = source_img_filename_valid and sink_dir_name_valid and sink_db_filename_valid\r\n\r\n\t\tif self.existing_case:\r\n\t\t\tsource_db_filename = self.source_db_entry.text()\r\n\t\t\tsource_db_filename_valid = self.filestate.is_valid(source_db_filename, SOURCE_DB)\r\n\t\t\tall_paths_valid = all_paths_valid and source_db_filename_valid\r\n\r\n\t\tif all_paths_valid:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.filestate.set_sink_dir_name(sink_dir_name)\r\n\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\tif self.existing_case:\r\n\t\t\t\tself.filestate.set_source_db_filename(source_db_filename)\r\n\t\t\tself.refresh_UI()\r\n\t\t\treturn True\r\n\r\n\t\t# in the case of invalidity\r\n\t\tif not source_img_filename_valid:\r\n\t\t\tif not self.filestate.source_img_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file at does not exist.\")\r\n\t\t\telif not self.filestate.source_img_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file type is invalid (must be .npy).\")\r\n\t\t\tself.filestate.set_source_img_filename(\"\")\r\n\t\tif not source_db_filename_valid: # only if existing case\r\n\t\t\tif not self.source_db_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file does not exist.\")\r\n\t\t\telif not self.filestate.source_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file type is invalid (must be .db)\")\r\n\t\t\tself.filestate.set_source_db_filename(\"\")\r\n\t\tif not sink_dir_name_valid:\r\n\t\t\tif not 
self.filestate.sink_dir_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory does not exist.\")\r\n\t\t\telif not self.sink_dir_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory format is invalid.\")\r\n\t\t\tself.filestate.set_sink_dir_name(\"\")\r\n\t\tif not sink_db_filename_valid:\r\n\t\t\tif sink_dir_name_valid and not self.filestate.sink_db_file_preexists and \\\r\n\t\t\t\t\tself.filestate.sink_db_file_format_valid and \\\r\n\t\t\t\t\tdisplay_yes_no_message(self, \"Create file at \" + sink_db_filename + \"?\"):\r\n\t\t\t\t# create file with read write permissions\r\n\t\t\t\t###########################################\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsink_db_file = open(sink_db_filename, \"w+\")\r\n\t\t\t\t\tsink_db_file.close()\r\n\t\t\t\texcept IOError as error:\r\n\t\t\t\t\tdisplay_warning_message(self, \"Failed to create provided sink database file: \" + error)\r\n\t\t\t\t###########################################\r\n\t\t\t\t# set sink db filename\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\t\t\tself.refresh_UI()\r\n\t\t\t\t\treturn True\r\n\t\t\telif not self.filestate.sink_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Be sure to specify a name for the sink database.\")\r\n\t\t\tself.filestate.set_sink_db_filename(\"\")\r\n\r\n\t\t# print(\"paths invalid\")\r\n\t\tself.refresh_UI()\r\n\t\treturn False", "def update_path():\n #TODO update path information\n pass", "def test_realpath(self):\n print real_upath(\"ref with space\")\n self.assertTrue(real_upath(\"ref with space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_upath(\"ref\\ with\\ space\").endswith(\"ref\\ with\\ space\"))\n self.assertTrue(real_ppath(\"ref with space\").endswith(\"ref with space\"))\n self.assertTrue(real_ppath(\"ref\\ with\\ space\").endswith(\"ref with space\"))", "def test_verify_path_5(self):\n result = basic.verify_path(str(self.test_directory1))\n self.assertTrue(result)", "def _set_rel_paths(self):\n if self.working_dir is not None:\n self._rel_working_dir = os.path.relpath(self.working_dir)\n if self.alignment is not None:\n self._rel_alignment = os.path.relpath(self.alignment, \n self.working_dir)\n if self.out_file is not None:\n self._rel_out_file = os.path.relpath(self.out_file, \n self.working_dir)", "def check_abs_path_file(self):\n with in_temp_dir() as dir1:\n filename = self._make_random_file(self.tempdir)\n abspath_filename = os.path.abspath(filename)\n\n # since we're using absolute path to file, we should be able to run the compress command from anywhere\n with in_temp_dir() as dir2:\n assert dir1 != dir2\n\n os.system(_compress_cmd(abspath_filename))\n self._validate_compressed(abspath_filename)", "def test_verify_path2_3(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def compactuser(path):\n userPath = expanduser('~')\n otherPath = expanduser(path)\n \n prefix = commonprefix([userPath, otherPath])\n if prefix == userPath:\n return '~' + otherPath[len(prefix):]\n else:\n return otherPath", "def prepare(self):\n if not self._base:\n self.error = \"path= must be specified\"\n return False\n if self._volume:\n if \"://\" not in self._volume:\n self.error = \"mount= can only be an URL\"\n return False\n if self._base.startswith(\"/\"):\n self._base = os.path.join(self._volume, self._base[1:])\n # do the prefix check anyway, for 
sanity\n if not is_parent_of(self._volume, self._base):\n self.error = \"mount= must be a prefix of path=\"\n return False\n return True", "def force_absolute(base, path):\n if os.path.abspath(path) and os.path.exists(path):\n return path\n else:\n return path_format(base + path)", "def chkPath(fullPath: str) -> None:\n\n # Check if path already exist.\n p = os.path.split(fullPath)\n exists = os.path.exists(p[0])\n # If not then create it.\n if exists == False:\n try:\n os.makedirs(p[0])\n except:\n print(\"Failed to create requested path.\")" ]
[ "0.645393", "0.6182501", "0.61075705", "0.60068315", "0.60003424", "0.5978063", "0.5929083", "0.592156", "0.59077317", "0.59016716", "0.58891624", "0.58888334", "0.584172", "0.5833566", "0.5822319", "0.579614", "0.57439196", "0.5732762", "0.57041806", "0.5696847", "0.5693155", "0.5675337", "0.56707925", "0.5650782", "0.56467646", "0.5644818", "0.56414324", "0.5639067", "0.5620646", "0.5617122", "0.5600285", "0.55982727", "0.55940837", "0.55937153", "0.5590871", "0.5586046", "0.55795944", "0.5575766", "0.5569317", "0.55622745", "0.5553674", "0.55435175", "0.5537959", "0.55328465", "0.55303335", "0.5528622", "0.5524984", "0.5515486", "0.55146545", "0.5506945", "0.5497049", "0.54952633", "0.54928726", "0.54869545", "0.54765564", "0.5470671", "0.5464428", "0.54553473", "0.5452744", "0.54519695", "0.5450707", "0.5445118", "0.544475", "0.54400104", "0.5432142", "0.54269093", "0.54200923", "0.53963995", "0.5389728", "0.53860444", "0.537189", "0.5371793", "0.5371756", "0.5369218", "0.5360853", "0.5355881", "0.53544825", "0.5352988", "0.53405154", "0.5333561", "0.5327588", "0.53259367", "0.5307375", "0.53032476", "0.5301489", "0.5285494", "0.52829564", "0.52813005", "0.5279314", "0.5271664", "0.52713645", "0.52678597", "0.52621025", "0.52572864", "0.5250519", "0.5250345", "0.52489316", "0.5244003", "0.5241094", "0.5236389", "0.5223881" ]
0.0
-1
dst_fname should be full path. Creates directories if required.
def put_file(self, src_fname, dst_fname): dst_fname = os.path.normpath(dst_fname) self.mkdirs(os.path.dirname(dst_fname)) self._put(src_fname, dst_fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst", "def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst", "def prepare(self, dst, options):\n self.checkExisting(dst)\n self.makedirs(dst.parent())", "def MakeDestinationDirectories(self, dst_files):\n for dst in dst_files:\n path = os.path.dirname(dst);\n if (len(path) > 0) and (not os.path.exists(path)):\n self.VerboseMsg(\"Make Directory: \" + path)\n if self.execute:\n os.makedirs(path)", "def copy_file(filename, dst):\n # Create dir if needed\n dir_path = os.path.dirname(os.path.expanduser(dst))\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n src = os.path.join(get_data(''), filename)\n dst = os.path.expanduser(dir_path)\n shutil.copy2(src, dst)", "def _copy_file_with_parents(src, dst, ignore_no_src=False):\n if not os.path.isfile(src) and ignore_no_src:\n return\n\n dst_dir = os.path.dirname(dst)\n create_directories(dst_dir)\n\n copyfile(src, dst)", "def make_local_copy(outdir, subdir, fname):\n\n destdir = path.join(outdir, subdir)\n mkdir(destdir)\n shutil.copyfile(package_data(fname), path.join(destdir, fname))\n return path.join(subdir, fname)", "def copy_deep(src: str, dst: str, create_dst_dir: bool = False) -> None:\n system_is_darwin = platform.system().lower() == \"darwin\"\n if create_dst_dir:\n mkdir_p(os.path.dirname(dst))\n src_is_link = os.path.islink(src)\n dst_exists = os.path.lexists(dst)\n if os.path.isdir(src) and not src_is_link:\n logging.debug(\"Copying directory {} to {}\".format(src, dst))\n mkdir_p(dst)\n for name in os.listdir(src):\n copy_deep(os.path.join(src, name), os.path.join(dst, name))\n elif src_is_link:\n if dst_exists:\n return\n target = os.readlink(src)\n logging.debug(\"Creating symlink {} -> {}\".format(dst, target))\n os.symlink(target, dst)\n else:\n if dst_exists:\n if not system_is_darwin:\n return\n # Only overwrite the file if the source is newer than the destination.\n if os.path.getmtime(src) <= os.path.getmtime(dst):\n return\n logging.debug(\"Copying file {} to {}\".format(src, dst))\n # Preserve the file attributes.\n shutil.copy2(src, dst)", "def safecopy(src, dst):\r\n abs_src = os.path.abspath(src)\r\n abs_dst = os.path.abspath(dst)\r\n if (abs_src != abs_dst) \\\r\n and os.path.isfile(abs_src): \r\n dirname = os.path.dirname(abs_dst)\r\n recurse_mkdir(dirname)\r\n shutil.copy(abs_src, abs_dst)", "def copy_to_se(self, src, dst, create_parent_directory=True):\n mgm, dst = self._safe_split_mgm(dst)\n dst = self._join_mgm_lfn(mgm, dst)\n if create_parent_directory:\n parent_directory = osp.dirname(dst)\n self.create_directory(parent_directory)\n logger.warning('Copying {0} to {1}'.format(src, dst))\n cmd = [ 'xrdcp', '-s', src, 
dst ]\n svj.core.utils.run_command(cmd)", "def cp(src, dst):\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n src_stat = os.stat(src)\n try:\n dst_stat = os.stat(dst)\n except FileNotFoundError:\n dst_stat = (0,)*10\n src_modif_time = src_stat[stat.ST_MTIME]\n dst_modif_time = dst_stat[stat.ST_MTIME]\n if src_modif_time > dst_modif_time:\n shutil.copyfile(src, dst)\n print(\" ++\", dst[len(THESIS_DIR):])\n else:\n print(\" --\", dst[len(THESIS_DIR):])", "def cp_to_dir(fn0, d):\n\n # keep rewriting attributes\n shutil.copy(fn0, d)", "def _copy_dir(src, dst):\n if os.path.isdir(src):\n os.makedirs(dst, exist_ok=True)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n\n if os.path.isdir(s):\n _copy_dir(s, d)\n else:\n shutil.copy2(s, d)\n\n else:\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n _delete_file(dst)\n shutil.copy2(src, dst)", "def copy_dir(src: Text, dst: Text) -> None:\n\n if tf.io.gfile.exists(dst):\n tf.io.gfile.rmtree(dst)\n tf.io.gfile.makedirs(dst)\n\n for dir_name, sub_dirs, leaf_files in tf.io.gfile.walk(src):\n for leaf_file in leaf_files:\n leaf_file_path = os.path.join(dir_name, leaf_file)\n new_file_path = os.path.join(dir_name.replace(src, dst, 1), leaf_file)\n tf.io.gfile.copy(leaf_file_path, new_file_path)\n\n for sub_dir in sub_dirs:\n tf.io.gfile.makedirs(os.path.join(dir_name.replace(src, dst, 1), sub_dir))", "def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)", "def makeOutputPath(outputPath, filePath, filename):\n if filePath == \"\" or filePath == None:\n return None\n if outputPath[-1] == FOLDER_DELIM: # remove an eventual / at the end\n outputPath = outputPath[:-1]\n\n fullPath = outputPath + FOLDER_DELIM + filePath\n fullPath += FOLDER_DELIM # add a '/' at the end of the path\n\n if not os.path.isfile(fullPath): # if the path doesn't exist, create it!\n try:\n os.makedirs(fullPath)\n except FileExistsError:\n pass # nothing really went wrong if the folder already exists, continue quietly\n return fullPath + filename + TARGET # return the path and target filename", "def copy_file_to_multiple_subfolders(src, dst, *args, **kwargs):\n print '\\nSource: {}\\nDestinations parent folder: {}'.format(src, dst)\n filename = os.path.basename(src)\n for folder in (d for d in os.listdir(dst) if os.path.isdir(d)):\n print '\\nCopying {} to {}...'.format(filename, folder)\n try:\n shutil.copy(src, os.path.abspath(dst) + '\\\\' + folder)\n except Exception as e:\n print e", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def copy(src, dst):\n os.makedirs(os.path.dirname(dst), 
exist_ok=True)\n shutil.copy2(src, dst)", "def cpsym(src,dest):\n \n src = os.path.normpath(src)\n dest = os.path.normpath(dest)\n \n if not os.path.exists(src):\n return\n \n for dirpath,dirnames,filenames in os.walk(src):\n rel_dirpath = os.path.relpath(dirpath,src)\n dest_dirpath = os.path.join(dest,rel_dirpath)\n mkdir(dest_dirpath,isfull=True)\n \n for filename in filenames:\n src_filename = os.path.join(dirpath,filename)\n rel_filename = os.path.relpath(src_filename,src)\n \n dest_filename = os.path.join(dest,rel_filename)\n try:\n os.symlink(src_filename,dest_filename)\n except OSError:\n pass", "def create_path_by_date(dest, dt):\n if not os.path.isdir(dest):\n raise FileNotFoundError(f\"dest {dest} must be valid path\")\n yyyy, mm, dd = dt[0:3]\n yyyy = str(yyyy).zfill(4)\n mm = str(mm).zfill(2)\n dd = str(dd).zfill(2)\n new_dest = os.path.join(dest, yyyy, mm, dd)\n if not os.path.isdir(new_dest):\n os.makedirs(new_dest)\n return new_dest", "def create_dir_for_file(f_path):\n d = os.path.dirname(f_path)\n if d and not os.path.exists(d):\n os.makedirs(d)", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def create(self, basedir, outdir, name, prefix=None):", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass", "def buildDestination(self, mapping, options, src):\n prefixTemplate = options['prefix']\n if prefixTemplate is None:\n prefixTemplate = self.defaultPrefixTemplate\n\n if prefixTemplate is not None:\n prefix = os.path.expanduser(\n prefixTemplate.safe_substitute(mapping))\n else:\n prefixTemplate = string.Template(src.dirname())\n prefix = prefixTemplate.template\n\n ext = src.splitext()[-1]\n\n nameTemplate = options['name']\n if nameTemplate is None:\n nameTemplate = self.defaultNameTemplate\n\n filename = nameTemplate.safe_substitute(mapping)\n logging.msg(\n 'Building filename: prefix=%r name=%r mapping=%r' % (\n prefixTemplate.template, nameTemplate.template, mapping),\n verbosity=3)\n return FilePath(prefix).child(filename).siblingExtension(ext)", "def clone(src: str, dst: str):\n if dst is None:\n dst = getcwd()\n destination = path.abspath(dst)\n # TODO: replace with false this is just for testing:\n makedirs(destination, exist_ok=True)\n\n sync_chunk(src, destination)\n copy(src, destination)", "def create_path(inputfile, outputdir):\n pathdata = '/'.join(inputfile.split('/')[-3:])\n newpath = join(outputdir, pathdata)\n dirout = dirname(newpath)\n if not isdir(dirout):\n os.makedirs(dirout)\n return newpath", "def task():\n if os.path.isdir(orig):\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*')) if \\\n os.path.isdir(fP) ]:\n if not os.path.exists(dest + fP[len(orig):]):\n os.makedirs(dest + fP[len(orig):])\n for fP in [ fP for fP in glob.glob(os.path.join(orig, '*-*/*/%s.log' %fmt.get_date())) if \\\n os.path.isfile(fP) ]:\n convert(fP, dest + fP[len(orig):])", "def copyFile(src_dir, dst_dir, f_name):\n\n try:\n src_file = open(osp.join(src_dir, f_name),\"rb\")\n dst_file = open(osp.join(dst_dir, f_name),\"wb\")\n dst_file.write(src_file.read())\n dst_file.close()\n src_file.close()\n except Exception, e:\n msg = \"!!! In copying files from < %s > dir to < %s > dir exception occur. 
Details: %s.\" % (src_dir,dst_dir, str(e))\n print >> import_out, msg\n LOG('performImportToPortal',INFO,'copyFile', msg)", "def move(self,src,dst):\n src = os.path.join(self.testpath,src)\n dst = os.path.join(self.testpath,dst)\n directory = os.path.split(dst)[0]\n try:\n os.makedirs(directory)\n except OSError:\n pass\n\n shutil.move(src,dst)", "def copydir(src, dst):\n for item in os.listdir(src):\n s, d = os.path.join(src, item), os.path.join(dst, item)\n if os.path.isdir(s):\n if not os.path.isdir(d):\n os.mkdir(d)\n copydir(s, d)\n else:\n shutil.copy(s, d)", "def _createDirsAbove(fname):\n\n dirToCreate = os.path.dirname(fname)\n # https://stackoverflow.com/a/600612\n try:\n os.makedirs(dirToCreate)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(dirToCreate):\n pass\n else:\n raise", "def create_dir(name_new_path):\n\n try:\n os.mkdir(config_tools.full_dest + name_new_path)\n except OSError:\n print(\"Создать директорию %s не удалось\" % name_new_path)\n else:\n print(\"Успешно создана директория %s \" % name_new_path)", "def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp", "def install(src, dst):\n try:\n dst = os.path.join(install_dir, dst, os.path.basename(src))\n src = os.path.join(source_dir, src)\n assert os.path.isfile(src)\n assert not os.path.isdir(dst)\n if not os.path.isdir(os.path.dirname(dst)):\n os.makedirs(os.path.dirname(dst))\n shutil.copy(src, dst)\n print 'Installed', dst\n except Exception:\n print 'Could not install', dst", "def create_folder(output_directory: str, fldrname: str):\n\n os.makedirs(output_directory, exist_ok=True)\n tstmp = datetime.now().strftime('%Y%m%d_%H%M%S')\n try:\n fldr_path = os.path.join(output_directory, fldrname)\n os.mkdir(fldr_path)\n except FileExistsError:\n fldr_path = os.path.join(output_directory, fldrname + '_{}'.format(tstmp))\n os.mkdir(fldr_path)\n return fldr_path", "def create_directory_for_file_if_needed(file_name):\n directory_name = os.path.dirname(file_name)\n create_directory_if_needed(directory_name)\n return directory_name", "def ensure_directory(self, name, dest, mode=0777):\n self.m.path.assert_absolute(dest)\n self._run(\n name, ['ensure-directory', '--mode', oct(mode), dest])\n self.m.path.mock_add_paths(dest)", "def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)", "def create_symlink_dir(src_dir, src_list, dst):\n if not src_list:\n return\n message = \"creating symlink directory at {dst} with files {src_list}\".format(\n dst=dst,\n src_list=pformat(src_list))\n logging.info(message)\n if not os.path.exists(dst):\n os.makedirs(dst)\n for src_file in src_list:\n if not src_file:\n continue\n source = os.path.join(src_dir, src_file)\n destination = os.path.join(dst, src_file)\n if os.path.lexists(destination):\n continue\n try:\n os.symlink(source, destination)\n except Exception as e:\n msg = format_debug(e)\n logging.error(e)", "def copy_file(src_file,dst_folder):\n from shutil import copyfile\n from os.path import split\n copyfile(src_file, dst_folder+split(src_file)[1])\n return", "def add_to_split(rec_dir, target, label):\n for file_name in os.listdir(rec_dir):\n path = os.path.join(rec_dir, file_name)\n if (os.path.isfile(path)):\n if not os.path.isdir(os.path.join(target, str(label))):\n os.makedirs(os.path.join(target, str(label)))\n 
shutil.copy(path, os.path.join(target, str(label), file_name))", "def _copy_files(src_paths,dst_dir,class_numbers):\n\n class_dirs=[os.path.join(dst_dir,class_name+\"/\")for class_name in self.class_names]\n\n for dir in class_dirs:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n for src,cls in zip(src_paths,class_numbers):\n shutil.copy(src=src,dst=class_dirs[cls])", "def create_dir(output_path):\n if not os.path.exists(output_path) and is_directory(output_path):\n os.makedirs(output_path)", "def prepare_output_dir(out_dir, test_dir):\r\n\r\n if not out_dir.exists():\r\n out_dir.mkdir()\r\n\r\n # get the necessary file names\r\n file_names = get_file_names(test_dir, args.distance, print_file_names=False)\r\n\r\n # copy the images in the firstIms into the output folder\r\n for name in file_names[1][0]:\r\n file_path = Path(test_dir / name)\r\n copy_to = Path(out_dir / name)\r\n shutil.copy(file_path, copy_to)\r\n\r\n # the firstIms list does not contain the last image,\r\n # so we need to also copy the last image of the secIms into the output folder\r\n last_im = file_names[1][1][-1]\r\n shutil.copy(Path(test_dir/last_im), Path(out_dir/last_im))\r\n\r\n return file_names", "def CopyFolder(src, dst, overwrite=True):\n\n if not src.endswith((\"/\", \"\\\\\")):\n src = src + \"\\\\\"\n if not dst.endswith((\"/\", \"\\\\\")):\n dst = dst + \"\\\\\"\n os.makedirs(dst, exist_ok=True)\n\n for file in os.listdir(src):\n if not overwrite and os.path.isfile(dst + file):\n continue\n if os.path.isfile(src + file):\n shutil.copy(src + file, dst + file)\n elif os.path.isdir(src + file):\n CopyFolder(src + file, dst + file, overwrite)\n return 0", "def move_files(fname_fout, root_dir, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Find files which filename of fname_fout\n matches = []\n pattern = fname + '*'\n root_fnames = os.listdir(root_dir)\n for filename in fnmatch.filter(root_fnames, pattern):\n matches.append([filename, os.path.join(root_dir, filename)])\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n # Move files\n for cur_file in matches:\n os.renames(cur_file[1], os.path.join(dest_dir, cur_file[0]))", "def convertPath(srcpath, dstdir):\n bits = srcpath.split(\"/\")\n bits.pop(0)\n # Strip out leading 'unsigned' from paths like unsigned/update/win32/...\n if bits[0] == 'unsigned':\n bits.pop(0)\n return os.path.join(dstdir, *bits)", "def folder_to_s3(src, dst, region, max_parallelism=1, force_copy=False, **kwargs):\n bucket, root = utils.path.reverse_split(dst)\n\n s3 = boto3.resource('s3')\n\n # check if the bucket exists\n if not __bucket_exists(bucket):\n\n if force_copy:\n print('creating bucket: ' + bucket)\n\n try:\n s3.create_bucket(Bucket=bucket,\n CreateBucketConfiguration={'LocationConstraint': region})\n except botocore.exceptions.ClientError as e:\n raise e\n else:\n exit(-1)\n\n # instanciate transfer configuration\n conf = boto3.s3.transfer.TransferConfig(use_threads=True, **kwargs)\n\n # start uploading\n with ProcessPoolExecutor(max_workers=max_parallelism) as executor:\n try:\n for file in utils.path.dir_tree(src):\n # removes the root so that it can be\n # later added to the input string\n suffix = file.replace(src, '')\n executor.submit(file_to_s3,\n bucket,\n file,\n os.path.join(root, suffix),\n conf,\n utils.path.progress\n )\n\n except (BrokenProcessPool):\n try:\n # deleting the bucket if created\n # to do so, the bucket must be 
empty\n print(\"removing %s from %s\" % (root, bucket))\n delete_folder(bucket, root, region)\n if force_copy:\n print(\"attempting to delete %s\" % bucket)\n s3.Bucket(bucket).delete()\n\n except botocore.exceptions.ClientError as e:\n print(\"operation failed: %s\" % e)\n exit(-1)\n\n else:\n print(\"operation aborted. exiting...\")\n exit(0)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def path_src_to_dest(src_pathname, dest_filename_suffix=None):\n src_relpath = Path(src_pathname).relative_to(config[\"topdir\"])\n dest_pathname = Path(config[\"outdir\"]).joinpath(src_relpath)\n if dest_filename_suffix:\n dest_pathname = dest_pathname.with_suffix(dest_filename_suffix)\n return dest_pathname", "def compress(src,dstfile):\n\tafile = zipfile.ZipFile(dstfile,\"w\",zipfile.ZIP_DEFLATED)\n\tfor root,dirs,files in os.walk(src):\n\t\tfor filename in files:\n\t\t\tabspath = osp.join(root,filename)\n\t\t\trelpath = osp.relpath(abspath,src)\n\t\t\tafile.write(abspath, relpath)\n\tafile.close();", "def safe_copy(file_path, out_dir, dst=None):\n name = dst or os.path.basename(file_path)\n if not os.path.exists(os.path.join(out_dir, name)):\n shutil.copy(file_path, os.path.join(out_dir, name))", "def create_output_folder(campaign_name: str, path: str) -> str:\n folder_name = \"\".join([campaign_name, \"_druckfiles\"])\n path = os.path.split(path)[0]\n out_path = os.path.join(path, folder_name)\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n return out_path", "def copy(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def applyDir(self,srcDir,destDir,exts): \n for srcFile in os.listdir(srcDir):\n srcExt = os.path.splitext(srcFile)[-1].lower()\n srcPath = os.path.join(srcDir,srcFile)\n destPath = os.path.join(destDir,srcFile)\n if srcExt in exts:\n if not os.path.exists(destDir):\n os.makedirs(destDir)\n shutil.copyfile(srcPath,destPath)\n if self.progress: \n self.cumSize += os.path.getsize(srcPath)\n self.progress(self.cumSize,_('Copying Files...'))\n elif os.path.isdir(srcPath):\n self.applyDir(srcPath,destPath,exts)", "def _prepare_directory(destination, connector_id):\n\n # Use the destination directory when provided\n if destination:\n if not os.path.exists(destination):\n # Create all sub-directories\n os.makedirs(destination)\n # Create a sub-directory in the current directory\n # when a destination isn't provided\n else:\n if not os.path.isdir(connector_id):\n os.mkdir(connector_id)\n destination = connector_id\n\n if os.path.isdir(destination):\n os.chdir(destination)\n else:\n error = 'Couldn\\'t download to the desination directory {}.'\n raise CLIError(error.format(destination))\n\n return os.getcwd()", "def m_cp(*args):\n if (not args or len(args) < 2) :\n print(\"parameter is invalid\")\n return\n\n src = args[0:len(args)-1]\n dst = args[len(args)-1]\n\n if not os.path.exists(dst) :\n os.mkdir(dst)\n\n for s in src:\n print(\"cp %s ==> %s\" % (s, dst))\n cpfile(s, dst)", "def save(url, dst, force=False):\n if not os.path.isfile(dst) or force:\n # Test if the directory exist or create\n d = os.path.dirname(dst)\n if not os.path.exists(d):\n os.makedirs(d)\n print(u\"\\nDownloading: {0} to {1}\".format(url, dst))\n urllib.urlretrieve(url, dst, 
report)", "def make_dir(file_name): # output_file_loc = des\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if not os.path.exists(f'{output_path}/{fld}'):\n os.makedirs(f'{output_path}/{fld}')\n # print(\"MAKE_DIR completed...\") \n return", "def dst_to_src(self,dst_file):\n rel_path=os.path.relpath(dst_file,start=self.dst_root)\n if (rel_path == '.'):\n rel_path=''\n else:\n rel_path= '/'+rel_path\n if (os.sep != '/'):\n # if directoty path sep isn't / then translate for URI \n rel_path=rel_path.replace(os.sep,'/')\n return(self.src_root+rel_path)", "def copy_dir(src, dst):\n try:\n debug.log(\"copy dir from \"+ src, \"to \"+ dst)\n shutil.copytree(src, dst)\n except Exception as e:\n debug.log(\"Error: happened while copying!\\n%s\\n\"%e)", "def set_dest(self, mode, dest, prefix):\n try: \n os.mkdir(dest)\n os.mkdir(dest+'/images')\n os.mkdir(dest+'/images'+'/train')\n os.mkdir(dest+'/images'+'/test')\n os.mkdir(dest+'/labels')\n os.mkdir(dest+'/labels'+'/train')\n os.mkdir(dest+'/labels'+'/test')\n except FileExistsError:\n pass\n\n if mode == 'train':\n self.detImage = dest+'images/'+'train/'\n self.detLabel = dest+'labels/'+'train/'\n self.detList = dest+'train.txt'\n elif mode == 'test':\n self.detImage = dest+'images/'+'test/'\n self.detLabel = dest+'labels/'+'test/'\n self.detList = dest+'test.txt'\n self.prefix = prefix", "def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))", "def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' 
not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path", "def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))", "def copy(self, fname):\n _, ext = osp.splitext(fname)\n spath = osp.join(self.src, fname)\n oname = fname\n path = osp.join(self.dst, oname)\n os.makedirs(osp.dirname(path), exist_ok=True)\n if ext in [\".css\"]:\n content = self.include(fname)\n with open(path, \"wt\") as fp:\n fp.write(content)\n else:\n shutil.copyfile(spath, path)\n return osp.relpath(oname, self.root)", "def copyDir(src, dst, includes, excludes = []):\n\tmultiFilesReplacements([], dst, src, includes, excludes)", "def _install_file(srcdir, filename, dstdir):\n srcfilename = os.path.join(srcdir, filename)\n dstfilename = os.path.join(dstdir, filename)\n if not os.path.exists(srcfilename):\n if os.path.exists(dstfilename):\n subprocess.run(['rm', dstfilename], check=True)\n return (False, True)\n return (False, False)\n\n equal = subprocess.run(['diff', '-q', srcfilename, dstfilename],\n check=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n if not equal:\n subprocess.run(['mv', srcfilename, dstfilename], check=True)\n return (True, not equal)", "def check_path(fp):\n if not Path(fp).exists():\n\n if len(Path(fp).suffix) > 0: # check if file\n Path(fp).parent.mkdir(exist_ok=True, parents=True)\n\n else: # or directory\n Path(fp).mkdir(exist_ok=True, parents=True)", "def copy_dir(src=\"\", dst=\"\", header=\"\", footer=\"\", clip=0, ext=\"\", test=False):\n failed = []\n nfiles = 0\n if not os.path.exists(dst):\n os.makedirs(dst)\n if not os.path.exists(src):\n raise argparse.ArgumentError(\"source does not exist! It must be a directory.\")\n else:\n for root, dirs, files in os.walk(src, topdown=False):\n for name in files:\n name_wo_ext, file_ext = os.path.splitext(name)\n\n src_path = os.path.join(root, name)\n dstfilename = header + os.path.join(root[len(src)+1:], name_wo_ext[clip:]) + footer + file_ext\n dst_path = os.path.join(dst, dstfilename)\n\n dst_pdir = os.path.dirname(dst_path)\n if not os.path.exists(dst_pdir):\n os.makedirs(dst_pdir)\n\n if not os.path.exists(dst_path):\n if ext == \"\" or ext == file_ext[1:]:\n try:\n shutil.copy(src_path, dst_path)\n except:\n failed.append(src_path)\n print(f\"... {src_path} failed\")\n else:\n print(f\"... {dst_path} already exists'. 
Skipping\")\n nfiles += 1\n\n if test:\n break\n if test:\n break\n print(f\"{nfiles - len(failed)} / {nfiles} files were copied.\")\n return failed", "def validate_file_destination(namespace):\n try:\n path = namespace.destination\n except AttributeError:\n return\n else:\n # TODO: Need to confirm this logic...\n file_path = path\n file_dir = os.path.dirname(path)\n if os.path.isdir(path):\n file_name = os.path.basename(namespace.file_name)\n file_path = os.path.join(path, file_name)\n elif not os.path.isdir(file_dir):\n try:\n os.mkdir(file_dir)\n except EnvironmentError as exp:\n message = \"Directory {} does not exist, and cannot be created: {}\"\n raise ValueError(message.format(file_dir, exp))\n if os.path.isfile(file_path):\n raise ValueError(\"File {} already exists.\".format(file_path))\n namespace.destination = file_path", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def ConstructDstUri(self, src_uri, exp_src_uri, base_dst_uri):\n if base_dst_uri.names_container():\n # To match naming semantics of UNIX 'cp' command, copying files\n # to buckets/dirs should result in objects/files named by just the\n # final filename component; while copying directories should result\n # in objects/files mirroring the directory hierarchy. Example of the\n # first case:\n # gsutil cp dir1/file1 gs://bucket\n # should create object gs://bucket/file1\n # Example of the second case:\n # gsutil cp dir1/dir2 gs://bucket\n # should create object gs://bucket/dir2/file2 (assuming dir1/dir2\n # contains file2).\n if src_uri.names_container():\n dst_path_start = (src_uri.object_name.rstrip(os.sep)\n .rpartition(os.sep)[-1])\n start_pos = exp_src_uri.object_name.find(dst_path_start)\n dst_key_name = exp_src_uri.object_name[start_pos:]\n else:\n # src is a file or object, so use final component of src name.\n dst_key_name = os.path.basename(exp_src_uri.object_name)\n if base_dst_uri.is_file_uri():\n # dst names a directory, so append src obj name to dst obj name.\n dst_key_name = '%s%s%s' % (base_dst_uri.object_name, os.sep,\n dst_key_name)\n self.CheckForDirFileConflict(exp_src_uri, dst_key_name)\n else:\n # dest is an object or file: use dst obj name\n dst_key_name = base_dst_uri.object_name\n return base_dst_uri.clone_replace_name(dst_key_name)", "def create_out_dir(out): \n out_path = os.path.join(out,out_dir_name)\n try:\n os.stat(out_path)\n except:\n os.mkdir(out_path)", "def _make_dirs(filepath, mode):\n parent = filepath.parent\n if \"w\" in mode and parent:\n os.makedirs(parent, exist_ok=True)", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' 
% self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def create_file(backup_file, input_root, output_root):\n input_path = get_input_path(backup_file, input_root)\n if input_path is None:\n logging.warning(f\"Missing file: {backup_file.file_id} ({backup_file.relative_path})\")\n return 0\n output_path = get_output_path(backup_file, output_root)\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n copyfile(input_path, output_path)", "def save(annotation, new_filename, original_path):\n \n destination = \"../../standardized-data/\"\n if os.path.isdir(destination + \"/\" + annotation) == False:\n os.mkdir(destination + \"/\" + annotation)\n print(annotation, \"FOLDER CREATED\")\n if os.path.exists(destination + \"/\" + annotation + \"/\" + new_filename):\n print('FILE EXISTS: DOUBLE CHECK FOR DUPLICATION :', new_filename)\n else:\n shutil.copyfile(original_path, destination + \"/\" + annotation + \"/\" + new_filename)\n return", "def create_file_path(fname, direc=\"data/result/\"):\n path = os.path.join(TOP_LEVEL, direc, fname)\n return path", "def _make_fname(song, ext=None, av=None, subdir=None):\n # pylint: disable=E1103\n # Instance of 'bool' has no 'extension' member (some types not inferable)\n ddir = os.path.join(Config.DDIR.get, subdir) if subdir else Config.DDIR.get\n if not os.path.exists(ddir):\n os.makedirs(ddir)\n\n if ext:\n extension = ext\n\n else:\n stream = streams.select(streams.get(song),\n audio=av == \"audio\", m4a_ok=True)\n extension = stream['ext']\n\n # filename = song.title[:59] + \".\" + extension\n filename = song.title + \".\" + extension\n filename = os.path.join(ddir, mswinfn(filename.replace(\"/\", \"-\")))\n filename = filename.replace('\"', '')\n return filename", "def ensure_path_exists(filename):\n targetdir = dirname(expanduser(filename))\n if exists(targetdir):\n return\n os.makedirs(abspath(targetdir))", "def createWorkingFolder(newpath):\n newpath = pdbName[:-4]+'_ARM_input'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n globals().update({ \"workingFolder\" : newpath+\"/\"})\n\n shutil.move( pdbName, newpath+\"/\"+pdbName) \n if glob.glob(\"*.seqmut\"):\n MutFile = glob.glob(\"*.seqmut\")[0]\n shutil.copyfile(str(MutFile), newpath+\"/\"+pdbName[:-4]+\".seqmut\")\n else:\n pass\n os.chdir(newpath)", "def make_source_dir():\n\n os.makedirs(files['source_dir'].rel)", "def create_files(save_dir, vid_name):\n file_name = vid_name.split('/')[-1].split('.')[0]\n if not os.path.isdir(os.path.join(save_dir, file_name)):\n os.makedirs(os.path.join(save_dir, file_name))\n return file_name", "def setup(self, newdir=None):\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n if newdir:\n _new = os.path.join(self.output_path, newdir)\n if not os.path.exists(_new):\n os.makedirs(_new)", "def moveAsset(self, src, dst):\n if not self.exists( self.dirname(dst) ):\n self.makedirs( self.dirname(dst) )\n self.move(src, dst)\n\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return \n\n cache_dst = self.cache_path(dst)\n if not os.path.exists( os.path.dirname(cache_dst) ):\n os.makedirs( os.path.dirname(cache_dst) )\n shutil.move(cache_src, cache_dst)", "def copy_one(self, src, dest):\n if self.manager.no_sourcemaps and self.is_ignored_sourcemap(src.name):\n return\n\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating 
folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n copytree_kwargs = {}\n\n if self.manager.no_sourcemaps:\n copytree_kwargs[\"ignore\"] = SOURCEMAP_IGNORE_PATTERNS\n\n if src.is_dir():\n shutil.copytree(src, dest, **copytree_kwargs)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)", "def copy_files_and_create_dirs(files) -> None:\r\n for file in files:\r\n target_dir_name = os.path.dirname(file[1])\r\n\r\n # will create all intermediate-level directories\r\n if not os.path.exists(target_dir_name):\r\n os.makedirs(target_dir_name)\r\n\r\n shutil.copyfile(file[0], file[1])", "def generate_filename_template_path(output_dir, filename_template):\n if output_dir:\n os.makedirs(output_dir, exist_ok=True)\n return os.path.join(output_dir, filename_template)\n return None", "def prep_folder(args):\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)", "def CheckForDirFileConflict(self, src_uri, dst_path):\n final_dir = os.path.dirname(dst_path)\n if os.path.isfile(final_dir):\n raise CommandException('Cannot retrieve %s because it a file exists '\n 'where a directory needs to be created (%s).' %\n (src_uri, final_dir))\n if os.path.isdir(dst_path):\n raise CommandException('Cannot retrieve %s because a directory exists '\n '(%s) where the file needs to be created.' %\n (src_uri, dst_path))", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def make_zipfile(output_filename, source_dir):\n relroot = os.path.abspath(os.path.join(source_dir, os.pardir))\n with zipfile.ZipFile(output_filename, \"w\", zipfile.ZIP_DEFLATED) as zip:\n for root, dirs, files in os.walk(source_dir):\n # add directory (needed for empty dirs)\n zip.write(root, os.path.relpath(root, relroot))\n for file in files:\n filename = os.path.join(root, file)\n if os.path.isfile(filename): # regular files only\n arcname = os.path.join(os.path.relpath(root, relroot), file)\n zip.write(filename, arcname)", "def copyDir(self, src, subpath):\n dst = self.output_path + \"/\" + subpath\n shutil.copytree(src, dst)", "def get_path(dir_name, file_format, args):\n fname = \"{exp}-excl-{excl}\".format(exp=args.exp,\n excl=args.feature_set)\n path = os.path.join(SAVE_ROOT, dir_name, args.rbp,\n fname + file_format)\n\n # make the directory if it doesn't exist_ok\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n return path", "def copy_file(self, dst, tmpdir=None):\n if tmpdir is None:\n tmpfn = sameDir\n else:\n tmpfn = lambda _: tmpdir._path\n assert isinstance(dst, Path)\n with open(self._path, 'rb') as src_fd:\n with safeopen(dst._path, 'wb', useDir=tmpfn) as dst_fd:\n copyfileobj(src_fd, dst_fd)", "def cp_dir_or_files(self):\n if self.recursive:\n if self.cmdtype == 'upload' and not self.srcpath.endswith(os.path.sep):\n basename = os.path.basename(self.srcpath)\n self.destpath = join_obs_path(self.destpath, basename)\n elif self.cmdtype == 'download' and not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = os.path.join(self.destpath, basename)\n elif not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = join_obs_path(self.destpath, basename)" ]
[ "0.7712215", "0.7621111", "0.6710671", "0.6514752", "0.63302606", "0.62029976", "0.6054697", "0.6051175", "0.6038802", "0.5964814", "0.5951201", "0.59485644", "0.5948254", "0.5946226", "0.59095293", "0.58884376", "0.58736056", "0.58708906", "0.5866591", "0.5849726", "0.58469474", "0.5839048", "0.5807639", "0.5806094", "0.5761289", "0.57328033", "0.57309127", "0.57285875", "0.57232267", "0.5713726", "0.5706059", "0.56945825", "0.5693603", "0.5680116", "0.5671909", "0.56689256", "0.56661475", "0.5640255", "0.56282455", "0.562352", "0.55943", "0.5582576", "0.557928", "0.55291486", "0.5526603", "0.5513261", "0.5511419", "0.5509769", "0.5505771", "0.55011344", "0.54961807", "0.5490744", "0.5487028", "0.5486514", "0.548605", "0.5478819", "0.54769206", "0.5468459", "0.5464938", "0.54597336", "0.5459439", "0.54578114", "0.5454339", "0.5443674", "0.54409295", "0.54409295", "0.54396343", "0.543266", "0.54291797", "0.54105395", "0.54103184", "0.5404263", "0.5378119", "0.537359", "0.5372133", "0.5371678", "0.53660464", "0.53643304", "0.5357919", "0.5355391", "0.53483963", "0.5341635", "0.5338536", "0.5332116", "0.532981", "0.5329335", "0.5327814", "0.5316662", "0.5312688", "0.53121144", "0.5311315", "0.5311232", "0.5306307", "0.53056496", "0.53034556", "0.5292448", "0.5291944", "0.5285185", "0.527991", "0.52796346" ]
0.71607935
2
gpu_model_to_scale is a dict from model string to scale.
def avail_gpu_compute(self, gpu_model_to_scale): self._check_spy_stats_available() l = [] for u, model in zip(self._util.gpu_compute, self._capacity.gpu_model): found = False for k, scale in gpu_model_to_scale.items(): if k in model: found = True break if found: l.append(scale * (1 - u)) else: raise Exception('Unknown GPU model %s found on host %s' % (model, self.name)) return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale_model(model, scale):\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)", "def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res", "def petab_scale_to_amici_scale(scale_str):\n\n if scale_str == 'lin':\n return amici.ParameterScaling_none\n if scale_str == 'log':\n return amici.ParameterScaling_ln\n if scale_str == 'log10':\n return amici.ParameterScaling_log10\n raise ValueError(\"Invalid pscale \" + scale_str)", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def get_scales(min_scale=0.2, max_scale=0.9,num_layers=6):\n\n # this code follows the original implementation of wei liu\n # for more, look at ssd/score_ssd_pascal.py:310 in the original caffe implementation\n min_ratio = int(min_scale * 100)\n max_ratio = int(max_scale * 100)\n step = int(np.floor((max_ratio - min_ratio) / (num_layers - 2)))\n min_sizes = []\n max_sizes = []\n for ratio in xrange(min_ratio, max_ratio + 1, step):\n min_sizes.append(ratio / 100.)\n max_sizes.append((ratio + step) / 100.)\n min_sizes = [int(100*min_scale / 2.0) / 100.0] + min_sizes\n max_sizes = [min_scale] + max_sizes\n\n # convert it back to this implementation's notation:\n scales = []\n for layer_idx in range(num_layers):\n scales.append([min_sizes[layer_idx], np.single(np.sqrt(min_sizes[layer_idx] * max_sizes[layer_idx]))])\n return scales", "def convert_scale(g, op, block):\n\n scale = op.attr(\"scale\")\n bias = op.attr(\"bias\")\n bias_after_scale = op.attr(\"bias_after_scale\")\n x = g.get_node(op.input(\"X\")[0])\n if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):\n out = x\n else:\n if np.isclose(bias, 0.0):\n out = x * _expr.const(np.array(scale).astype(\"float32\"))\n elif np.isclose(scale, 1.0):\n out = x + _expr.const(np.array(bias).astype(\"float32\"))\n else:\n if bias_after_scale:\n out = x * _expr.const(np.array(scale).astype(\"float32\")) + _expr.const(\n np.array(bias).astype(\"float32\")\n )\n else:\n out = (x + _expr.const(np.array(bias).astype(\"float32\"))) * _expr.const(\n np.array(scale).astype(\"float32\")\n )\n g.add_node(op.output(\"Out\")[0], out)", "def scale_module(module, scale):\n for p in module.parameters():\n p.detach().mul_(scale)\n return module", "def _hyperparam_to_scale(self, hyperparam):\n\n # If logscale is used, input hyperparam is log of the scale.\n if self.use_log_scale:\n scale = 10.0**hyperparam\n else:\n scale = numpy.abs(hyperparam)\n\n return scale", "def set_scale(self, motor_model):\n for driver_re, motor_dict in self.__SCALE_FACTORS_BY_MODEL.iteritems():\n if driver_re.match(self._apt.model_number) is not None:\n if motor_model in motor_dict:\n self.scale_factors = motor_dict[motor_model]\n return\n else:\n break\n # If we've made it down here, emit a warning that we didn't find the\n # model.\n logger.warning(\n \"Scale factors for controller {} and motor {} are unknown\".format(\n self._apt.model_number, motor_model\n )\n )", "def 
make_feature_scale_factors():\n X, y = make_X_and_y()\n sqm = make_sqm_X()\n scale_factors = {\n \"indoor_temp\": np.max(X[:,:,0]),\n \"outdoor_temp\": np.max(X[:,:,1]),\n \"gas_kwh\": np.max(X[:,:,2]),\n \"elec_kwh\": np.max(X[:,:,3]),\n \"floor_area\": np.max(sqm),\n \"htc\": np.max(y),\n }\n\n with open(os.path.join(_TRAINING_DATA_PATH, \"scalefactors.json\"), \"w+\") as f:\n json.dump(scale_factors, f)", "def _update_model_params(self, params, model_ID, model, param_grid):\n \n params = params.copy()\n param_grid = param_grid.copy()\n \n params_transform = {}\n \n for key in params.keys():\n \n if 'log10.' in key:\n log10_transform = True\n else:\n log10_transform = False\n \n key = key.replace('log10.','')\n \n type_str = str(type(param_grid[key][0]))\n \n if 'int' in type_str: \n if log10_transform:\n params_transform[key] = int(10**params['log10.'+key])\n else:\n params_transform[key] = int(params[key])\n \n elif 'float' in type_str:\n if log10_transform:\n params_transform[key] = float(10**params['log10.'+key])\n \n else:\n params_transform[key] = float(params[key])\n \n elif 'str' in type_str: #index the param grid for hyperparams using 'choice'\n params_transform[key] = param_grid[key][params[key]]\n \n if 'densenet' not in model_ID.lower(): \n model.__dict__[key] = params_transform[key]\n \n assert(type_str == str(type(params_transform[key]))), 'type(param_grid[key][0]) changed from '+type_str+' to '+str(type(param_grid[key][0]))+' after updating params for key:'+str(key)\n \n if 'str' in type_str:\n assert(params_transform[key] in param_grid[key]), 'params_transform['+key+']='+str(params_transform[key])+' is not in the list of valid parameter choices:'+str(param_grid[key])\n \n else:\n assert(params_transform[key]<=max(param_grid[key]) and params_transform[key]>=min(param_grid[key])), 'params_transform['+key+']='+str(params_transform[key])+' does not lie in the range of valid values:'+str([min(param_grid[key]),max(param_grid[key])] )\n \n if 'densenet' in model_ID.lower(): \n model = model(**params_transform)\n \n return params_transform, model", "def scale_data(x, y, x_scale_f = '../saved_models/scalers/9_params_21_2_x_scaler.pkl', y_scale_f = '../saved_models/scalers/9_params_21_2_y_scaler.pkl', par_slice = range(7) + range(8,9)):\n\tx_scaler = sklearn.externals.joblib.load(x_scale_f)\n\ty_scaler = sklearn.externals.joblib.load(y_scale_f)\n\tx_scaler.transform(x)\n\ty_scaler.transform(y)\n\tx = x[:,par_slice] \n\treturn x, y, x_scaler, y_scaler", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self):\n return self._gev_bijector.scale", "def scale(self, data: np.ndarray):\n if self.scale_type == \"min_max\":\n scaled_data = (data - self.predictor_min) / (\n self.predictor_max - self.predictor_mean\n )\n elif self.scale_type == \"normalize\":\n scaled_data = (data - self.predictor_mean) / (\n self.predictor_max - self.predictor_min\n )\n elif self.scale_type == \"standardize\":\n scaled_data = (data - self.predictor_mean) / self.predictor_std\n elif self.scale_type == \"scale\":\n scaled_data = data - self.predictor_mean\n else:\n scaled_data = data\n return scaled_data", "def _call_scale(vecObj, sc):\n res = vecObj.scale(sc)\n return res", "def task_scaling(input_array, scaling_factor):\n return(np.multiply(scaling_factor, input_array))", "def colorscale(self):\n return self[\"colorscale\"]", "def any_scale(scale):\n return scale", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return 
pulumi.get(self, \"scale\")", "def yscale(value):\n impl.yscale(**locals())", "def scale_data(cube, user_input, plot_type):\n\n scaled = False\n for input_pair in user_input:\n scale_factor, user_plot_type = input_pair\n if user_plot_type == plot_type:\n assert scaled == False, \"Multiple scale factors entered for single plot type\"\n cube.data = cube.data / 10**int(scale_factor)\n cube.units = '10^%s %s' %(scale_factor, str(cube.units))\n scaled = True\n\n return cube", "def scale_uv(mesh_name, scale):\n mesh = bpy.data.meshes[mesh_name]\n if len(mesh.uv_layers) == 0:\n return\n uv_layer = mesh.uv_layers[0].data\n for uv_index in range(0, len(uv_layer)):\n uv = uv_layer[uv_index].uv\n uv *= scale", "def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)", "def _scale_independent_metrics() -> list:\n return ['mape', 'r2', 'nse']", "def scaleProcess(process,scale):\n #print '>>> scaleProcess(\"%s\",%.3f):'%(process.process(),scale)\n #print \">>> rate before = %s\"%(process.rate())\n process.set_rate(process.rate()*scale)\n #print \">>> rate after = %s\"%(process.rate())", "def _scale_to_hyperparam(self, scale):\n\n # If logscale is used, output hyperparam is log of scale.\n if self.use_log_scale:\n hyperparam = numpy.log10(numpy.abs(scale))\n else:\n hyperparam = numpy.abs(scale)\n\n return hyperparam", "def get_scale(units, compartmentId, volume, extracellularVolume):\r\n if compartmentId == 'c':\r\n V = volume\r\n else:\r\n V = extracellularVolume\r\n\r\n if units == 'uM':\r\n return 1. / N_AVOGADRO / V * 1e6\r\n elif units == 'mM':\r\n return 1. / N_AVOGADRO / V * 1e3\r\n elif units == 'molecules':\r\n return 1.\r\n else:\r\n raise Exception('Invalid units \"%s\"' % units)", "def get_scale_op(self):\n\t\treturn self.variables.get('scale')", "def write_scale_map(f: h5py.File, parameter_scale_mapping: List[List[str]],\n parameter_df: pd.DataFrame, amici_model: amici.Model):\n\n # for simulation\n # set parameter scaling for all parameters\n pscale = np.zeros(shape=(len(parameter_scale_mapping),\n len(parameter_scale_mapping[0])))\n for i, cond_scale_list in enumerate(parameter_scale_mapping):\n for j, s in enumerate(cond_scale_list):\n pscale[i, j] = petab_scale_to_amici_scale(s)\n\n f.require_dataset('/parameters/pscaleSimulation',\n shape=pscale.shape,\n dtype=\"<i4\",\n data=pscale)\n\n # for cost function parameters\n pscale = np.array([petab_scale_to_amici_scale(s)\n for s in parameter_df.parameterScale.values[\n (parameter_df.estimate == 1)\n & ~parameter_df.index.isin(\n amici_model.getFixedParameterIds())]])\n f.require_dataset('/parameters/pscaleOptimization',\n shape=pscale.shape,\n dtype=\"<i4\",\n data=pscale)", "def colorscale(self):\n return self['colorscale']", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def to_multi_gpu(model, n_gpus=4):\n\n with tf.device('/cpu:0'):\n x = Input(model.input_shape[1:], name=model.input_names[0])\n towers = []\n device=[0,1,2,3]\n for g in range(n_gpus):\n with tf.device('/gpu:' + str(device[g])):\n slice_g = Lambda(slice_batch, lambda shape: shape,\n arguments={'n_gpus':n_gpus, 'part':g})(x)\n towers.append(model(slice_g))\n\n with tf.device('/cpu:0'):\n merged = merge(towers, mode='concat', concat_axis=0)\n\n return Model(inputs=[x], outputs=merged)", 
"def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def _scale_bbox(bbox_list: List[List[int]], dst_dims: Tuple[int, int]) -> List[List[int]]:\n ret_val = []\n [x_scale, y_scale] = [dst / src for dst, src in zip(dst_dims, CityScapesDataset.base_size)]\n for bbox in bbox_list:\n ret_val.append([bbox[0] * x_scale, bbox[1] * y_scale,\n bbox[2] * x_scale, bbox[3] * y_scale])\n return ret_val", "def scale_mvs_camera(cams, scale=1):\n for view in range(FLAGS.view_num):\n cams[view] = scale_camera(cams[view], scale=scale)\n return cams", "def test_replace_namespaced_scale_scale(self):\n pass", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def scale_to(scale_to, group,\n allow_zero_scale=False, allow_unknown_scale=False):\n if isinstance(scale_to, numbers.Number):\n scale = scale_to\n else:\n # get scale to be used\n scale_to = lena.flow.Selector(scale_to)\n cands = [val for val in group if scale_to(val)]\n if len(cands) > 1:\n raise lena.core.LenaValueError(\n \"only one candidate to provide scale must be selected, \"\n \"{} found\".format(cands)\n )\n elif not cands:\n raise lena.core.LenaValueError(\n \"at least one item to get scale from must be selected\"\n )\n else:\n cand = cands[0]\n scale = lena.flow.get_data(cand).scale()\n\n # rescale\n for val in group:\n data, context = lena.flow.get_data_context(val)\n try:\n data.scale(scale)\n except AttributeError as err:\n # scale was not set and can't be determined\n if not allow_unknown_scale:\n raise lena.core.LenaValueError(\n \"could not determine the scale of {}\"\n .format(val)\n )\n except lena.core.LenaValueError as err:\n # scale is zero and can't be changed\n if not allow_zero_scale:\n raise err\n return None", "def scale(self, fname, **kw):\n return self.scales.scale(fname, **kw)", "def scale(train, test):\n # fit scaler\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaler = scaler.fit(train)\n # transform train\n train = train.reshape(train.shape[0], train.shape[1])\n train_scaled = scaler.transform(train)\n # transform test\n test = test.reshape(test.shape[0], test.shape[1])\n test_scaled = scaler.transform(test)\n return scaler, train_scaled, test_scaled", "def test_read_namespaced_scale_scale(self):\n pass", "def action_scaling(env, action_scaler):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print(\"Using dm_control so need to get state_dim differently\")\n state_dim = len(env.observation_space['observations'].low)\n\n action_dim = len(env.action_space.low)\n\n # state_scaling = float(state_scaling)\n action_scaler = float(action_scaler)\n\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32) * action_scaler\n\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)", "def get_image_scale (wcs):\n return list(proj_plane_pixel_scales(wcs))", "def get_scale_net():\n return nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 2), nn.Tanh())", "def get_scaler(scaler):\n if scaler == 'standard':\n from sklearn.preprocessing import StandardScaler\n return StandardScaler()\n if scaler == 'minmax':\n from sklearn.preprocessing import MinMaxScaler\n return MinMaxScaler()", "def scale(self):", "def set_constraint_scaling_factor(self, con):\n condata = self.get_representative_data_object(con)\n vardata = self.con2var[condata]\n scaling_factor = self.scaling_factor\n\n var_factor = 
scaling_factor[vardata]\n if self.dim == 0:\n scaling_factor[con] = var_factor\n else:\n for c in con.values():\n scaling_factor[c] = var_factor", "def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def scale_and_offset_model(model,scaleparname='A',offsetparname='C',scaleval=1,offsetval=0):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if offsetparname in model.params:\n offsetparname += '2'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant','constant'),operation=['*','+'],\n parnames={'C1':scaleparname,'C2':offsetparname})\n setattr(res,scaleparname,scaleval)\n setattr(res,offsetparname,offsetval)\n return res", "def scale(self):\n return self.distribution.scale", "def set_ui_scale():\n # TODO test on other OS and resolutions\n moniter_h = QtWidgets.QDesktopWidget().screenGeometry(-1).height()\n if sys.platform == 'win32':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.0\n else:\n scale = 1.0\n elif sys.platform == 'linux':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.23\n else:\n scale = 1.4\n elif sys.platform == 'darwin':\n if moniter_h == 1080:\n scale = 1.0\n elif moniter_h == 1440:\n scale = 1.25\n else:\n scale = 1.55\n return scale", "def factor_to_scale(factor):\n return 1 / B.sqrt(4 * factor / B.pi)", "def parallel_scale(self, value):\n self.camera.parallel_scale = value\n self.Modified()", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def get_scale_factor(value_dict, max_length=os.get_terminal_size().columns):\n max_value = max(value_dict.values(), key=abs)\n try:\n scale = max_length / abs(max_value)\n except ZeroDivisionError:\n scale = 1\n return scale", "def parallel_scale(self):\n return self.camera.parallel_scale", "def keypoints_scale(keypoints, scale_x, scale_y):\n keypoints[:, :2] = keypoints[:, :2] * (scale_x, scale_y)\n return keypoints", "def GetScale(self):\n ...", "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not 
config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def test_scale_value(make_rampmodel):\n\n datmod = make_rampmodel(2, 2, 4, 2048, 2048)\n\n # Calculate the scale based off of the input.\n scale = datmod.meta.exposure.frame_divisor / datmod.meta.exposure.nframes\n\n output = GroupScaleStep.call(datmod)\n\n scale_from_data = np.unique(output.data / datmod.data)\n\n # Since the scale value is applied uniformly to the array, if we divide the output\n # by the input then we should get a single unique value (ie the scale) calculated\n # by the pipeline.\n assert len(scale_from_data) == 1\n\n # Make sure the scale calculated manually from the data model above matched what the\n # pipeline calculated.\n assert scale == scale_from_data[0]", "def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale", "def set_scale_factors_to_output_size(self):\n # Compute the scale_factor using rounded scaled image size.\n height = tf.shape(self._image)[0]\n width = tf.shape(self._image)[1]\n max_image_size = tf.to_float(tf.maximum(height, width))\n image_scale = tf.to_float(self._output_size) / max_image_size\n scaled_height = tf.to_int32(tf.to_float(height) * image_scale)\n scaled_width = tf.to_int32(tf.to_float(width) * image_scale)\n self._image_scale = image_scale\n self._scaled_height = scaled_height\n self._scaled_width = scaled_width", "def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:\n return sample", "def __scale_bboxes(self, bboxes, scale_x, scale_y):\n with tf.variable_scope('scale_bboxes'):\n return tf.multiply(bboxes, tf.tile([[scale_y, scale_x, scale_y,\n scale_x]],\n [tf.shape(bboxes)[0], 1]))", "def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return", "def create_scaling_model(params, experiments, reflections):\n autos = [None, Auto, \"auto\", \"Auto\"]\n use_auto_model = params.model in autos\n # Determine non-auto model to use outside the loop over datasets.\n if not use_auto_model:\n model_class = None\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == params.model:\n model_class = entry_point.load()\n break\n if not model_class:\n raise ValueError(f\"Unable to create scaling model of type {params.model}\")\n\n for expt, refl in zip(experiments, reflections):\n if not expt.scaling_model or params.overwrite_existing_models:\n # need to make a new model\n if 
use_auto_model:\n if not expt.scan:\n model = KBScalingModel\n else: # set model as physical unless scan < 1.0 degree\n osc_range = expt.scan.get_oscillation_range()\n abs_osc_range = abs(osc_range[1] - osc_range[0])\n if abs_osc_range < 1.0:\n model = KBScalingModel\n else:\n model = PhysicalScalingModel\n else:\n model = model_class\n expt.scaling_model = model.from_data(params, expt, refl)\n else:\n # allow for updating of an existing model.\n expt.scaling_model.update(params)\n return experiments", "def plate_scale(platescale):\n if platescale.unit.is_equivalent(si.arcsec / si.m):\n platescale_val = platescale.to_value(si.radian / si.m)\n elif platescale.unit.is_equivalent(si.m / si.arcsec):\n platescale_val = (1 / platescale).to_value(si.radian / si.m)\n else:\n raise UnitsError(\"The pixel scale must be in angle/distance or distance/angle\")\n\n return Equivalency(\n [(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],\n \"plate_scale\",\n {\"platescale\": platescale},\n )", "def supervised_cost_scale(\n scale, loss_supervised, output_noise_labelled, labelled_target\n):\n cost_supervised = loss_supervised.forward(output_noise_labelled, labelled_target)\n\n cost_supervised *= scale\n return cost_supervised", "def _linear_to_mel_scale(linear_scale_in, n_mel_bins, mel_lower_hertz_edge,\n mel_upper_hertz_edge):\n\n linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n n_mel_bins, linear_scale_in.shape[-1], _SAMPLE_RATE, mel_lower_hertz_edge,\n mel_upper_hertz_edge\n )\n\n mel_scale_out = tf.tensordot(linear_scale_in, linear_to_mel_weight_matrix, 1)\n return mel_scale_out", "def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale):\n\n def score_converter_fn(logits):\n scaled_logits = np.divide(logits, logit_scale)\n\n return tf_score_converter_fn(scaled_logits)\n\n return score_converter_fn", "def scale(original_train, new_train):\n # find magnitude original training data\n o_mag = np.linalg.norm(np.stack(original_train[:,1]))\n # find magnitude new data\n n_mag = np.linalg.norm(np.stack(new_train[:,1]))\n # scale new data\n scale = o_mag / n_mag\n return scale", "def scale(self):\n return self._scale", "def scaling(self) -> Optional['outputs.AiFeatureStoreOnlineServingConfigScaling']:\n return pulumi.get(self, \"scaling\")", "def compute_scaler(args):\n workspace = args.workspace\n data_type = args.data_type\n dir_name = args.dir_name \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(x2d)\n print(scaler.mean_)\n print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! 
%s s\" % (time.time() - t1,))", "def get_scaling_type(df_mfactors: pd.DataFrame, cmdl_args):\n eps = 0.9\n normalized_inst_ratio = 0\n\n # Check if there is only one trace\n if len(df_mfactors.index) == 1:\n return 'strong'\n\n for index, row in df_mfactors.iterrows():\n inst_ratio = float(row[MOD_FACTORS_DOC['useful_ins']]) / float(df_mfactors[MOD_FACTORS_DOC['useful_ins']][0])\n proc_ratio = float(row[MOD_FACTORS_DOC['num_processes']]) / float(\n df_mfactors[MOD_FACTORS_DOC['num_processes']][0])\n normalized_inst_ratio += inst_ratio / proc_ratio\n\n # Get the average inst increase. Ignore ratio of first trace (1.0)\n normalized_inst_ratio = (normalized_inst_ratio - 1) / (len(df_mfactors.index) - 1)\n\n scaling_computed = ''\n\n if normalized_inst_ratio > eps:\n scaling_computed = 'weak'\n else:\n scaling_computed = 'strong'\n\n if cmdl_args.scaling == 'auto':\n if cmdl_args.debug:\n print(f'==DEBUG== Detected {scaling_computed} scaling.')\n print('')\n return scaling_computed\n\n if cmdl_args.scaling == 'weak':\n if scaling_computed == 'strong':\n print('==WARNING== Scaling set to weak scaling but detected strong scaling.')\n print('')\n return 'strong'\n\n if cmdl_args.scaling == 'strong':\n if scaling_computed == 'weak':\n print('==WARNING== Scaling set to strong scaling but detected weak scaling.')\n print('')\n return 'strong'\n\n print('==ERROR== Reached undefined control flow state.')\n sys.exit(1)", "def scale(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"scale(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safeMul(value,factor)\n return seriesList", "def scale(inp, ab):\n\n return inp * ab[0] + ab[1]", "def find_scale(dataset, log_scales, path=_default_path):\n fname = \"\".join([\"evaluate_\", dataset, \"_64x64.h5\"])\n store = h5py.File(join(path, fname))\n\n rocs = dict()\n dists = [\"L1\", \"L2\", \"COSINE\"]\n norms = [\"l1\", \"l2\", \"id\"]\n for scale in log_scales:\n sift = \"\".join([\"evaluate_\", dataset, \"_sift_\", str(scale), \".h5\"])\n sift = h5py.File(join(path, sift), \"w\")\n store_as_sift(store, sift, scale=2**scale)\n rocs[scale] = evaluate(sift, distances=dists, normalizations=norms)\n sift.close()\n return rocs", "def find_suggested_tonemap_scale(session):\n avg_film_luminance = session.GetFilm().GetFilmY()\n return (1.25 / avg_film_luminance * (118 / 255))\n\n # TODO\n # measure this all the time, show a message to the user if\n # abs(old - new) > threshold\n # so the user can set the new value with one click\n\n # imagepipeline = scene.camera.data.luxcore.imagepipeline\n # imagepipeline.tonemapper.linear_scale = suggested_linear_scale\n # imagepipeline.tonemapper.use_autolinear = False", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def test_auto_scale_batch_size_trainer_arg(tmpdir, scale_arg):\n tutils.reset_seed()\n hparams = EvalModelTemplate.get_default_hparams()\n model = EvalModelTemplate(**hparams)\n before_batch_size = hparams.get('batch_size')\n trainer = Trainer(default_root_dir=tmpdir,\n max_epochs=1,\n auto_scale_batch_size=scale_arg,\n gpus=1)\n trainer.tune(model)\n after_batch_size = model.batch_size\n assert before_batch_size != after_batch_size, \\\n 'Batch size was not altered after running auto scaling of batch size'", "def price_user_scale_factor(self, new_price_scale_factor):\n self._price_user_scale_factor = new_price_scale_factor", "def scale(self,bvp):\n\n sol = bvp.solution\n 
# Additional aux entries for initial and terminal BCs\n extras = [{'type':'initial','vars':self.problem_data['state_list']},\n {'type':'terminal','vars':self.problem_data['state_list']}]\n\n # Scale the states and costates\n for idx,state in enumerate(self.problem_data['state_list']):\n sol.y[idx,:] /= self.scale_vals['states'][state]\n\n # Scale auxiliary variables\n for aux in (self.problem_data['aux_list']+extras):\n if aux['type'] not in Scaling.excluded_aux:\n for var in aux['vars']:\n sol.aux[aux['type']][var] /= self.scale_vals[aux['type']][var]\n\n # Scale parameters\n for idx, param in enumerate(self.problem_data['parameter_list']):\n sol.parameters[idx] /= self.scale_vals['parameters'][param]", "def __get_scale_units(self, device_type_name, channel_number):\n if device_type_name == \"BPMS\":\n if channel_number == 0:\n fault_name = \"X\"\n elif channel_number == 1:\n fault_name = \"Y\"\n elif channel_number == 2:\n fault_name = \"TMIT\"\n else:\n raise ValueError(\"Function \\\"get_app_name(device_type_name={}, channel_number={})\\\". Invalid channel number for BPMS device type\"\n .format(device_type_name, channel_number))\n return self.__get_app_units(device_type_name, fault_name)\n else:\n return self.__get_app_units(device_type_name, \"\")", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def plane_scale(self, scale):\n cmd = '{}testPlaneScale {}'.format(self.console, scale)\n self.write_command(cmd)", "def scale(self, X_train, X_test):\n\n #X_train, X_test, y_train, y_test = self.split_X_y_sets()\n self.scaler.fit(X_train)\n X_train_sc = self.scaler.transform(X_train)\n X_test_sc = self.scaler.transform(X_test)\n\n return X_train_sc, X_test_sc #, y_train, y_test", "def freqscale(self):\n index = self._ordered_input_names.index('freqscale')\n return self._inputs[index]", "def scale_matrix(self, matrix_df):\n\n print('scaling features...')\n\n originalkeys = matrix_df[['segment_id','date','time']].reset_index()\n originalkeys['index'] = originalkeys.index\n\n columns = list(matrix_df)\n columns.remove('segment_id')\n columns.remove('date')\n columns.remove('time')\n data_to_scale = matrix_df[columns]\n matrix_df_scaled_tmp = pd.DataFrame(preprocessing.scale(data_to_scale), columns = columns).reset_index()\n matrix_df_scaled_tmp['index']=matrix_df_scaled_tmp.index\n\n matrix_df_scaled_labels_tmp = pd.merge(matrix_df_scaled_tmp, originalkeys, on=['index'])\n matrix_df_scaled = matrix_df_scaled_tmp.drop(columns=['index'])\n matrix_df_scaled_withlabels = matrix_df_scaled_labels_tmp.drop(columns=['index'])\n \n return matrix_df_scaled_withlabels", "def scale(image):\n image = tf.cast(image, tf.float32)\n image /= 255\n return image", "def scaled_component(self, key):\n\n if key in self.components:\n dat = self.components[key] \n # Aliases\n elif key in component_from_alias:\n comp = component_from_alias[key]\n if comp in self.components:\n dat = self.components[comp] \n else:\n # Component not present, make zeros\n return np.zeros(self.shape)\n else:\n raise ValueError(f'Component not available: {key}')\n \n # Multiply by scale factor\n factor = self.factor \n \n if factor != 1:\n return factor*dat\n else:\n return dat", "def test_patch_namespaced_scale_scale(self):\n pass", "def config_from_pytorch_model(\n model,\n granularity='model',\n backend=None,\n default_precision='ap_fixed<16,6>',\n default_reuse_factor=1,\n 
inputs_channel_last=False,\n transpose_outputs=True,\n):\n\n config = {}\n\n model_config = {}\n model_config['Precision'] = default_precision\n model_config['ReuseFactor'] = default_reuse_factor\n model_config['InputsChannelLast'] = inputs_channel_last\n model_config['TransposeOutputs'] = transpose_outputs\n model_config['Strategy'] = 'Latency'\n\n config['Model'] = model_config\n\n return config", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def getscales(self):\n return self.scales", "def convert_scaling_to_form_factors(qz, scale):\n apply_absorption_correction(qz, scale)\n apply_Lorentz_correction(qz, scale)\n for i in xrange(len(scale)):\n scale[i] = np.sign(scale[i]) * math.sqrt(abs(scale[i]))", "def matScale(mat, scale):\n shape=matShape(mat)\n return [[matGet(mat,x,y)*scale for y in range(shape[1])] \\\n for x in range(shape[0])]", "def scale_ticks_params(tick_scale='linear'):\n if tick_scale == 'linear':\n base = None\n label_scale = 'Linear Scale'\n else:\n if tick_scale == 'log2':\n base = 2\n label_scale = 'Log2 Scale'\n elif tick_scale == 'log10':\n base = 10\n label_scale = 'Log10 Scale'\n else:\n raise ValueError('The specified tick scale is not supported.')\n return base, label_scale" ]
[ "0.67240673", "0.6272403", "0.5994706", "0.5941544", "0.5833461", "0.5681746", "0.5676147", "0.5382901", "0.5348", "0.53371793", "0.5301205", "0.52537143", "0.52262044", "0.51800525", "0.51736313", "0.5139863", "0.512118", "0.5083435", "0.50683033", "0.50677323", "0.50584626", "0.5052913", "0.5050851", "0.5048136", "0.50284874", "0.50180674", "0.5017098", "0.50109756", "0.5006812", "0.4985893", "0.49858075", "0.49794883", "0.49584532", "0.4958185", "0.49545544", "0.49530986", "0.4952522", "0.49500015", "0.49393916", "0.493769", "0.4927588", "0.49175602", "0.49135134", "0.4904828", "0.4903294", "0.4899042", "0.4898761", "0.48929045", "0.48893055", "0.48892927", "0.48753592", "0.48639697", "0.48587188", "0.4849531", "0.48460126", "0.4842077", "0.48391664", "0.48297128", "0.48265994", "0.48206714", "0.48115358", "0.48081496", "0.4805505", "0.4799054", "0.4791941", "0.4785347", "0.47742388", "0.47654986", "0.47644013", "0.47605643", "0.47600996", "0.47552285", "0.47552136", "0.475323", "0.47503623", "0.47493386", "0.47468555", "0.47452906", "0.4732579", "0.47198063", "0.47178286", "0.4713385", "0.47104812", "0.47053972", "0.47023824", "0.47014537", "0.46941185", "0.4693801", "0.46932673", "0.46926004", "0.46917307", "0.46898133", "0.46876448", "0.4674922", "0.4670565", "0.46699354", "0.46675408", "0.4667024", "0.46642593", "0.46607885" ]
0.6472911
1
From all the data, it takes the columns TopicID and Question and, for each topic, counts the number of different SubTopic/Question
def get_data_frame_count_type_of_topic(data_frame: DataFrame) -> pb.DataFrame:
    try:
        data_frame = data_frame \
            .select("TopicID", "Question") \
            .distinct() \
            .groupBy("TopicID") \
            .count() \
            .sort("TopicID")
    except Py4JError:
        raise AnalysisException('One of the columns is incorrect')

    print("The following table represents the number of question types per topic")
    data_frame.show()

    data_frame_pandas = data_frame.toPandas()
    return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Male\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of men group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics", "def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Black, non-Hispanic\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of black ethnicity people group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter", "def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. 
Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)", "def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame:\n data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question'])\n print(data_frame_pandas)\n return data_frame_pandas", "def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()", "def test_question_topics(self):\n p = ProductFactory()\n t1 = TopicFactory(slug='doesnotexist', product=p)\n t2 = TopicFactory(slug='cookies', product=p)\n t3 = TopicFactory(slug='sync', product=p)\n\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t3)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)", "def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def assign_topics(self):\n\n # Load the dataset which has topic tags for each sentence\n dt, dt_less = self.assign_topics_to_sentences()\n \n dt_copy = dt\n \n # Minimum number of tags needed to tag the overall response\n dt['min_num_tags'] = dt['num_sent'].apply(lambda x: math.ceil(0.3*x))\n \n # Final dataset with full survey response and its tags\n final_dt = dt.groupby(self.id_col_name).agg({'tags': sum\n , 'num_sent': min\n , 'min_num_tags': min\n# , 'sentences': lambda x: \"%s\" % '. 
'.join(x)\n , self.col_name: min})\n final_dt.reset_index(level = 0, inplace = True)\n final_dt['topics'] = final_dt.apply(lambda x: set([i for i in x.tags if x.tags.count(i) >= x.min_num_tags])\n , axis = 1)\n\n final_dt_less = final_dt[[self.id_col_name, self.col_name, 'topics']]\n\n return dt_copy, dt_less, final_dt, final_dt_less", "def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")", "def so_data_statistics(data_file):\n with open(data_file, \"r\") as f:\n data = json.load(f)\n\n answer_to_num_questions = collections.Counter()\n comment_to_num_questions = collections.Counter()\n num_comments = 0\n num_answers = 0\n num_questions = len(data)\n\n for q in data:\n q = json.loads(q)\n q_comments = 0\n q_comments += len(q[\"comments\"])\n q_answers = len(q[\"answers\"])\n for a in q[\"answers\"]:\n q_comments += len(a[\"comments\"])\n\n answer_to_num_questions[q_answers] += 1\n comment_to_num_questions[q_comments] += 1\n\n num_comments += q_comments\n num_answers += q_answers\n\n print \"Num comments: {0}, Num answers: {1}, Num_questions: {2}\".format(\n num_comments, num_answers, num_questions)\n print \"-\" * 10\n print \"Answers map: \", answer_to_num_questions\n print \"Comments map: \", comment_to_num_questions\n\n return num_comments, num_answers, num_questions, answer_to_num_questions, \\\n comment_to_num_questions", "def quiz_questions(topic):\n try:\n\n # returns size of quiz question\n 
current_list_size = len(topic)\n score = 0\n # loops through the passed topic questions list\n for i in copy.deepcopy(topic):\n print(i)\n user = input('Enter here:\\t').lower()\n # return correct if the given key question matches the its corresponding value i.e users answer\n if topic[i].lower() == user:\n print('correct\\n')\n score += 1\n else:\n print(\n f'Sorry that was incorrect. The correct answer I was looking for was {topic[i]}\\n') # if not correct display the correct answer\n # del questions[topic][i]\n\n if score == current_list_size: # check to see if they answered all the questions correctly\n print('Nice! you got all the answers correct!')\n else:\n # otherwise return score.\n print(f'You got {score}/{str(current_list_size)} correct!')\n except KeyError:\n print('Something went wrong in quiz_questions(), check logic')", "def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the 
absolute count of occurrences and the count of text_id occurrences both big enough to use it\n # instead of the other loop?\n if ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}", "def export_topics(self):\n\n # format as a list (for json output), then sort descending by textIDCount\n topics = [{'name': topic['name'], 'count': topic['count'],\n 'verbatims': list(topic['verbatims']), 'textIDs': list(topic['textIDs']),\n 'textIDCount': topic['textIDCount'], 'rank': topic['rank'],\n 'children': '' if 'children' not in topic else topic['children']}\n for topic_id, topic in self.topics.items()]\n topics = sorted(topics, key=lambda topic: topic['textIDCount'], reverse=True)\n\n for i, topic in enumerate(topics):\n # Note that 'rank' is from topic, not child.\n topic['children'] = [{'name': child['name'], 'count': child['count'], 'rank': topic['rank'],\n 'verbatims': list(child['verbatims']), 'textIDs': list(child['textIDs']),\n 'textIDCount': child['textIDCount']}\n for _, child in topic['children'].items()]\n\n topic['children'] = sorted(topic['children'], key=lambda lemma: lemma['textIDCount'], reverse=True)\n\n # If the subtopic count is greater than the topic count, than calc a multiplier to size each subtopic\n child_count = sum([child['textIDCount'] for child in topic['children']])\n child_count_multiplier = 1 if child_count < topic['textIDCount'] else topic['textIDCount'] / child_count\n\n for child in topic['children']:\n child['size'] = child['textIDCount'] * child_count_multiplier\n\n topic['size'] = topic['textIDCount'] - (child_count * child_count_multiplier)\n\n # Prune topics over max_topics (default ~40): we stopped calc'ing rank over the max_topics\n self.model_output[\"children\"] = [topic for topic in topics]\n\n # Build file name and save\n if self.data_date:\n date = datetime.strptime(self.data_date, \"%Y-%m-%d\").strftime('%d') # from YYYY-MM-DD to DD\n file_name = '{}-{}-Topics.txt'.format(self.corpus_name, date)\n else:\n file_name = '{}-Topics.txt'.format(self.corpus_name)\n\n with open(config.OUTPUT_DIR + file_name, 'w') as file:\n json.dump(self.model_output, file)", "def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions", "def get_n_questions(self):\n return self.df.question.nunique()", "def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert 
len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def check_intersections(db, topics, papers_by_topic):\n\n\t# Print the distribution of \"number of topics\"\n\tnum_subjects = []\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tnum_subjects.append(len(p.subject))\n\t\telse:\n\t\t\tnum_subjects.append(0)\n\tnum_subjects = np.array(num_subjects)\n\n\tfor i in range(np.max(num_subjects)+1):\n\t\tprint(\"Number of papers with\", i, \"topics:\", \n\t\t\tlen(np.where(num_subjects==i)[0]))\n\n\t# Figure out what's going on with triple-tagged guys (nothing weird)\n\t\"\"\"\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tif len(p.subject) > 2:\n\t\t\t\tprint(\"\\n\",p.title,\"\\n\\t\",p.container_title,\"\\n\\t\", p.subject)\n\t\t\t\t\n\t\t\t\tfor topic, topic_words in topics.items():\n\t\t\t\t\tprint(\"\\tCheck against '\" + topic + \"':\")\n\t\t\t\t\tfor journal in p.container_title:\n\t\t\t\t\t\tcheck_words(journal, topic_words, verbose=True)\n\t\"\"\"\n\n\t# Look in more detail at double-tagged guysfor p_hash, p in db.all_papers.items():\n\tcombos = defaultdict(int)\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tif len(p.subject) == 2:\n\t\t\t\tcombos[frozenset(p.subject)] += 1\n\t\t\t\t#print(\"\\n\",p.title,\"\\n\\t\",p.container_title,\"\\n\\t\", p.subject)\n\t\t\t\tif p.subject == {'Computer Science', 'Biology'}:\n\t\t\t\t\t#print(\"\\n\",p.title,\"\\n\\t\",p.container_title)#,\"\\n\\t\", p.subject)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbio_words = set()\n\t\t\t\t\tCS_words = set()\n\t\t\t\t\tfor journal in p.container_title:\n\t\t\t\t\t\tfor word in topics['Biology']:\n\t\t\t\t\t\t\tif journal.find(word) >= 0:\n\t\t\t\t\t\t\t\tbio_words.add(word)\n\t\t\t\t\t\tfor word in topics['Computer Science']:\n\t\t\t\t\t\t\tif journal.find(word) >= 0:\n\t\t\t\t\t\t\t\tCS_words.add(word)\n\n\t\t\t\t\t#print(\"\\tBiology words:\", bio_words)\n\t\t\t\t\t#print(\"\\tCS words:\", CS_words)\n\t\n\tfor k, v in combos.items():\n\t\tprint(k, v)", "def get_paper_count_per_topic(topic_model, start_year, end_year, debug=False):\n papers_count = get_papers_per_topic(topic_model, None, start_year, end_year, None, debug=debug)\n return sorted(papers_count.items(), key=operator.itemgetter(1), reverse=True)", "def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = 
\"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less", "def group_topics(sent_topics_sorteddf):\n new_topics=pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0).sum()\n plt.pie(new_topics,labels=new_topics.index,autopct='%.0f',pctdistance=.8)\n plt.title('Topic Share %');\n\n new_topic_words = pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0)['Keywords'].sum()\n [print(f'{topic}: ' + words) for topic,words in zip(new_topic_words.index,new_topic_words)]", "def __len__(self):\n size = self.HEADER_LEN + 2 + 4 + 4 # acks + timeout + len(topics)\n for topic, parts in 
iteritems(self.msets):\n # topic name\n size += 2 + len(topic) + 4 # topic name + len(parts)\n # partition + mset size + len(mset)\n size += sum(4 + 4 + len(mset) for mset in itervalues(parts))\n return size", "def counts_table(data, attr):\n pd.options.mode.chained_assignment = None # default='warn'\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n # expanding a table to have all variable options in a column with their \n # parent attribute\n allvariables = attr.apply(lambda x: pd.Series(x['vars']),axis=1).stack().reset_index(level=1, drop=True)\n allvariables.name='var'\n allvariables = attr.drop('vars', axis=1).join(allvariables)\n av = allvariables.drop(attr.index[-1])\n # populate the table with counts\n for c in classlist:\n clist = []\n count = 0\n for i, row in av.iterrows():\n att = row['attr']\n var = row['var']\n sub = data[[att,'class']]\n sub = sub[sub[att]==var]\n if not sub.empty:\n ssub = sub[sub['class']==c]\n if not ssub.empty:\n count = len(ssub)\n else:\n count = 0\n clist.append(count)\n av[c] = clist\n\n return av", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def document_distribution_per_topic(df_dominant_topic):\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'\n\n\n fig, axes = plt.subplots(2,2,figsize=(16,14), dpi=160, sharex=True, sharey=True)\n\n for i, ax in enumerate(axes.flatten()):\n df_dominant_topic_sub = df_dominant_topic.loc[df_dominant_topic.Dominant_Topic == i, :]\n doc_lens = [len(d) for d in df_dominant_topic.Text]\n ax.hist(doc_lens, bins = 1000, color=cols[i])\n ax.tick_params(axis='y', labelcolor=cols[i], color=cols[i])\n sns.kdeplot(doc_lens, color=\"black\", shade=False, ax=ax.twinx())\n ax.set(xlim=(0, 1000), xlabel='Document Word Count')\n ax.set_ylabel('Number of Documents', color=cols[i])\n ax.set_title('Topic: '+str(i), fontdict=dict(size=16, color=cols[i]))\n\n fig.tight_layout()\n fig.subplots_adjust(top=0.90)\n plt.xticks(np.linspace(0,1000,9))\n fig.suptitle('Distribution of Document Word Counts by Dominant Topic', fontsize=22)\n plt.show()", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def _initialize(self):\n for doc_index, doc in enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.append(start_topic_index)\n self.doc_topic_matrix[doc_index, start_topic_index] += 1\n self.topic_word_matrix[start_topic_index, self.word2id[word]] += 1\n self.topic_matrix[start_topic_index] += 1\n 
self.current_word_topic_matrix.append(temp_word_topic_matrix)", "def count_mentioned_pol_figures(data):\n figures_mentioned = {}\n figures = get_political_figures()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for figure in figures:\n if figure + \" \" in (subject_words + message_words):\n if figure in figures_mentioned:\n figures_mentioned[figure] += 1\n else:\n figures_mentioned[figure] = 0\n\n return pd.DataFrame(figures_mentioned)", "def __len__(self):\n # Header + consumer group + len(topics)\n size = self.HEADER_LEN + 2 + len(self.consumer_group) + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition => for each partition\n size += 4 * len(parts)\n return size", "def __len__(self):\n # Header + replicaId + len(topics)\n size = self.HEADER_LEN + 4 + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + fetch offset + max bytes => for each partition\n size += (4 + 8 + 4) * len(parts)\n return size", "def subtype_occurences(self):\n\n subtype_counts = Counter()\n\n for seqkey,seqs in self.seqs.iteritems():\n for seq,seqentry in seqs.iteritems():\n\n subtype_counts[seqentry['subtype']] += 1\n\n return subtype_counts", "def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)", "def get_topics_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'topics')", "def test_topic_reduction(reduced_topics):\n base_bertopic = BERTopic(bert_model='distilbert-base-nli-mean-tokens', verbose=False)\n nr_topics = reduced_topics + 2\n base_bertopic.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents.copy(), topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents.copy(), c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(base_bertopic.mapped_topics, dict)\n assert not set(base_bertopic.get_topics_freq().Topic).difference(set(new_documents.Topic))\n assert base_bertopic.mapped_topics", "def uncategorized(df):\n\n counter = 0\n for movie in 
df.index:\n if len(df.loc[movie, 'imdbGenres']) == 1 and\\\n df.loc[movie, 'Political'] == 0:\n counter += 1\n\n return counter", "def count_mentioned_countries(data):\n countries_mentioned = {}\n countries = get_countries()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for country in countries:\n if country in (subject_words + message_words):\n if country in countries_mentioned:\n countries_mentioned[country] += 1\n else:\n countries_mentioned[country] = 1\n\n return pd.DataFrame.from_dict(countries_mentioned, orient=\"index\")", "def subtrace_count(self, trace, subtrace):\n if len(subtrace) == 0:\n return 0\n\n count = 0\n\n tr = list(map(lambda ac: self.activity_concept_name(ac), trace))\n\n for index in range(len(tr) - len(subtrace) + 1):\n slice = tr[index:index + len(subtrace)]\n\n if subtrace == slice:\n count += 1\n\n return count", "def validate_new_curriculum_topics(self, curriculum_topics):\n\n for cur in curriculum_topics:\n # check to make sure its in the general topics table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Topic WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"topic does not exist, we must create new one or cancel\") # todo\n\n return True", "def compute_subj_metrics():\n\t# each participant does task 1,2,3 with method A,B = 3*2*N\n\t# likert data includes age, gender, Q1 - Q10 = 2+10\n\n\t# set up data structure\n\tsubj_metrics = {}\n\tfor ID in range(10):\n\t\tfor method in [\"A\",\"B\"]:\n\t\t\t# sanity checks\n\t\t\tif ID not in subj_metrics:\n\t\t\t\tsubj_metrics[ID] = {}\n\t\t\tif method not in subj_metrics[ID]:\n\t\t\t\tsubj_metrics[ID][method] = [None]*12\n\n\there = os.path.dirname(os.path.realpath(__file__))\n\tsubdir = \"/data/experimental/\"\n\tdatapath = here + subdir + \"Likert_Responses.csv\"\n\n\tdata = {}\n\tfirstline = True\n\twith open(datapath, 'r') as f:\n\t\tfor line in f:\n\t\t\tif firstline:\n\t\t\t\tfirstline = False\n\t\t\t\tcontinue\n\t\t\tvalues = line.split(',')\n\t\t\tinfo = values[1:len(values)]\n\t\t\tID = int(info[9])\n\t\t\tmethod = info[10]\n\t\t\tage = info[11]\n\t\t\tgender = info[12]\n\t\t\t# store age\n\t\t\tsubj_metrics[ID][method][0] = age\n\t\t\tsubj_metrics[ID][method][1] = gender\n\t\t\t# parse likert data\n\t\t\tfor i in range(8):\n\t\t\t\tsubj_metrics[ID][method][i+2] = info[i]\n\t\t\tsubj_metrics[ID][method][10] = info[14]\n\t\t\tsubj_metrics[ID][method][11] = info[15]\n\n\treturn subj_metrics", "def count_score(self, docsInfo, avgdl, k1, b):\n\t\tdocScore = []\n\t\tfor doc in docsInfo:\n\t\t\tcurDocScore = 0\n\t\t\tfor queryWord in range(len(doc['meetCnt'])):\n\t\t\t\tTF = float(doc['meetCnt'][queryWord])\n\t\t\t\tfreaq = sum(1 for x in docsInfo if x['meetCnt'][queryWord])\n\t\t\t\tcurDocScore += self.count_IDF(len(docsInfo), freaq) * self.count_main_fraction(TF, k1, b, avgdl, doc['len'])\n\t\t\tdocScore.append(curDocScore)\n\t\treturn docScore", "def _count_subscriptions(self):\n for partner in self:\n subscriptions = self.env['subscription.subscription']\n count = subscriptions.sudo().search_count([('partner_id', '=', partner.id)])\n for child in partner.child_ids:\n count += subscriptions.sudo().search_count([('partner_id', '=', child.id)])\n partner.subscriptions_count = count", "def question_count_total(self, obj):\n return obj.questions.count()", "def cubetest_per_topic(topic_truth, topic_result, gamma, max_height, cutoff):\n subtopic_num = topic_truth[1]\n 
topic_truth = topic_truth[0]\n\n subtopic_height = Counter() # current height of every subtopic\n subtopic_count = Counter() # #docs found relevant to every subtopic (nrels)\n\n weight_per_subtopic = 1.0 / subtopic_num\n\n def gain_per_doc(doc_no):\n if doc_no not in topic_truth:\n return 0\n gain = 0\n for subtopic_id, rating in topic_truth[doc_no].items():\n if subtopic_height[subtopic_id] < max_height:\n discount_height = (gamma ** (subtopic_count[subtopic_id] + 1)) * rating\n if discount_height + subtopic_height[subtopic_id] > max_height:\n discount_height = max_height - subtopic_height[subtopic_id]\n\n gain += weight_per_subtopic * discount_height\n # print(doc_no, subtopic_id,\"original_height\", rating, \"discount height\", discount_height)\n subtopic_height[subtopic_id] += discount_height\n subtopic_count[subtopic_id] += 1\n # print(doc_no, gain)\n return gain\n\n sorted_result = sorted(topic_result.items(), key=lambda x: x[0])\n time = 0.0\n total_gain = 0\n accu_gain = 0\n doc_num = 0\n for iter_num, doclist in sorted_result:\n if iter_num >= cutoff:\n break\n time += 1\n # gain_per_iteration = 0\n for doc_no in doclist:\n total_gain += gain_per_doc(doc_no)\n accu_gain += (total_gain / max_height / time)\n doc_num += 1\n\n # print(time)\n if time != 0:\n ct = total_gain / max_height / time\n else:\n ct = 0\n # print(doc_num)\n if doc_num > 0:\n act = accu_gain / doc_num\n else:\n act = 0\n # print( accu_gain , total_gain)\n return total_gain / max_height, ct, act", "def part_one(answer_data):\n group_counts = []\n for answer_group in parse_groups(answer_data):\n group_counts.append(count_answer_set_union(answer_group))\n return sum(group_counts)", "def _summarize_multiple_questions(self, data, id_prefix):\n type_info_dict = data['containedTypes']\n questions_list = []\n\n for instanceid, type_info in type_info_dict.iteritems():\n if isinstance(type_info, list):\n # This is a question group.\n mc_indices = [i for i in xrange(len(type_info))\n if type_info[i] == self.MC_QUESTION]\n questions_list += [{\n 'id': '%s.c.%s.i.%s' % (id_prefix, instanceid, index),\n 'score': data['individualScores'][instanceid][index],\n 'answers': data['answers'][instanceid][index]\n } for index in mc_indices if (\n data['answers'][instanceid][index])]\n\n elif (type_info == self.MC_QUESTION and\n data['answers'][instanceid]):\n # This is an individual multiple-choice question.\n questions_list += [{\n 'id': '%s.c.%s' % (id_prefix, instanceid),\n 'score': data['individualScores'][instanceid],\n 'answers': data['answers'][instanceid]\n }]\n\n return questions_list", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def test_extract_topics_custom_cv():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n\n cv = CountVectorizer(ngram_range=(1, 2))\n model = BERTopic(vectorizer=cv)\n model._update_topic_size(documents)\n 
model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self", "def __call__(self, example):\n para_counter = data.count_tokens(example['context_tokens'] if not self._iterate_over_example\n else [c for tkn in example['context_tokens'] for c in tkn])\n ques_counter = data.count_tokens(example['ques_tokens'] if not self._iterate_over_example\n else [c for tkn in example['ques_tokens'] for c in tkn])\n counter = para_counter + ques_counter\n return list(counter.items())", "def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def build_intersection_matrix_of_subreddits(sub_count_list, top_sub_n=2000):\n sorted_keys = []\n for sub_counts in sub_count_list:\n sorted_keys.append(set(sorted(sub_counts, key=lambda x: sub_counts[x], reverse=True)[:top_sub_n]))\n \n int_list = []\n for i, s_key in enumerate(sorted_keys):\n temp_key_list = []\n temp_key_list.extend(sorted_keys[:i])\n temp_key_list.extend(sorted_keys[i+1:])\n temp_key_set = calculate_intersection(temp_key_list)\n sub_set = set(s_key)\n int_count = len(sub_set.intersection(temp_key_set))\n int_list.append(int_count)\n return int_list", "def test_extract_topics_custom_cv(base_bertopic_custom_cv):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic_custom_cv._update_topic_size(documents)\n c_tf_idf = base_bertopic_custom_cv._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic_custom_cv.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def random_init(self, docs):\n for di in xrange(len(docs)):\n doc = docs[di]\n topics = np.random.randint(self.n_topic, size=len(doc))\n 
self.topic_assignment.append(topics)\n\n for wi in xrange(len(doc)):\n topic = topics[wi]\n word = doc[wi]\n self.TW[topic, word] += 1\n self.sum_T[topic] += 1\n self.DT[di, topic] += 1", "def get_num_con_cat(con_by_sample, cat_by_sample):\r\n num_con_cat = defaultdict(float)\r\n num_con = 0\r\n for sample, connects in con_by_sample.items():\r\n sample_categories = cat_by_sample[sample]\r\n for s_con in connects:\r\n if s_con not in cat_by_sample.keys():\r\n continue\r\n for s_cat, con_cat in zip(sample_categories, cat_by_sample[s_con]):\r\n if s_cat == con_cat:\r\n num_con_cat[s_cat[0]] += 0.5\r\n num_con += 0.5\r\n\r\n return num_con_cat, num_con", "def full_summarizer_word_comparison(sentences, topic_sentences, number_topics):\n\n word_counts = []\n\n for sentence in sentences:\n document_1_words = sentence.split()\n document_2_words = ''.join(topic_sentences).split()\n\n common_words = set(document_1_words).intersection(set(document_2_words))\n word_counts.append(len(common_words))\n\n return [j for i, j in sorted(list(zip(word_counts, sentences)), reverse=True)][0:number_topics]", "def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), \"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])", "def __len__(self):\n count = 0\n topics = set(six.iterkeys(self._topics))\n while topics:\n event_type = topics.pop()\n try:\n 
listeners = self._topics[event_type]\n count += len(listeners)\n except KeyError:\n pass\n return count", "def get_n_per_feat(self, feat_subset=None):\n zipper = self.create_zipper(feat_subset)\n\n for key in zipper:\n # turns zipper values into lists storing the number of entries of the respective features per dataset\n zipper[key] = [len(z) for z in zipper[key]]\n\n counts = pd.DataFrame(zipper).transpose()\n counts.index.name = \"features\"\n counts.columns = self.df_names\n\n return counts", "def listTopicRelevance(request):\n if request.method == 'GET':\n user = request.user;\n data = [];\n for topic in Topic.objects.all():\n row = {};\n\n topicSerializer = TopicNestedSerializer(topic)\n topicSerializer.Meta.depth = 1;\n #row['topic'] = topicSerializer.data;\n user_visits = topic.visits.filter(user=user)\n visitSerializer = VisitSerializer(user_visits, many=True)\n #visitSerializer.Meta.depth = 1;\n row['visit_count'] = len(user_visits);\n if row['visit_count'] > 0:\n row['last_visit'] = user_visits.order_by('-visit_date')[0].visit_date\n else:\n row['last_visit'] = topic.created_at\n\n neighbor_visits = Visit.objects.filter(user=user, topic__relates_to__topic_to=topic)\n\n row['neighbor_visits_count'] = len(neighbor_visits);\n if row['neighbor_visits_count'] > 0:\n row['last_neighbor_visit'] = neighbor_visits.order_by('-visit_date')[0].visit_date;\n else:\n row['last_neighbor_visit'] = topic.created_at\n\n row['post_count'] = len(topic.posts.filter(user=user))\n row['like_count'] = len(topic.posts.filter(votes__user=user))\n row['relevance_score'] = 5*row['neighbor_visits_count'] - (timezone.now()-row['last_neighbor_visit']).total_seconds()/3600\n row['recommendation'] = row['relevance_score'] + topic.hotness\n\n data.append(row)\n\n print(data)\n return Response(data)", "def __rank_topics(self, found_topics, explanation):\n max_value = 0\n scores = []\n for _,topic in found_topics.items():\n topic[\"score\"] = topic[\"times\"] * len(topic['grams'].keys())\n scores.append(topic[\"score\"])\n if topic[\"score\"] > max_value:\n max_value = topic[\"score\"]\n\n for _,topic in found_topics.items():\n if \"syntactic\" in topic:\n topic[\"score\"] = max_value\n\n\n\n\n # Selection of unique topics\n unique_topics = {}\n for t_p,topic in found_topics.items():\n prim_label = self.cso.get_primary_label_wu(t_p)\n if prim_label in unique_topics:\n if unique_topics[prim_label] < topic[\"score\"]:\n unique_topics[prim_label] = topic[\"score\"]\n else:\n unique_topics[prim_label] = topic[\"score\"]\n\n # ranking topics by their score. 
High-scored topics go on top\n sort_t = sorted(unique_topics.items(), key=lambda v: v[1], reverse=True)\n #sort_t = sorted(found_topics.items(), key=lambda k: k[1]['score'], reverse=True)\n\n\n # perform\n vals = []\n for t_p in sort_t:\n vals.append(t_p[1]) #in 0, there is the topic, in 1 there is the info\n\n\n #### suppressing some warnings that can be raised by the kneed library\n warnings.filterwarnings(\"ignore\")\n try:\n x_vals = range(1,len(vals)+1)\n t_kn = KneeLocator(x_vals, vals, direction='decreasing')\n if t_kn.knee is None:\n #print(\"I performed a different identification of knee\")\n t_kn = KneeLocator(x_vals, vals, curve='convex', direction='decreasing')\n except ValueError:\n pass\n\n ##################### Pruning\n\n try:\n knee = int(t_kn.knee)\n except TypeError:\n knee = 0\n except UnboundLocalError:\n knee = 0\n\n if knee > 5:\n try:\n knee += 0\n except TypeError:\n print(\"ERROR: \",t_kn.knee,\" \",knee, \" \", len(sort_t))\n\n else:\n try:\n if sort_t[0][1] == sort_t[4][1]:\n top = sort_t[0][1]\n test_topics = [item[1] for item in sort_t if item[1]==top]\n knee = len(test_topics)\n\n else:\n knee = 5\n except IndexError:\n knee = len(sort_t)\n\n final_topics = []\n final_topics = [self.cso.get_topic_wu(sort_t[i][0]) for i in range(0,knee)]\n self.reset_explanation()\n self.explanation = {self.cso.topics_wu[sort_t[i][0]]: explanation[sort_t[i][0]] for i in range(0,knee)}\n\n return final_topics", "def subtype_counts(node_set, G, log=False):\n subtypes = Counter()\n for n in node_set:\n subtype = G.node[n]['subtype']\n subtypes[subtype] += 1\n\n if log:\n for k, v in subtypes.items():\n subtypes[k] = np.log10(v)\n \n return subtypes", "def get_number_of_locations():\n count = 0\n tree = ET.parse('./devset_topics.xml')\n root = tree.getroot()\n for item in root.findall('./topic'):\n count = count + 1\n return count", "def __find_topics(self, concepts):\n\n # Set up\n found_topics = dict() # to store the matched topics\n explanation = dict()\n\n # finding matches\n for concept in concepts:\n evgrams = everygrams(concept.split(), 1, 3) # list of unigrams, bigrams, trigrams\n for grams in evgrams:\n gram = \"_\".join(grams)\n gram_without_underscore = \" \".join(grams)\n #### Finding similar words contained in the model\n\n list_of_matched_topics = []\n\n if self.fast_classification:\n list_of_matched_topics = self.__get_similar_words_from_cached_model(gram,grams)\n else:\n list_of_matched_topics = self.__get_similar_words_from_full_model(gram, grams)\n\n\n for topic_item in list_of_matched_topics:\n\n topic = topic_item[\"topic\"]\n str_sim = topic_item[\"sim_t\"]\n wet = topic_item[\"wet\"]\n sim = topic_item[\"sim_w\"]\n\n\n if str_sim >= self.min_similarity and topic in self.cso.topics_wu:\n\n\n if topic in found_topics:\n #tracking this match\n found_topics[topic][\"times\"] += 1\n\n found_topics[topic][\"gram_similarity\"].append(sim)\n\n #tracking the matched gram\n if gram in found_topics[topic][\"grams\"]:\n found_topics[topic][\"grams\"][gram] += 1\n else:\n found_topics[topic][\"grams\"][gram] = 1\n\n #tracking the most similar gram to the topic\n if str_sim > found_topics[topic][\"embedding_similarity\"]:\n found_topics[topic][\"embedding_similarity\"] = str_sim\n found_topics[topic][\"embedding_matched\"] = wet\n\n else:\n #creating new topic in the result set\n found_topics[topic] = {'grams': {gram:1},\n 'embedding_matched': wet,\n 'embedding_similarity': str_sim,\n 'gram_similarity':[sim],\n 'times': 1,\n 'topic':topic}\n\n\n\n if sim == 1:\n 
found_topics[topic][\"syntactic\"] = True\n\n\n\n primary_label_topic = self.cso.get_primary_label_wu(topic)\n if primary_label_topic not in explanation:\n explanation[primary_label_topic] = set()\n\n explanation[primary_label_topic].add(gram_without_underscore)\n\n return found_topics, explanation", "def get_counts(data):\n\n word_count = {}\n syll_count = {}\n\n infile = data.corpus\n try:\n\n open_file = codecs.open(infile, 'r', encoding='utf-16')\n for line in open_file:\n line = line.lower()\n # Remove tablet indexing info and line numbers. Grab only text data\n line = line.split(',')\n text = clean_line(line[7])\n\n # Update the occurrences of the words in the line\n for word in text.split():\n count = word_count.setdefault(word, 0)\n word_count[word] = count + 1\n\n # Track occurrences of syllables\n update_syllable_count(word, syll_count)\n\n open_file.close()\n except IOError:\n print(\"Cannot open: \" + infile)\n\n return (word_count, syll_count)", "def create_tag_frequencies(dataframe):\n from pyspark.sql.functions import desc\n from pyspark.sql.functions import col\n df_tags = dataframe.selectExpr(\"tag1 AS tag\").union(dataframe.selectExpr(\"tag2 AS tag\")).union(dataframe.selectExpr(\"tag3 AS tag\")) \\\n .union(dataframe.selectExpr(\"tag4 AS tag\")).union(dataframe.selectExpr(\"tag5 AS tag\"))\n df_tags = df_tags.na.drop(subset=[\"tag\"])\n tags_total_count = df_tags.count()\n print(\"Total number of tags used, including duplicates:\",tags_total_count)\n df_tag_freq = df_tags.groupBy(\"tag\").count().orderBy(desc(\"count\"))\n df_tag_freq = df_tag_freq.withColumn(\"frequency\", col(\"count\")/tags_total_count)\n df_tag_freq.orderBy(desc(\"frequency\")).show(20)", "def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]", "def return_topic_figures(n_topics=5):\n\n ### import data ###\n\n data = return_keywords()\n data_for_topics = data[\"abstract_kw\"].apply(\n lambda x: list(ast.literal_eval(x).keys())\n )\n\n ### Build topic model ###\n\n # parameters\n n_topics = n_topics\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_for_topics)\n\n # Create Corpus: Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in data_for_topics]\n\n # Build LDA model\n lda_model = gensim.models.ldamodel.LdaModel(\n corpus=corpus,\n 
id2word=id2word,\n num_topics=n_topics,\n random_state=100,\n update_every=1,\n chunksize=10,\n passes=10,\n alpha=\"symmetric\",\n iterations=100,\n per_word_topics=True,\n )\n\n topics = lda_model.show_topics(formatted=False)\n data_flat = [w for w_list in data_for_topics for w in w_list]\n counter = Counter(data_flat)\n out = []\n for i, topic in topics:\n for word, weight in topic:\n out.append([word, i, weight, counter[word]])\n df = pd.DataFrame(out, columns=[\"word\", \"topic_id\", \"importance\", \"word_count\"])\n\n specs = np.full((ceil(n_topics / 2), 2), {\"secondary_y\": True})\n topic_bar_charts = make_subplots(\n rows=ceil(n_topics / 2),\n cols=2,\n specs=specs.tolist(),\n horizontal_spacing=0.1,\n vertical_spacing=0.15,\n )\n row, col = (0, 0)\n for topic in range(n_topics):\n if (topic % 2) != 0:\n col = 2\n else:\n col = 1\n row += 1\n color = px.colors.qualitative.Vivid[topic]\n topic_bar_charts.add_trace(\n go.Bar(\n x=df.loc[df.topic_id == topic, \"word\"],\n y=df.loc[df.topic_id == topic, \"word_count\"],\n width=0.5,\n opacity=0.3,\n marker_color=color,\n name=(\"Topic \" + str(topic) + \" word count\"),\n ),\n secondary_y=False,\n row=row,\n col=col,\n )\n topic_bar_charts.add_trace(\n go.Bar(\n x=df.loc[df.topic_id == topic, \"word\"],\n y=df.loc[df.topic_id == topic, \"importance\"],\n width=0.2,\n marker_color=color,\n name=(\"Topic \" + str(topic) + \" weight\"),\n ),\n secondary_y=True,\n row=row,\n col=col,\n )\n topic_bar_charts.update_layout(barmode=\"overlay\")\n\n topic_bar_charts.update_layout(\n height=800, width=1000, margin=dict(l=50, r=50, t=50, b=100)\n )\n\n # append all charts\n figures = [dict(data=topic_bar_charts)]\n\n return figures", "def test_properties_count_group_by_group_by_and_sub_group_by_get(self):\n pass", "def execQ2():\n # Put columns together\n frame = pan.DataFrame(data, columns=['Product', 'Amount'] )\n amount = frame.groupby(['Product']).count()\n return amount", "def initialize(self):\n # Initializing the counter and distribution.\n for k in range(0, self.topic_number,1):\n self.topic_term_count_matrix[k]= [0.0] * self.term_number\n self.topic_distribution_over_term[k] = [0.0] * self.term_number\n self.sum_topic_by_term_count[k] = 0.0\n for m in range(0, self.document_number,1):\n self.document_topic_count_matrix[m] = [0.0] * self.topic_number\n self.document_distribution_over_topic[m] = [0.0] * self.topic_number\n self.sum_document_by_topic_count[m] = 0.0\n\n # Initializing topics assigned to all words of all documents.\n for m in range(0, self.document_number, 1):\n N = len(self.documents[m])\n self.word_topic_assignment[m] = [-1] * N\n for n in range(0, N,1):\n topic = int(random.uniform(0,1) * self.topic_number)\n self.document_topic_count_matrix[m][topic] += 1.0\n self.topic_term_count_matrix[topic][self.documents[m][n]] += 1.0\n self.sum_topic_by_term_count[topic] += 1.0\n self.word_topic_assignment[m][n] = topic\n self.sum_document_by_topic_count[m] = N", "def solution_2(arr):\n total = 0\n for group in arr:\n group_list = []\n for person in group:\n group_list = group_list + person\n group_table = Counter(''.join(group_list))\n for k, v in group_table.items():\n if v == len(group):\n total += 1\n return total", "def get_combined_topics(team):\n know1, know2 = team\n knowledge = int(know1, base=2) | int(know2, base=2)\n return bin(knowledge).count('1')", "def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in 
dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def count_questions(txt):\n count = 0\n for c in txt:\n if c == '?':\n count += 1\n return count", "def get_query_count(dataset_id, dataset_type, query_dataset_dict):\n if dataset_type == \"result_table\":\n return query_dataset_dict.get(dataset_id, 0)\n else:\n clean_rt_list = get_clean_rt(dataset_id)\n query_count = 0\n for each_rt in clean_rt_list:\n query_count += query_dataset_dict.get(each_rt, 0)\n return query_count", "def getClassCounts(column, uniqueVal, decision, yes, no , total):\r\n dataDict = {} # a dictionary of labels\r\n for val in uniqueVal:\r\n label1 = val + '/Y'\r\n label2 = val + '/N'\r\n dataDict[label1] = 0; dataDict[label2] = 0\r\n for dec, at in zip(decision, column):\r\n if at == val and dec == 'No':\r\n dataDict[label2] += 1\r\n if at == val and dec == 'Yes':\r\n dataDict[label1] += 1\r\n dataDict[val] = (dataDict[label2]+ dataDict[label1])/ total\r\n dataDict[label2] = dataDict[label2] / no\r\n dataDict[label1] = dataDict[label1] / yes\r\n return dataDict", "def run_compute_reread_counts(self):\n questions = []\n contexts = []\n student_data = self.responses[:]\n for response in student_data:\n if response.question.text not in questions:\n questions.append(response.question.text)\n if response.context.text not in contexts:\n contexts.append(response.context.text)\n\n compute_reread_counts_data = []\n\n for question in questions:\n for context in contexts:\n compute_reread_counts_data.append(self.compute_reread_counts(\n question, context))\n\n return compute_reread_counts_data", "def cat_count(data, column_str, criteria):\r\n ct1 = []\r\n ct2 = []\r\n for i in range(len(find_cats_freq(data, column_str))):\r\n ct1.append(find_cats_freq(data[criteria], column_str)[i])\r\n ct2.append(find_cats_freq(data, column_str)[i])\r\n return np.array(ct1)/np.array(ct2)", "def posts_per_topic_all(request, pk):\n #update is_expired in all posts\n update_posts_expiration()\n #get all posts with a certain topic\n posts = Post.objects.filter(topic=pk)\n serializer = ViewPostSerializer(posts, many=True)\n return Response(serializer.data)", "def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)", "def display_topics(df, n_rows=10, 
n_cols=12):\n\n exemplar_scores, hovers = topic_exemplars(df)\n top_columns = sorted(range(len(exemplar_scores)),\n key=lambda i: exemplar_scores[i],\n reverse=True)[:n_cols]\n #I comented this line Im not 100% sure what was the purpuse of this\n # topics = df.pivot(index='pos', columns='topic',values='word*').replace([None], [''], regex=True)\n topics = df.pivot(index='pos', columns='topic',values='word*')\n\n topics_display = topics[top_columns].head(n_rows)\n\n return topics_display, top_columns", "def __len__(self):\n # Header + string size + consumer group size\n size = self.HEADER_LEN + 2 + len(self.consumer_group)\n # + generation id + string size + consumer_id size + array length\n size += 4 + 2 + len(self.consumer_id) + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + offset + timestamp => for each partition\n size += (4 + 8 + 8) * len(parts)\n # metadata => for each partition\n for partition, (_, _, metadata) in iteritems(parts):\n size += 2 + len(metadata)\n return size", "def get_num_cat(sample_by_cat, samples_in_otus):\r\n num_cat = defaultdict(int)\r\n for cat, samples in sample_by_cat.items():\r\n num_samples = len(set(samples_in_otus) & set(samples))\r\n num_cat[cat[0]] += (num_samples * (num_samples - 1)) / 2\r\n return num_cat", "def get_total_partitions_for_topic(self, topic):\n return len(self.client.cluster.partitions_for_topic(topic))", "def count_frequency(data: list) -> list:\n counts = [{'A': 0, 'C': 0, 'G': 0, 'T': 0} for _ in range(len(data[0][1]))]\n for i in range(len(data[0][1])):\n for datum in [datum[1] for datum in data]:\n counts[i][datum[i]] += 1\n return counts", "def test_topic_reduction_edge_cases(base_bertopic):\n\n nr_topics = 5\n base_bertopic.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents, topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents, c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def basic_statistics_of_email(data):\n word_counts = []\n character_count = 0\n\n for ind, row in data.iterrows():\n tokenizer = RegexpTokenizer(r'\\w+')\n real_words = tokenizer.tokenize(row[\"RawText\"].lower())\n\n character_count += sum(map(len, real_words))\n word_counts.append(len(real_words))\n\n return character_count, pd.Series(word_counts)", "def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))", "def get_topics(self):\n topics = self.word_topics\n return topics / topics.sum(axis=1)[:, None]", "def analysis():\r\n data_frame = load_from_mysql('core', 'BDFMHQAA_D')\r\n data_frame.registerTempTable('business')\r\n gd = data_frame.select('AA03CSNO', 'AA08PRON')\r\n\r\n def merge_count(a, b):\r\n r = {}\r\n for p, c in a.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n for p, c in b.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n return r\r\n result = gd.map(lambda row: (row.AA03CSNO, {row.AA08PRON: 
1})).reduceByKey(merge_count)\r\n pron_count = gd.map(lambda row: (row.AA08PRON, 1)).reduceByKey(lambda a, b: a + b)\r\n\r\n # result = gd.map(lambda row: (row.AA03CSNO, row.AA08PRON))\r\n print(result.take(10))\r\n print('----------------pron count-----------------')\r\n print(pron_count.collect())\r\n\r\n print(gd)", "def _update_counts(self, msg, subtype, by):\n\n try:\n counts = self.get_local(msg, \"counts\")\n except KeyError:\n counts = defaultdict(int)\n\n counts['all'] += by\n counts[subtype] += by\n self.set_local(msg, \"counts\", counts)", "def test_topic_model_generator_dimensions( ):\n N = 100\n D = 1000\n K = 10\n W = 100\n\n tm = TopicModel.generate( K, D )\n assert( tm.topics.shape == (D, K) )\n assert( tm.weights.shape == (K,) )\n\n docs = tm.sample( N, words = W )\n # Each document is a column\n assert( docs.shape == (N, D) ) \n # Each doc should have 100 words\n assert( sc.all(docs.sum(1) == W) )", "def test_table_counts():\n number_of_test_run = 2 # Run the pipeline twice\n for i in range(number_of_test_run):\n dp = DataPipeline()\n dp.run()\n\n dp = DataPipeline()\n assert dp.get_product_count() == (500000,)\n assert dp.get_duplicate_count(from_table=\"products\") == (0,)\n assert dp.get_aggregate_table_result_count() == (222024, )\n 222024\n dp.close()" ]
[ "0.6717123", "0.63702446", "0.61713773", "0.5966875", "0.5961095", "0.5927321", "0.5897049", "0.579996", "0.5752333", "0.5736918", "0.5653994", "0.5621641", "0.55750763", "0.54705304", "0.54477894", "0.54441655", "0.5436638", "0.5420717", "0.5412772", "0.53985864", "0.5367074", "0.53649616", "0.5307551", "0.5292152", "0.5262273", "0.52395135", "0.5210288", "0.520255", "0.51896495", "0.5169957", "0.5127032", "0.5111573", "0.510007", "0.5087801", "0.50854045", "0.5079777", "0.5072392", "0.5065675", "0.5036765", "0.5035437", "0.50350946", "0.5033354", "0.5031456", "0.502549", "0.501906", "0.49957436", "0.49928945", "0.49774235", "0.497683", "0.49719694", "0.49691325", "0.49684167", "0.49680936", "0.49659014", "0.49578416", "0.49550122", "0.4954171", "0.49471876", "0.49356496", "0.49332508", "0.49275005", "0.49233413", "0.49055085", "0.48977187", "0.48957607", "0.48929524", "0.48916018", "0.4886652", "0.48847088", "0.4861437", "0.48612806", "0.48562106", "0.48551533", "0.485392", "0.48475695", "0.4847457", "0.4846999", "0.48432577", "0.48335657", "0.4831336", "0.48305693", "0.4823364", "0.4819344", "0.4813468", "0.4810518", "0.4810284", "0.48072892", "0.48056188", "0.47954002", "0.47933036", "0.47869015", "0.47766662", "0.4774449", "0.477056", "0.47664762", "0.47646415", "0.4763438", "0.47596654", "0.47589552", "0.4758853" ]
0.62771106
2
Take a specific list from a Spark RDD, which is formed as a list of tuples (Topic, Question)
def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame:
    data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question'])
    print(data_frame_pandas)
    return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_questions_of_topic(topic):\n\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n fe = Attr(\"TopicId\").eq(topic.get(\"TopicId\"))\n response = question_table.scan(FilterExpression=fe)\n questions = response.get(\"Items\")\n return questions", "def get_words_from_tuples(examples):\n return [[t[0] for t in example] for example in examples]", "def getSubmissionsByTopic(self, i):\r\n return [(ind,sub) for ind, sub in enumerate(self.submissions) if sub.topicId == i]", "def to_simple_rdd(sc: SparkContext, features: np.array, labels: np.array):\n pairs = [(x, y) for x, y in zip(features, labels)]\n return sc.parallelize(pairs)", "def get_users_movies(myRatings):\n #return [x[1] for x in myRatings]\n return list(myRatings.map(lambda x: x[1]).collect())", "def partition(rows: list, question: Question) -> (list, list):\n true_rows = []\n false_rows = []\n for row in rows:\n if question.match(row): # True\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows", "def apply_transformations(rdd):\n ### BEGIN SOLUTION ###\n return rdd", "def getPracticesRDD(sqlContext):\n from db import *\n return sqlContext \\\n .read \\\n .format(\"jdbc\") \\\n .options(\n driver=driver,\n url=url,\n dbtable=\"user_practice\",\n user=user,\n password=password\n ) \\\n .load() \\\n .rdd \\\n .map(lambda row: (row.id, row)) # We'll use the id to join with the users rdd defined above", "def get_questions():\n data = pd.read_excel('Data/APNLP_QuestionsToUser.xlsx')\n df = pd.DataFrame(data, columns=['Questions'])\n lst = df.values.tolist()\n questions = []\n for q in lst:\n q = str(q)[2:-2]\n questions.append(q)\n return questions", "def read_tuples_list(corpus_path):\n idx = 0\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, label_h_, pos_, ner_ = [], [], [], []\n\n for line in lines:\n idx += 1\n if line.find(\"DOC-ID\") < 0 and line != '\\n':\n try:\n [char, label_h, pos_tag, ner_tag] = line.strip().split()\n sent_.append(char)\n label_h_.append(label_h)\n pos_.append(pos_tag)\n ner_.append(ner_tag)\n except BaseException as e:\n print(e)\n print(line)\n else:\n # print(line)\n if idx > 1:\n data.append((sent_, label_h_, pos_, ner_))\n sent_, label_h_, pos_, ner_ = [], [], [], []\n\n return data", "def processRDD(RDD, label):\n try:\n newRDD = RDD.map(lambda x: ((x[2], x[3]), (float(x[0]), float(x[1]), float(x[4]), label)))\n return newRDD\n except Exception as e:\n print e", "def build_recommendations(sc, myRatings, model):\n #myRatedMovieIds = set([x[1] for x in myRatings])\n uid = get_uid_from_ratings(myRatings)\n #print \"uid:\", uid\n myRatedMovieIds = set([x[1] for x in myRatings.collect()])\n #print \"myRatedMovieIds:\", myRatedMovieIds\n candidates = sc.parallelize([m for m in movies if m not in myRatedMovieIds]).cache()\n #print candidates\n predictions = model.predictAll(candidates.map(lambda x: (uid, x))).collect()\n #print predictions\n recommendations = sorted(predictions, key = lambda x: x.product)\n return recommendations", "def get_topic_quiz(topic_id):\n quiz_data = query_db(\"SELECT id, name FROM quizzes WHERE topic_id=?;\", [topic_id])\n quizzes = []\n for quiz in quiz_data:\n quiz_topic = {}\n quiz_topic[\"id\"] = quiz[0]\n quiz_topic[\"name\"] = quiz[1]\n quizzes.append(quiz_topic)\n return quizzes", "def get_store_item_list(spark) -> list:\n sqlDF = spark.sql(\"SELECT DISTINCT SKU, Store FROM dfView\")\n store_item_list = 
sqlDF.rdd.map(tuple).collect()\n return store_item_list", "def get_topic_of_question(question):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n topic_table = dynamodb.Table(\"Topics\")\n\n topic_id = question.get(\"TopicId\")\n # query topic_id of the question\n try:\n response = topic_table.get_item(Key={\"TopicId\": topic_id})\n topic = response[\"Item\"]\n except:\n print(\"No topic found, returning None..\")\n return None\n return topic", "def split_by_time(rdd, ratio):\n #(user_id, [(item_id, rating, time)])\n sort_by_time = rdd.map(lambda x:(x[0], (x[1], x[2], x[3])))\\\n .groupByKey().mapValues(list)\\\n .map(lambda x:(x[0],sorted(x[1], key=operator.itemgetter(2), reverse=False)))\n\n def _split_f(x, ratio):\n s = sum(ratio)\n b = 0\n l = []\n for r in ratio:\n e = int(float(r)/s*len(x))\n l.append(x[b:b+e])\n b = b+e\n return l\n\n #(user_id, [[(item_id, rating, time)]...[]])\n split_by_ratio = sort_by_time.map(lambda x: (x[0], _split_f(x[1], ratio)))\n print(\"split by ratio: \", split_by_ratio.take(1))\n\n split_list = []\n for i, _ in enumerate(ratio):\n split_list.append(split_by_ratio.map(lambda x:(x[0], x[1][i]))\\\n .flatMap(lambda x:[(x[0], y[0], y[1], y[2]) for y in x[1]]))\n print(\"i \", i)\n print(\"len \", split_list[i].count())\n\n return tuple(split_list)", "def spark_df_to_records(df):\n return [tuple(r) for r in df.collect()]", "def partition(self, rows, question):\n true_rows, false_rows = [], []\n for row in rows:\n if question.match(row):\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows", "def compute_topic_assignment(topic_model, abstracts, min_df=2, max_df=0, tf_matrix_dump_filename=None):\n if max_df == 0:\n max_df = 0.9 if len(abstracts) < 100 else 0.8\n\n\n # get the gensim representation of new papers in order to query the topic model\n papers_gensim = get_gensim_representation_new_docs_for_inference(abstracts, topic_model.id2word,\n use_lemmer=True, min_df=2,\n max_df=max_df,\n tf_matrix_dump_filename=tf_matrix_dump_filename)\n\n # for each paper compute the topics assignment\n topic_assignment = [None] * len(papers_gensim)\n for i, paper in enumerate(papers_gensim):\n topic_assignment[i] = topic_model[paper]\n\n return topic_assignment", "def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]", "def ex_list(data):\n return tuple(data)", "def read_data(file_path, sparkContext):\n data_rdd = sparkContext \\\n .textFile(file_path) \\\n .map(eval) \\\n .map(lambda x: (x[0], x[1]))\n return data_rdd", "def _topics_words(self, num_of_words):\n x = self.model.show_topics(-1, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[0] for i in topic[1]] for topic in x]", "def get_custom_phrases():\n return [x[0] for x in all_topics if x[2] == \"1\"]", "def DatasetToTuple(sample):\n \n X_elem = []\n Y_elem = []\n for x,y in sample:\n X_elem.append(x if x.dim() > 0 else x.item())\n Y_elem.append(y if y.dim() > 0 else y.item()) \n return (torch.stack(X_elem),torch.stack(Y_elem))", "def DatasetToTuple(sample):\n \n X_elem = []\n Y_elem = []\n for x,y in sample:\n X_elem.append(x if x.dim() > 0 else x.item())\n Y_elem.append(y if y.dim() > 0 else y.item()) \n return (torch.stack(X_elem),torch.stack(Y_elem))", "def getUsersRDD(sqlContext):\n # Currently the id field ranges from '0' to '1000000'.\n # To avoid loading it all in memory, partition on the id field 
(100 partitions, about 10k records per partition).\n # Also setting fetch size to 10,000 to avoid multiple database calls per partition.\n # All records from a single partition will come in a single query.\n # If we need to use less memory, we can increase the # of partitions and decrease the lower/uppper bounds.\n # We are also relying on Spark to spill to disk if no memory is available.\n from db import *\n return sqlContext \\\n .read \\\n .format(\"jdbc\") \\\n .options(\n driver=driver,\n url=url,\n dbtable=\"user\",\n user=user,\n password=password,\n fetchSize=10000,\n numPartitions=100,\n partitionColumn=\"id\",\n lowerBound=0,\n upperBound=1000000\n ) \\\n .load() \\\n .rdd \\\n .persist(StorageLevel.MEMORY_AND_DISK) \\\n .map(lambda row: (row.practice_id, row)) # We are setting practice_id as the key here because we'll use that to join with user_practice table", "def get_tweets_by_topic(topic, start_date, end_date):\n try:\n query = f\"select tweet, sentence, polarity, subjectivity from {db_schema}.{db_table_tweet} t, {db_schema}.{db_table_pred} tp where t.id_tweet=tp.id_tweet and topic='{topic}' and tweet_date between str_to_date('{start_date}', '%Y-%m-%d') and str_to_date('{end_date}', '%Y-%m-%d')\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweets = cur.fetchall()\n columns = [col[0] for col in cur.description]\n logger.info(f'TOPIC: {topic}, N° TWEETS: {len(tweets)}') \n return tweets, columns\n \n except Exception as ex:\n logger.exception(ex)\n return f'Exception: {ex}'", "def lp_to_simple_rdd(lp_rdd: RDD, categorical: bool = False, nb_classes: int = None):\n if categorical:\n if not nb_classes:\n labels = np.asarray(lp_rdd.map(\n lambda lp: lp.label).collect(), dtype='int32')\n nb_classes = np.max(labels) + 1\n rdd = lp_rdd.map(lambda lp: (from_vector(lp.features),\n encode_label(lp.label, nb_classes)))\n else:\n rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label))\n return rdd", "def get_training_data(self) -> List[Tuple[str, Optional[str]]]:\n return [(k, v) for k, v in sorted(self._clusters.items(), key=lambda x: (x[1] if x[1] else '', x[0]))]", "def get_subscription_ids(self, topic: str) -> Tuple[int, ...]:\n try:\n return tuple(self.__sub_ids[topic])\n except KeyError:\n return ()", "def convert_partitions_to_list(partition):\n parts = list()\n for part in partition:\n parts.append(part)\n return parts", "def partition(rows, question):\n true_rows, false_rows = [], []\n for row in rows:\n if question.match(row):\n # the row's value of the column was greater than or equal to the questions value\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows", "def get_ped_topics(n_peds):\n ls = []\n for n in range(n_peds):\n ls += [coord.format(n) for coord in PED_TEMPL_TOPICS]\n return ls", "def get_related_topics(self,keyword,cut=0.5):\n \n ret = []\n\n if type(keyword) == str:\n if keyword in self.topic_map.keys():\n ret = [(keyword,1.0)]\n keyword = \"\"\n else:\n _keyword = []\n for k in keyword:\n if k in self.topic_map.keys():\n ret.append((k,1.0))\n else:\n _keyword.append(k) \n keyword = _keyword\n\n keyword_rels = set(self.get_related_keywords(keyword,self.keyword_map_rel,_score=False))\n\n if len(keyword_rels) > 0:\n for topic,topic_rels in self.topic_map.items():\n alike = keyword_rels.intersection(topic_rels)\n score = (len(alike) * (100/len(keyword_rels)))/100\n ret.append((topic,round(score,3)))\n ret.sort(key=lambda x : x[1], reverse=True)\n ret = [t for t in ret if t[1] >= 
cut]\n \n return ret", "def _topics_weights(self, num_of_words):\n topics = self.model.show_topics(-1, num_of_words, formatted=False)\n # `show_topics` method return a list of `(topic_number, topic)` tuples,\n # where `topic` is a list of `(word, probability)` tuples.\n return [[i[1] for i in t[1]] for t in topics]", "def get_most_relevant_topics(topics_list):\n topics_list.sort(cmp=lambda x, y: 1 if x[1] < y[1] else -1)\n topics_list = topics_list[:3] # ARBITRARY (at most 3 topic names)\n topics_id, _ = zip(*topics_list)\n return [topics_tools.lda_topic_names[tid] for tid in topics_id]", "def tup_list_maker(tup_list):\n final_list = []\n for item in tup_list:\n index = item[0]\n sentences = item[1]\n for sentence in sentences:\n pair = (index, sentence)\n final_list.append(pair)\n return final_list", "def get_user_list(dataset):\n res = dataset\\\n .map(lambda x: x[0])\\\n .collect()\n return list(set(res))", "def select_essays_with_topic(conn, topic):\n cur = conn.cursor()\n cur.execute('SELECT link, title FROM essays WHERE \"{}\" > 0 ORDER BY \"{}\" DESC'.format(topic, topic))\n\n d = {}\n count = 0\n rows = cur.fetchall()\n\n print(\"Total: \", len(rows))\n for row in rows:\n d[count] = (list(row)[0], list(row)[1])\n count += 1\n # print(row)\n #print(list(rows[0]))\n return d", "def schools_by_topic(mongo_collection, topic):\n res = []\n returned_values = mongo_collection.find({\"topics\": {\"$all\": [topic]}})\n for value in returned_values:\n res.append(value)\n return res", "def get_topics(self):\r\n return [x[0] for x in get_published_topics()]", "def get_teacher_topic_all():\n topic_data = query_db(\n \"SELECT topics.id, topics.name, classes.name FROM topics JOIN classes \"\n \"ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n topics = []\n for topic in topic_data:\n topic_dict_teacher = {}\n topic_dict_teacher[\"id\"] = topic[0]\n topic_dict_teacher[\"name\"] = flask.escape(str(topic[1]))\n topic_dict_teacher[\"class\"] = flask.escape(str(topic[2]))\n topics.append(topic_dict_teacher)\n return topics", "def transform(self):\n result = []\n for item in self.doc_topic_matrix:\n result.append(item / np.sum(item))\n result = np.array(result)\n return result", "def populate_weight_tuple_list(list_object):\n tuple_list = []\n\n for i in range(len(list_object[0])):\n weight_tuple = (list_object[0][i], float(list_object[1][i]))\n tuple_list.append(weight_tuple)\n \n return tuple_list", "def _fix_examples(examples):\n if len(examples) > 0 and len(examples[0]) > 0 and len(examples[0][0]) > 2:\n # Assumes all elements use the same format of tuple of length 3 (so we only check [0][0])\n return [\n [(tuple_ex[0], tuple_ex[1]) for tuple_ex in example]\n for example in examples\n ]\n else:\n return examples", "def get_valid_indices():\n return [i for i, val in enumerate(all_topics) if val[1] == \"1\"]", "def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]", "def to_sample_rdd(x, y, sc, num_slices=None):\n x_rdd = sc.parallelize(x, num_slices)\n y_rdd = sc.parallelize(y, num_slices)\n return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1]))", "def combine_sub_blocks(rdd):\n ### BEGIN SOLUTION ###\n return rdd", "def get_keypoint_tuples(eval_config):\n tuple_list = []\n kp_list = eval_config.keypoint_edge\n for edge in kp_list:\n tuple_list.append((edge.start, edge.end))\n return tuple_list", "def kml_extract_RDD(xml_file):\n soup = BeautifulSoup(xml_file, \"lxml-xml\")\n return 
get_kml_content(soup)", "def get_tuples(outputs) -> list:\n return list(map(get_tuples_helper, outputs))", "def _process_data(rdd_entry, feature_list):\n events = []\n for event in rdd_entry:\n events.append(event[RDD_EVENT])\n return IptablesIngestor.vectorize_events(events, feature_list)", "def generate_sub_blocks(rdd):\n ### BEGIN SOLUTION ###\n return rdd", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def get_custom_indices():\n return [i for i, val in enumerate(all_topics) if val[2] == \"1\"]", "def extract_records_for_nltk(iaa_df: pd.DataFrame) -> List[Tuple]:\n return [(b, c, d) for _, b, c, d in iaa_df.to_records()]", "def get_score_book(self) -> List[Tuple[str, float]]:\n returned = []\n\n for item, size in self.score_book.items():\n my_tuple = item, size\n returned.append(my_tuple)\n\n return returned", "def get_topic_words(self, topics):\n topic_words = []\n for topic, top_n_words in topics.items():\n words = [word for word, c_tf_idf in top_n_words]\n topic_words.append(words)\n return topic_words", "def convert_shapely_points_to_tuples(list_of_points) -> list:\n return [(p.x, p.y) for p in list_of_points]", "def process_rdd(time, rdd):\n print(\"----------- %s -----------\" % str(time))\n try:\n if rdd:\n # Get spark sql singleton context from the current context\n sql_context = get_sql_context_instance(rdd.context.getConf())\n # convert the RDD to Row RDD\n row_rdd = rdd.map(lambda w: Row(hashtag=w[0], hashtag_count=w[1]))\n # create a DF from the Row RDD\n hashtags_dataframe = sql_context.createDataFrame(row_rdd)\n # Register the dataframe as table\n hashtags_dataframe.createOrReplaceTempView(\"hashtags\")\n # get the top 10 hashtags from the table using SQL and print them\n hashtag_counts_dataframe = sql_context.sql(\n \"select hashtag, hashtag_count from hashtags order by hashtag_count desc limit 10\")\n hashtag_counts_dataframe.show()\n\n # call this method to prepare top 10 hashtags DF and send them\n\n def send_dataframe_to_dashboard(dataframe):\n \"\"\"\n Function to send DataFrame to the dashboard for visualization.\n :param dataframe: Spark DataFrame created by process_rdd().\n \"\"\"\n # extract the hashtags from dataframe and convert them into array\n top_tags = [str(t.hashtag) for t in dataframe.select(\"hashtag\").collect()]\n # extract the counts from dataframe and convert them into array\n tags_count = [p.hashtag_count for p in dataframe.select(\"hashtag_count\").collect()]\n # initialize and send the data through REST API\n request_data = {'label': str(top_tags), 'data': str(tags_count)}\n response = post(dashboard_url, data=request_data)\n\n send_dataframe_to_dashboard(hashtag_counts_dataframe)\n except:\n pass", "def ID_Data(targ_driver, RDD, sc, K = 200):\n try:\n newID1 = [targ_driver] * K\n newID2 = np.arange(200, 201+K).astype(str)\n newID = zip(newID1, newID2)\n oldID = RDD.map(lambda x: (x[2],x[3])).distinct().collect()\n glossary = sc.parallelize(zip(oldID, newID))\n newRDD = RDD.map(lambda x: ((x[2],x[3]), 
([x[0],x[1],x[4]]))).join(glossary)\n newID_RDD = newRDD.map(lambda x: (x[1][0][0], x[1][0][1], x[1][1][0], x[1][1][1], x[1][0][2]))\n return newID_RDD\n except Exception as e:\n print e", "def answerer(embeddings, tuples: tf.Variable, scoring=multilinear):\n n_data, n_slots, rank = [d.value for d in embeddings.get_shape()]\n n_data, n_tuples, order = [d.value for d in tuples.get_shape()]\n\n shift_indices = tf.constant(np.reshape(\n np.outer(range(n_data), np.ones(n_tuples * n_slots)) * n_slots, (n_data, n_tuples, n_slots)), dtype='int64')\n questions_shifted = tuples + shift_indices\n\n preds = scoring(\n tf.reshape(embeddings, (n_data * n_slots, rank)),\n tf.reshape(questions_shifted, (n_data * n_tuples, order)))\n\n return tf.reshape(preds, (n_data, n_tuples))", "def create_featseltuple(ids):\n newlist = []\n for part_id in ids:\n newlist.extend([part_id, part_id + 91])\n return tuple(sorted(newlist))", "def _partition_rdd(self, rdd, function): \n\n N, buffer_tau, dom_mins, dom_maxs, symmetric = self.N, self.buffer_tau, \\\n self.dom_mins, self.dom_maxs, \\\n self.symmetric \n def partition_helper(iterator):\n for arr in iterator: \n res = function(arr,N,buffer_tau,symmetric,dom_mins,dom_maxs)\n for r in res: \n yield r\n return rdd.mapPartitions(partition_helper)", "def toListOfTuple(self, df:pd.core.frame.DataFrame) -> List[Tuple]: \n df['TIME_STAMP'] = df['TIME_STAMP'].astype('str')\n records = df.to_records(index=False)\n listOfTuple = list(records)\n return listOfTuple", "def get_topic_assign(topic_id):\n assignment_data = query_db(\n \"SELECT id, name, due_date FROM assignments WHERE topic_id=?;\", [topic_id]\n )\n assignments = []\n for assignment in assignment_data:\n topic_assign_dict = {}\n topic_assign_dict[\"id\"] = assignment[0]\n topic_assign_dict[\"name\"] = str(assignment[1])\n topic_assign_dict[\"due_date\"] = assignment[2]\n assignments.append(topic_assign_dict)\n return assignments", "def get_topics(model, nlp_model, n_top_words):\n\n words = nlp_model.get_feature_names()\n\n return [convert_to_string([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) for topic_idx, topic in enumerate(model.components_)]", "def get_topics_for_alt(alt_list, pgm_dict):\n for epv in alt_list:\n name = epv.get('package', {}).get('name', [''])[0]\n if name:\n for pgm_pkg_key, pgm_list in pgm_dict.items():\n for pgm_epv in pgm_list:\n if name == pgm_epv.get('package_name', ''):\n epv['package']['pgm_topics'] = pgm_epv.get('topic_list', [])\n\n return alt_list", "def get_points_rdd(self, sc, parser_class=None):\n parser_class = parser_class or RowParser\n\n input_rdd = sc.textFile(self.output()['dataset'].path)\n\n # Build parser for dataset\n # maybe use csv module?\n fields = input_rdd.take(1)[0].split(',')\n feature_indices = [\n i\n for i, f in enumerate(fields)\n if f != 'id' and f != 'target'\n ]\n target_index = fields.index('target')\n id_index = fields.index('id')\n parser = parser_class(feature_indices, target_index, id_index)\n\n points = (\n input_rdd\n .map(parser)\n .filter(lambda x: x is not None)\n .persist(StorageLevel.DISK_ONLY)\n )\n return points", "def get_topics(self):\n try:\n with self.__db_lock:\n sql = \"SELECT * FROM 'topics' ORDER BY 'name' ASC\"\n self.__cursor.execute(sql)\n topics = self.__cursor.fetchall()\n if topics is None or len(topics) == 0:\n return []\n return [topic[1] for topic in topics]\n except Exception as e:\n logging.error(\n \"Exception when trying to get topics list: {}\".format(e))\n return []", "def get_dataset_tuple(Xtrain, 
cols_type_received, cols_ref):\n if len(cols_ref) <= 1 :\n return Xtrain\n\n Xtuple_train = []\n # cols_ref is the reference for types of cols groups (sparse/continuous)\n # This will result in deviding the dataset into many groups of features\n for cols_groupname in cols_ref :\n # Assert the group name is in the cols reference\n assert cols_groupname in cols_type_received, \"Error missing colgroup in config data_pars[cols_model_type] \"\n cols_i = cols_type_received[cols_groupname]\n # Add the columns of this group to the list\n Xtuple_train.append( Xtrain[cols_i] )\n\n if len(cols_ref) == 1 :\n return Xtuple_train[0] ### No tuple\n else :\n return Xtuple_train", "def _covert_list_tensor_to_tuple_tensor(list_of_tensor):\n if isinstance(list_of_tensor, list):\n tuple_of_tensor = ()\n for tensor in list_of_tensor:\n tuple_of_tensor += (tensor,)\n return tuple_of_tensor\n return list_of_tensor", "def get_variants(topic):\n return [topic['variant%s'%i] for i in range(1,3) if not pd.isnull(topic['variant%s'%i])]", "def infertopics(self):\n\n # Iterate over nodes missing topic attribute (only occurs for new nodes)\n for uid in self.scan(attribute=\"updated\"):\n # Remove updated attribute\n self.removeattribute(uid, \"updated\")\n\n # Get list of neighboring nodes\n ids = self.edges(uid)\n\n # Infer topic\n topic = Counter(self.attribute(x, \"topic\") for x in ids).most_common(1)[0][0] if ids else None\n if topic:\n # Add id to topic list and set topic attribute\n self.topics[topic].append(uid)\n self.addattribute(uid, \"topic\", topic)\n\n # Set topic rank\n self.addattribute(uid, \"topicrank\", len(self.topics[topic]) - 1)\n\n # Infer category\n category = Counter(self.attribute(x, \"category\") for x in ids).most_common(1)[0][0]\n self.addattribute(uid, \"category\", category)", "def tuple_batch(l):\n def min_one(rev):\n if len(rev)==0:\n rev = [[1]]\n return rev\n\n\n _,review,rating = zip(*l)\n r_t = torch.Tensor(rating).long()\n \n list_rev = [min_one(x) for x in review]\n #word_len = [[len(w) for w in rev] for rev in list_rev ]\n chars = []\n lens = []\n len_c = 0\n #pour chaque tweet\n for rev in list_rev:\n chars_r = []\n #chaque mot\n for w in rev:\n #chaque lettre\n for c in w:\n chars_r.append(c)\n chars_r.append(2) #space character\n chars.append(chars_r)\n lens.append(len(chars_r))\n\n if len(chars_r) > len_c: #save max \n len_c = len(chars_r)\n\n tweets = torch.zeros(len(chars),len_c).long()\n len_s = torch.LongTensor(lens)\n\n for i,data in enumerate(chars):\n tweets[i,0:len(data)] = torch.LongTensor(data)\n \n\n return tweets, len_s, r_t", "def tuple_to_list(tup):\n return [element for element in tup]", "def get_data(self, topic, datetime_from=None, datetime_to=None):\n try:\n with self.__db_lock:\n time_column = \"timestamp\"\n sql = \"SELECT `{}`, `data`, `format_string` FROM `events` WHERE `topic` == ? 
ORDER BY `{}` ASC\".format(\n time_column, time_column)\n self.__cursor.execute(sql, (topic,))\n data = self.__cursor.fetchall()\n if data is None or len(data) == 0:\n return []\n\n # first column holds the datetime, second is the data (bytes), third is the format string, fourth is the timestamp\n data_decoded = []\n for d in data:\n timestamp = d[0]\n if d[2] == Database.__BYTES_DB_FORMAT_STRING:\n data = d[1]\n elif d[2] == Database.__UTF8_DB_FORMAT_STRING:\n data = d[1].decode('utf-8')\n else:\n data = struct.unpack(d[2], d[1])[0]\n data_decoded.append([timestamp, data])\n return data_decoded\n except Exception as e:\n logging.error(\n \"Exception when trying to get topics list: {}\".format(e))\n return []", "def get_topics_for_comp(comp_list, pgm_list):\n for epv in comp_list:\n name = epv.get('package', {}).get('name', [''])[0]\n if name:\n for pgm_epv in pgm_list:\n if name == pgm_epv.get('package_name', ''):\n epv['package']['pgm_topics'] = pgm_epv.get('topic_list', [])\n epv['package']['cooccurrence_probability'] = pgm_epv.get(\n 'cooccurrence_probability', 0)\n epv['package']['cooccurrence_count'] = pgm_epv.get(\n 'cooccurrence_count', 0)\n\n return comp_list", "def connect_topic_id_to_topics(model, representants, log_writer):\n # t = model.get_topics()\n topic_indexes = {}\n topics_of_index = {}\n confidence = []\n for key, value in representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key,tp_num,val/len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2),reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n log_writer.add_log('Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],conf[1],conf[2]))\n topic_indexes[conf[0]] = conf[1]\n\n for key, value in topic_indexes.items():\n topics_of_index[value] = [key]\n print(topic_indexes)\n print(topics_of_index)\n return topic_indexes, topics_of_index", "def select_data_from_record(self, record):\n x = record['input_ids']\n y = record['label_ids']\n return (x, y)", "def gather(session, selected_clusters):\n doc_ids = []\n titles = []\n summaries = []\n links = []\n doc_vector_list = []\n tfidf_vector_list = []\n for i, label in enumerate(session['kmodel'].labels_):\n if str(label) in selected_clusters:\n doc_ids.append(i)\n titles.append(session['titles'][i])\n summaries.append(session['summaries'][i])\n links.append(session['links'][i])\n\n # Create a new topic space matrix by selecting only the vector\n # representations of the new scatter collection documents.\n doc_vector_list.append(session['vector_space'].getrow(i))\n tfidf_vector_list.append(session['tfidf'].getrow(i))\n\n vector_space = vstack(doc_vector_list, format='csr')\n tfidf = vstack(tfidf_vector_list, format='csr')\n\n return doc_ids, titles, summaries, links, vector_space, tfidf", 
"def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) 
word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less", "def muckrack_trending_topics():\n\tresponse = requests.get('http://muckrack.com')\n\thtml = response.text\n\tdom = pq(html)\n\ttrending_list = dom('.trending')\n\treturn [topic.text for topic in trending_list]", "def get_matched_custom_feed(cls, fsid, topic_name): \n custom_feed_map = cls.get_custom_feed_map(fsid)\n if not custom_feed_map:\n return [], None\n if custom_feed_map.has_key(topic_name):\n return [], custom_feed_map.get(topic_name)\n wc_topic_name, required_keywords = cls.to_bracket_wild_card(topic_name)\n if wc_topic_name and custom_feed_map.has_key(wc_topic_name):\n return required_keywords, custom_feed_map.get(wc_topic_name)\n return [], None", "def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]", "def getCategories(URIList, annotatedWords):\n \n L=[]\n wordByCategory=dict()\n i=0\n for URI in URIList:\n sparql = SPARQLWrapper(\"http://dbpedia.org/sparql\")\n sparql.setQuery(\"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX dc: <http://purl.org/dc/terms/>\n SELECT ?label\n WHERE { \"\"\"+ \"<\"+ URI + \"> dc:subject ?label }\"\n )\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n for result in results[\"results\"][\"bindings\"]:\n category=result[\"label\"][\"value\"].encode(\"UTF-8\").split(\"/\")[-1].replace(\"_\",\" \").replace(\"Category:\",\"\")\n L.append(category)\n if category in wordByCategory:\n if i>= len(annotatedWords):\n print \"getCategories is computing URI=\",URI\n print \"Trying to append element number\",i,\n print \"from a list having\",len(annotatedWords),\"elements.\"\n 
wordByCategory[category].append(annotatedWords[i])\n else:\n wordByCategory[category]=[annotatedWords[i]]\n i+=1\n return L, wordByCategory", "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def __rank_topics(self, found_topics, explanation):\n max_value = 0\n scores = []\n for _,topic in found_topics.items():\n topic[\"score\"] = topic[\"times\"] * len(topic['grams'].keys())\n scores.append(topic[\"score\"])\n if topic[\"score\"] > max_value:\n max_value = topic[\"score\"]\n\n for _,topic in found_topics.items():\n if \"syntactic\" in topic:\n topic[\"score\"] = max_value\n\n\n\n\n # Selection of unique topics\n unique_topics = {}\n for t_p,topic in found_topics.items():\n prim_label = self.cso.get_primary_label_wu(t_p)\n if prim_label in unique_topics:\n if unique_topics[prim_label] < topic[\"score\"]:\n unique_topics[prim_label] = topic[\"score\"]\n else:\n unique_topics[prim_label] = topic[\"score\"]\n\n # ranking topics by their score. High-scored topics go on top\n sort_t = sorted(unique_topics.items(), key=lambda v: v[1], reverse=True)\n #sort_t = sorted(found_topics.items(), key=lambda k: k[1]['score'], reverse=True)\n\n\n # perform\n vals = []\n for t_p in sort_t:\n vals.append(t_p[1]) #in 0, there is the topic, in 1 there is the info\n\n\n #### suppressing some warnings that can be raised by the kneed library\n warnings.filterwarnings(\"ignore\")\n try:\n x_vals = range(1,len(vals)+1)\n t_kn = KneeLocator(x_vals, vals, direction='decreasing')\n if t_kn.knee is None:\n #print(\"I performed a different identification of knee\")\n t_kn = KneeLocator(x_vals, vals, curve='convex', direction='decreasing')\n except ValueError:\n pass\n\n ##################### Pruning\n\n try:\n knee = int(t_kn.knee)\n except TypeError:\n knee = 0\n except UnboundLocalError:\n knee = 0\n\n if knee > 5:\n try:\n knee += 0\n except TypeError:\n print(\"ERROR: \",t_kn.knee,\" \",knee, \" \", len(sort_t))\n\n else:\n try:\n if sort_t[0][1] == sort_t[4][1]:\n top = sort_t[0][1]\n test_topics = [item[1] for item in sort_t if item[1]==top]\n knee = len(test_topics)\n\n else:\n knee = 5\n except IndexError:\n knee = len(sort_t)\n\n final_topics = []\n final_topics = [self.cso.get_topic_wu(sort_t[i][0]) for i in range(0,knee)]\n self.reset_explanation()\n self.explanation = {self.cso.topics_wu[sort_t[i][0]]: explanation[sort_t[i][0]] for i in range(0,knee)}\n\n return final_topics", "def get_topic_data(bagFile, topic, return_t=False):\n\n all_msg = []\n if return_t:\n all_t = []\n\n\n # Initialize rosbag object\n bag = rosbag.Bag(bagFile)\n\n for topic, msg, t in bag.read_messages(topics=[topic]):\n all_msg = np.append(all_msg, msg)\n if return_t:\n all_t = np.append(all_t, t.to_sec())\n\n if return_t:\n return all_msg, all_t\n else:\n return all_msg", "def extract_MSQs(line):\n \n tokens = sent_tokenize(line)\n questions = []\n \n for i in range(len(tokens)):\n if 
tokens[i].endswith('?'):\n q1 = tokens[i] # find first question\n sep = 0 # counter for separation between end of q1 and start q2\n for j in range(i+1, len(tokens)): # look for paired question\n if tokens[j].endswith('?'):\n q2 = tokens[j]\n questions.append((q1, q2, sep))\n break\n else:\n sep += len(tokens[j])\n\n return questions", "def get_class_topic(class_id):\n topic_data = query_db(\"SELECT id, name FROM topics WHERE class_id=?\", [class_id])\n topics = []\n for topic in topic_data:\n topic_dict_class = {}\n topic_dict_class[\"id\"] = topic[0]\n topic_dict_class[\"name\"] = topic[1]\n topics.append(topic_dict_class)\n return topics", "def resulttolist(result, feedback = 0):\n\n newlist = []\n\n if feedback == 2:\n for i in result:\n j = \" \".join(i)\n k = list(j.split(\" \"))\n newlist.append(k)\n elif feedback == 3:\n for i in result:\n j = \" \".join(i)\n k = list(j.split(\" \"))\n newlist.append(k)\n else:\n for i in result:\n j = \"\".join(i)\n newlist.append(j)\n\n return newlist", "def get_list(vertices, partition):\n vertices.sort()\n return_array = list()\n for vertex in vertices:\n position = get_position(vertex, partition)\n return_array.append(position)\n return return_array", "def _create_pairs_row(model, st_prefs, st_num):\n pairs_row = []\n simp_st_prefs, simp_st_ranks = _get_simple_pref_list_and_ranks(st_prefs)\n\n for i in range(len(simp_st_prefs)):\n pairs_row.append(Pair(st_num, simp_st_prefs[i], simp_st_ranks[i]))\n in_tie = True\n\n return pairs_row", "def extract_queries(self, path_topics=\"../topics-rnd5.xml\"):\n \n topic_queries = []\n with open(path_topics, \"r\") as f:\n for line in f:\n match = re.match(\".*<query>([^<]*)<\\/query>.*\", line)\n if match:\n topic_queries.append(match.group(1))\n if len(topic_queries) != 50:\n sys.exit(\"There should be 50 topics, found {}\".format(\n len(topic_queries)))\n \n return topic_queries", "def get_topic_labels(topics, num_labels=1, values=False):\n result = []\n for topic in topics:\n labels = get_label_for_topic(topic)\n sorted_labels = sorted(labels.items(), key=operator.itemgetter(1), reverse=False)\n if values:\n result.append(sorted_labels[:num_labels])\n else:\n result.append([h for h, s in sorted_labels][:num_labels])\n return result", "def process(data_item, article_id):\n questions = []\n answers = []\n paragraph = [article_id, data_item['context']]\n\n for item in data_item['qas']:\n question = [item[\"id\"], item[\"question\"], item['is_impossible']]\n questions.append(question)\n if item['is_impossible']:\n continue\n answer_options = item[\"answers\"]\n answer_set = set()\n for option in answer_options:\n answer_tuple = (option['text'], option['answer_start'])\n answer_set.add(answer_tuple)\n for index, answer_tuple in enumerate(answer_set):\n answer = [\"{}_{}\".format(item[\"id\"], index+1), item[\"id\"], answer_tuple[0], answer_tuple[1]]\n answers.append(answer)\n return paragraph, questions, answers", "def test_question_topics(self):\n p = ProductFactory()\n t1 = TopicFactory(slug='doesnotexist', product=p)\n t2 = TopicFactory(slug='cookies', product=p)\n t3 = TopicFactory(slug='sync', product=p)\n\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t3)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])" ]
[ "0.57467985", "0.5711735", "0.55085766", "0.5506645", "0.53411525", "0.5329091", "0.53050333", "0.52999526", "0.524876", "0.5236957", "0.5155814", "0.51508963", "0.514394", "0.51368946", "0.5046468", "0.50216925", "0.5004249", "0.4996361", "0.49733895", "0.49623284", "0.4962278", "0.49580574", "0.4954229", "0.4952703", "0.49385032", "0.49385032", "0.49313816", "0.49104592", "0.48869136", "0.4878003", "0.4869872", "0.48559257", "0.485005", "0.4847874", "0.48436883", "0.4830317", "0.48299396", "0.48239222", "0.48215353", "0.48198238", "0.48137048", "0.4807396", "0.4792458", "0.47920457", "0.47918186", "0.47887972", "0.47860616", "0.47735697", "0.47713384", "0.4758382", "0.4751026", "0.47396493", "0.47383827", "0.47197968", "0.47164208", "0.47062388", "0.4702375", "0.46968347", "0.46926036", "0.46876323", "0.468089", "0.4664692", "0.4662875", "0.46528718", "0.46502876", "0.46495485", "0.4633695", "0.46248013", "0.46162102", "0.46160448", "0.46156764", "0.4609398", "0.46062556", "0.45938736", "0.45918742", "0.45850766", "0.45815086", "0.45694357", "0.45679224", "0.4566932", "0.45614386", "0.45576695", "0.45471543", "0.4538135", "0.45335302", "0.45270234", "0.45236865", "0.4519061", "0.45159587", "0.45152387", "0.45140436", "0.45087564", "0.45027378", "0.45015958", "0.4496275", "0.44917238", "0.44875497", "0.44862512", "0.4480108", "0.4479065" ]
0.5424828
4
From all the data, it takes the TopicID column and counts the topics by gender
def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:
    data_frame_topic = data_frame \
        .filter(data_frame["Stratification1"].contains("Male")) \
        .distinct() \
        .groupBy("TopicID") \
        .count() \
        .sort("TopicID")

    print("The following table represent the number of men group by the topic: ")
    data_frame_topic.show()
    data_frame_pandas = data_frame.toPandas()
    return data_frame_pandas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_male_female_topicsDF(data_dict, gender):\n dataDF = pd.DataFrame.from_dict(data_dict[gender], orient='index')\n outlet_gender_topicsDF = pd.json_normalize(dataDF['topic_mean'])\n outlet_gender_topicsDF.index = dataDF.index\n outlet_gender_topicsDF = outlet_gender_topicsDF.sort_index()\n outlet_gender_topicsDF = outlet_gender_topicsDF.transpose()\n return outlet_gender_topicsDF", "def construct_gender_df(data_dict):\n gender_dict = data_dict['perGenderTopics']\n topics = data_dict['topics']\n # Convert to Pandas DataFrame\n genderDF = pd.DataFrame.from_dict(gender_dict, orient='index').transpose()\n genderDF = genderDF[['female', 'male']]\n genderDF['diff'] = genderDF['female'] - genderDF['male']\n # Sort in order of the sum of mean values for each topic\n genderDF = genderDF.sort_values('diff')\n genderDF['topic'] = [f\"t{i}\" for i in genderDF.index]\n\n # Get a properly ordered list of topics based on prominence for axis labelling\n ordered_topics_dict = {idx: topics[idx] for idx in genderDF.index}\n ordered_names = [topics[idx]['name'] for idx in genderDF.index]\n top_5_words = get_top_n_words(ordered_topics_dict)\n # Get topic names for axis labels if they exist, otherwise return top-5 words per topic\n y_labels = [name if name else top_5_words[i] for i, name in enumerate(ordered_names)]\n genderDF['topic_names'] = y_labels\n return genderDF", "def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics", "def user_gender_statistics(df):\n print('Count of gender \\n')\n gender_counts=df['Gender'].value_counts()\n #loop through to print the total number of gender\n for index, gender_count in enumerate(gender_counts):\n print(' {}: {}'.format(gender_counts.index[index],gender_count))\n \n print()", "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def count_gender(data):\n data = column_to_list(data, -2)\n male = data.count(\"Male\")\n female = data.count(\"Female\")\n return [male, female]", "def gender_word_counts(data):\n\n # We use the stopwords package from NLTK corpus.\n stop_words = set(stopwords.words('english'))\n data['tweet_words'] = data['text_cleaned'].str.split()\n # Ignoring all the stop words\n data['tweet_words'] = data['tweet_words'].apply(lambda tweet: [word for word in tweet if word not in stop_words])\n\n # 
Separating Male, Female and Brand profiles.\n male_profiles = data[data['gender'] == 'male']\n female_profiles = data[data['gender'] == 'female']\n brand_profiles = data[data['gender'] == 'brand']\n\n print(\"Top 20 most frequent words used by Men\")\n all_male_tweets = ' '.join(male_profiles['tweet_words'].astype(str))\n Male_words = pd.Series(all_male_tweets.split(\" \")).value_counts()[:20]\n print(Male_words)\n print()\n\n print(\"Top 20 most frequent words used by Women\")\n all_female_tweets = ' '.join(female_profiles['tweet_words'].astype(str))\n Female_words = pd.Series(all_female_tweets.split(\" \")).value_counts()[:20]\n print(Female_words)\n print()\n\n print(\"Top 20 most frequent words used by Brands\")\n all_brand_tweets = ' '.join(brand_profiles['tweet_words'].astype(str))\n Brand_words = pd.Series(all_brand_tweets.split(\" \")).value_counts()[:20]\n print(Brand_words)\n\n # Plotting horizontal bar graphs showing Top 20 tweet words used Vs. the word frequency.\n mp = Male_words.plot(kind='barh', stacked=True, colormap='plasma', title=\"Top 20 most frequently words used by Men\")\n mp.set_ylabel(\"Tweet words used by Males\")\n mp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n fp = Female_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Women\")\n fp.set_ylabel(\"Tweet words used by Females\")\n fp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n bp = Brand_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Brands\")\n bp.set_ylabel(\"Tweet words used by Brands\")\n bp.set_xlabel(\"Word Frequency\")\n plt.show()", "def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Black, non-Hispanic\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of black ethnicity people group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. 
Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)", "def construct_outlet_gender_DF(data_dict):\n outlet_gender_dict = data_dict['perOutletGenderTopics']\n topics = data_dict['topics']\n male_outlet_topics = get_male_female_topicsDF(outlet_gender_dict, 'male')\n female_outlet_topics = get_male_female_topicsDF(outlet_gender_dict, 'female')\n # Plot the difference between the male-dominant and female-dominant topics\n diff = female_outlet_topics - male_outlet_topics\n # Calculate sum of all columns to decide sorting order\n diff['net'] = diff[diff.columns].sum(axis=1)\n diff = diff.sort_values('net').drop('net', axis=1)\n # Get a properly ordered list of topics based on prominence for axis labelling\n ordered_topics_dict = {idx: topics[idx] for idx in diff.index}\n ordered_names = [topics[idx]['name'] for idx in diff.index]\n top_5_words = get_top_n_words(ordered_topics_dict)\n # Get topic names for axis labels if they exist, otherwise return top-5 words per topic\n y_labels = [name if name else top_5_words[i] for i, name in enumerate(ordered_names)]\n return diff, y_labels", "def get_data_frame_count_type_of_topic(data_frame: DataFrame) -> pb.DataFrame:\n try:\n data_frame = data_frame \\\n .select(\"TopicID\", \"Question\") \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n except Py4JError:\n raise AnalysisException('One columns is incorrect')\n print(\"The following table represent the number of the type of each topic\")\n data_frame.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def init():\n\n # Reading the data from the CSV file using the latin1 encoding.\n data_read = pd.read_csv(\"gender-classifier-DFE-791531.csv\", encoding='latin1') # Dataset Size = 20050\n\n # If all the attribute values are empty for any of the rows, we drop them.\n data = data_read.dropna(how='all') # After dropping, data set size is still 20050\n\n # Checking the names of the columns/attributes which contains at least one null value\n columns_containing_missing_values = data.columns[data.isnull().any()].tolist()\n print(\"Column names which has missing values\")\n print(columns_containing_missing_values)\n\n # Since 'gender' is our target variable, we would like to have values for it.\n # So, dropping all the rows which have no values for the 'gender' attribute.\n data = data[data['gender'].notnull()] # After dropping, dataset size = 19953 rows\n # Also, dropping all the rows which have values as 'unknown' for the 'gender' attribute\n data = data[data['gender'] != 'unknown'] # After dropping, dataset size = 18836 rows\n\n male_profile_count = len(data[data['gender'] == 'male'])\n print(\"Male Profile Count \" + str(male_profile_count))\n female_profile_count = len(data[data['gender'] == 'female'])\n print(\"Female Profile Count \" + str(female_profile_count))\n brand_profile_count = len(data[data['gender'] == 'brand'])\n print(\"Brand Profile Count \" + str(brand_profile_count))\n\n return data", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n 
assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()", "def user_stats_gender(df):\n # Display counts of gender\n print(\"Counts of gender:\\n\")\n start_time = time.time()\n gender_counts = df['Gender'].value_counts()\n # iteratively print out the total numbers of genders \n # in this loop , it will iterative over the user_counts and its numbering\n for index,gender_count in enumerate(gender_counts):\n print(\" {}: {}\".format(gender_counts.index[index], gender_count))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*50)", "def count_mentioned_countries(data):\n countries_mentioned = {}\n countries = get_countries()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for country in countries:\n if country in (subject_words + message_words):\n if country in countries_mentioned:\n countries_mentioned[country] += 1\n else:\n countries_mentioned[country] = 1\n\n return pd.DataFrame.from_dict(countries_mentioned, orient=\"index\")", "def count_gender(dictionary, gender_variable):\r\n boy = 0\r\n girl = 0\r\n for num in dictionary[gender_variable]:\r\n if num == 1:\r\n boy += 1\r\n elif num == 2:\r\n girl += 1\r\n return (boy, girl)", "def count_authors_by_gender(self, gender):\n count = 0\n for document in self.documents:\n try:\n if document.author_gender.lower() == gender.lower():\n count += 1\n except AttributeError:\n raise MissingMetadataError(['author_gender'])\n\n return count", "def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def count_male_teams(self):\n return len(self.df['Adult male 11v11 (16-45)'].dropna())", "def index():\n import numpy as np\n import random\n\n total_gender = {}\n total_gender['Male'] = db(db.patient.sex == 'Male').count()\n total_gender['Female'] = db(db.patient.sex == 'Female').count()\n total_gender['Undeclared'] = db(db.patient.sex == 'Undeclared').count()\n\n groups = db(db.groups).select()\n freq_groups = {}\n grp_gender = {}\n for g in groups:\n freq_groups[g.code] = db(db.patient.groups.contains(g.id)).count()\n grp_gender[g.code] = {}\n grp_gender[g.code]['Male'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Male')).count()\n grp_gender[g.code]['Female'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Female')).count()\n grp_gender[g.code]['Undeclared'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Undeclared')).count()\n\n experiments = db(db.experiments).select()\n freq_experiments = {}\n exp_gender = {}\n for e in experiments:\n freq_experiments[e.code] = db(db.patient.experiments.contains(e.id)).count()\n exp_gender[e.code] = {}\n exp_gender[e.code]['Male'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex 
== 'Male')).count()\n exp_gender[e.code]['Female'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Female')).count()\n exp_gender[e.code]['Undeclared'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Undeclared')).count()\n\n grp_exp = {}\n for e in experiments:\n grp_exp[e.code] = {}\n for g in groups:\n grp_exp[e.code][g.code] = db(db.patient.experiments.contains(e.id) & db.patient.groups.contains(g.id)).count()\n\n return dict(message=T('Pain Network: A web-based tool for diagnosis of the Chronic Pain.'),\n freq_gender=total_gender,freq_groups=freq_groups,freq_experiments=freq_experiments,\n exp_gender=exp_gender,grp_gender=grp_gender,grp_exp=grp_exp)", "def uncategorized(df):\n\n counter = 0\n for movie in df.index:\n if len(df.loc[movie, 'imdbGenres']) == 1 and\\\n df.loc[movie, 'Political'] == 0:\n counter += 1\n\n return counter", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n # printing out the total numbers of user types\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n\n # Display counts of gender", "def test_extract_topics_custom_cv():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n\n cv = CountVectorizer(ngram_range=(1, 2))\n model = 
BERTopic(vectorizer=cv)\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) 
word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less", "def calc_topic_mode_log_stats(user_exercise_graph, topic_id,\n just_earned_proficiency):\n topic = topic_models.Topic.get_by_id(topic_id)\n topic_exercises = topic.get_exercises()\n\n total_exercises = len(topic_exercises)\n count_proficient = len(set(ex.name for ex in topic_exercises) &\n set(user_exercise_graph.proficient_exercise_names()))\n just_completed = (just_earned_proficiency and total_exercises ==\n count_proficient)\n\n return {\n 'total_exercises': total_exercises,\n 'count_proficient': count_proficient,\n 'just_completed': just_completed,\n }", "def pre_process_data(df):\n # setting `passengerID` as Index since it wont be necessary for the analysis\n df = df.set_index(\"PassengerId\")\n\n # convert 'Sex' values\n df['gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # We see that 2 passengers embarked data is missing, we fill those in as the most common Embarked value\n df.loc[df.Embarked.isnull(), 'Embarked'] = df['Embarked'].mode()[0]\n\n # Replace missing age values with median ages by gender\n for gender in df['gender'].unique():\n median_age = df[(df['gender'] == gender)].Age.median()\n df.loc[(df['Age'].isnull()) & (df['gender'] == gender), 'Age'] = median_age\n\n # convert 'gender' values to new columns\n df = pd.get_dummies(df, columns=['gender'])\n\n # convert 'Embarked' values to new columns\n df = pd.get_dummies(df, columns=['Embarked'])\n\n # bin Fare into five intervals with equal amount of values\n df['Fare-bin'] = pd.qcut(df['Fare'], 5, labels=[1, 2, 3, 4, 5]).astype(int)\n\n # bin Age into seven intervals with equal amount of values\n # ('baby','child','teenager','young','mid-age','over-50','senior')\n bins = [0, 4, 12, 18, 30, 50, 65, 100]\n age_index = (1, 2, 3, 4, 5, 6, 7)\n df['Age-bin'] = pd.cut(df['Age'], bins, labels=age_index).astype(int)\n\n # create a new column 'family' as a sum of 'SibSp' and 'Parch'\n df['family'] = df['SibSp'] + df['Parch'] + 1\n df['family'] = df['family'].map(lambda x: 4 if x > 4 else x)\n\n # create a new column 'FTicket' as the first character of the 'Ticket'\n df['FTicket'] = df['Ticket'].map(lambda x: x[0])\n # combine smaller categories into one\n df['FTicket'] = df['FTicket'].replace(['W', 'F', 'L', '5', '6', '7', '8', '9'], '4')\n # convert 'FTicket' values to new columns\n df = pd.get_dummies(df, columns=['FTicket'])\n\n # get titles from the name\n df['title'] = df.apply(lambda row: re.split('[,.]+', row['Name'])[1], axis=1)\n\n # convert titles to values\n df['title'] = df['title'].map({' Capt': 'Other', ' Master': 'Master', ' Mr': 'Mr', ' Don': 'Other',\n ' Dona': 'Other', ' Lady': 'Other', ' Col': 'Other', ' Miss': 'Miss',\n ' the Countess': 'Other', ' Dr': 'Other', ' Jonkheer': 'Other', ' Mlle': 'Other',\n ' Sir': 'Other', ' Rev': 'Other', ' Ms': 'Other', ' Mme': 'Other', ' Major': 'Other',\n ' Mrs': 'Mrs'})\n # convert 
'title' values to new columns\n df = pd.get_dummies(df, columns=['title'])\n\n df = df.drop(['Name', 'Ticket', 'Cabin', 'Sex', 'Fare', 'Age'], axis=1)\n\n return df", "def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter", "def topic(df, num_topics=5):\r\n# X, y = df[df.columns[:-1]], df[df.columns[-1]]\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the absolute count of occurrences and the count of text_id occurrences both big enough to use it\n # instead of the other loop?\n if 
ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}", "def prob4():\n#pass\n n_men = 0\n n_women = 0\n db = sql.connect('sql2')\n cur = db.cursor()\n cur.execute(\"SELECT Gender FROM ICD WHERE Age>=25 AND Age<=35\")\n for gen in cur :\n if gen[0] == 'M' :\n n_men += 1\n else :\n n_women += 1\n db.close()\n return n_men,n_women", "def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def get_mutual_information(filename):\n categories = {} #{category: speakers of this category}\n features = {} #{feat: speakers who use this feature}\n pos_categories_features = {} #{category: {feat: speakers of category who use this feat}}\n neg_categories_features = {} #{category: {feat: speakers of category who do not use this feat}}\n users = set() #set of all users in data\n \n for line in open(filename):\n userid, c, date, statusid, rawtweet, toktweet, tagtweet = line.split('\\t')\n users.add(userid)\n \n if c not in categories:\n categories[c] = set()\n pos_categories_features[c] = {}\n categories[c].add(userid)\n \n feats = set(toktweet.lower().split()) #lowercase tweet and split into words\n\n for feat in feats:\n if feat not in pos_categories_features[c]:\n pos_categories_features[c][feat] = set()\n pos_categories_features[c][feat].add(userid)\n \n if feat not in features:\n features[feat] = set()\n features[feat].add(userid)\n\n print \"Parsed data\"\n\n numfeats = len(features) #num of features\n print numfeats, \"features\"\n numusers = len(users) #num of users \n print numusers, \"users\"\n\n #keep sizes of sets, not sets themselves\n for feat in features:\n features[feat] = len(features[feat])\n for c in categories:\n categories[c] = len(categories[c])\n for c in pos_categories_features:\n for feat in features:\n if feat in pos_categories_features[c]:\n pos_categories_features[c][feat] = len(pos_categories_features[c][feat])\n else:\n pos_categories_features[c][feat] = 0\n\n for c in categories:\n print c, categories[c], \"users\"\n\n print \"Computed counts\"\n \n mi = {}\n for feat in features:\n mi[feat] = 0.0\n for c in categories:\n #print c, feat, features[feat], pos_categories_features[c][feat]\n \n catprob = categories[c]/numusers\n\n #prob of speakers of category c using feat\n featprob = features[feat]/numusers\n jointprob = pos_categories_features[c][feat]/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob 
* log2(jointprob/(catprob * featprob))\n \n #prob of speakers of category c NOT using feat\n featprob = 1 - featprob\n jointprob = (categories[c] - pos_categories_features[c][feat])/numusers\n if jointprob > 0 and featprob > 0:\n mi[feat] += jointprob * log2(jointprob/(catprob * featprob))\n\n print \"Computed mutual information\"\n\n feature_scores = sorted(mi.items(), key=lambda x:x[1], reverse=True)\n refcat = categories.keys()[0] #pick one of the categories\n print 'Feature\\tMI\\tP({0}|Feature)\\tNum. users'.format(refcat)\n for feat, score in feature_scores[:200]:\n prob = pos_categories_features[refcat][feat]/features[feat]\n print '{0}\\t{1:.3f}\\t{2:.3f}\\t{3}'.format(feat, score, prob, features[feat])", "def topic(df, num_topics=5):\r\n\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "def analysis_1_result(primary_person_df,output_folder_path):\n male_death_count_df = primary_person_df\\\n .filter(col(\"PRSN_GNDR_ID\") == \"MALE\").agg(count(\"PRSN_GNDR_ID\").alias(\"MALE_DEATH_CNT\"))\n print(\"Analysis 1: \\nTotal number of crashes (accidents) in which number of persons killed are male is :\")\n male_death_count_df.show() #Dispalying result\n write_df_to_csv(male_death_count_df,output_folder_path+\"analysis_1_result\") #Writing to csv file", "def topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df", "def calculate_statistics(city,df):\n \n #Total Trip duration per gender\n if 'Gender' in df.columns:\n viewData=df.groupby(['Gender'])['Trip Duration'].sum().reset_index(name='Trip Duration')\n print('Trip duration per gender\\n')\n print(viewData)\n #number of missing values in the entire sheet\n missing_values=np.count_nonzero(df.isnull())\n print('Number of missing values in the {} dataset:{}'.format(city,missing_values))", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if 
len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def countCountry(self, docID, data):\n for entry in data.genData():\n if 'subject_doc_id' in entry and 'visitor_country' in entry and 'event_type' in entry:\n if entry['event_type'] == 'read':\n if str(entry['subject_doc_id']) == docID: # cast to string needed as input is string\n if entry['visitor_country'] in self.countryCounts:\n self.countryCounts[entry['visitor_country']] += 1\n else:\n self.countryCounts[entry['visitor_country']] = 1", "def document_distribution_per_topic(df_dominant_topic):\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'\n\n\n fig, axes = plt.subplots(2,2,figsize=(16,14), dpi=160, sharex=True, sharey=True)\n\n for i, ax in enumerate(axes.flatten()):\n df_dominant_topic_sub = 
df_dominant_topic.loc[df_dominant_topic.Dominant_Topic == i, :]\n doc_lens = [len(d) for d in df_dominant_topic.Text]\n ax.hist(doc_lens, bins = 1000, color=cols[i])\n ax.tick_params(axis='y', labelcolor=cols[i], color=cols[i])\n sns.kdeplot(doc_lens, color=\"black\", shade=False, ax=ax.twinx())\n ax.set(xlim=(0, 1000), xlabel='Document Word Count')\n ax.set_ylabel('Number of Documents', color=cols[i])\n ax.set_title('Topic: '+str(i), fontdict=dict(size=16, color=cols[i]))\n\n fig.tight_layout()\n fig.subplots_adjust(top=0.90)\n plt.xticks(np.linspace(0,1000,9))\n fig.suptitle('Distribution of Document Word Counts by Dominant Topic', fontsize=22)\n plt.show()", "def main():\n\n # open links.csv in order to access IMDB id numbers\n ifile = open('movie-countries.csv', \"rb\")\n reader = csv.reader(ifile)\n \n # writer for csv with countries\n ofile = open('country_stats.csv', \"wb\")\n writer = csv.writer(ofile)\n\n # deal with headers\n reader.next() # skip first line\n writer.writerow(['country', 'number of movies', 'number of primary movies'])\n\n # one dictionary for all mention of a country, one dictionary for if the country was the first one listed\n country_count_dict = {}\n country_count_primary_dict= {}\n\n # iterate through data\n for row in reader:\n # get the countries column\n countries = row[3]\n\n # add to dicionary of countries\n for country in countries.split(\"|\"):\n country_count_dict[country] = country_count_dict.get(country, 0) + 1\n\n # if it's the primary country\n if country == countries.split(\"|\")[0]:\n country_count_primary_dict[country] = country_count_primary_dict.get(country, 0) + 1\n\n # write to the file\n for key, value in country_count_dict.iteritems():\n writer.writerow([key , str(value), country_count_primary_dict.get(key, \"0\")])\n\n ifile.close()\n ofile.close()", "def posts_per_topic_all(request, pk):\n #update is_expired in all posts\n update_posts_expiration()\n #get all posts with a certain topic\n posts = Post.objects.filter(topic=pk)\n serializer = ViewPostSerializer(posts, many=True)\n return Response(serializer.data)", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def check_confidence(data):\n gender_confident_data = data[data['gender:confidence'] == 1] # Dataset size = 13926\n return gender_confident_data", "def get_titanic_fea(dataset):\n dataset['Name_length'] = dataset['Name'].apply(len)\n\n # Mapping Sex 不在map定义的 就是NaN\n dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n dataset['Has_Cabin'] = dataset['Cabin'].apply(lambda x: 0 if type(x) == float else 1)\n dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\n\n dataset['IsAlone'] = 0\n dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1\n\n # [Embarked]\n dataset['Embarked'] = dataset['Embarked'].fillna('0')\n dataset['Fare'] = dataset['Fare'].fillna(0)\n # Mapping Embarked\n dataset['Embarked'] = dataset['Embarked'].map({'0': 0, 'S': 1, 'C': 2, 'Q': 3}).astype(int)\n\n # [Fare]\n dataset['CategoricalFare'] = 
pd.qcut(dataset['Fare'], 4)\n # Mapping Fare\n dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3\n dataset['Fare'] = dataset['Fare'].astype(int)\n\n # [Age]\n age_avg = dataset['Age'].mean()\n age_std = dataset['Age'].std()\n age_null_count = dataset['Age'].isnull().sum()\n age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)\n dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list\n dataset['Age'] = dataset['Age'].astype(int)\n dataset['CategoricalAge'] = pd.cut(dataset['Age'], 5)\n # Mapping Age\n dataset.loc[dataset['Age'] <= 16, 'Age'] = 0\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n dataset.loc[dataset['Age'] > 64, 'Age'] = 4\n\n # [Name]\n # 称谓 Mr 、Miss 等\n def get_title(name):\n title_search = re.search(' ([A-Za-z]+)\\.', name)\n # If the title exists, extract and return it.\n if title_search:\n return title_search.group(1)\n return \"\"\n dataset['Title'] = dataset['Name'].apply(get_title)\n\n # 只保留4类Title\n dataset['Title'] = dataset['Title'].replace(\n ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n # Mapping titles\n title_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = dataset['Title'].fillna(0)\n\n # Feature selection\n drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']\n dataset = dataset.drop(drop_elements, axis=1)\n dataset = dataset.drop(['CategoricalAge', 'CategoricalFare'], axis=1)\n\n return dataset", "def test_extract_topics_custom_cv(base_bertopic_custom_cv):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic_custom_cv._update_topic_size(documents)\n c_tf_idf = base_bertopic_custom_cv._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic_custom_cv.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def export_topics(self):\n\n # format as a list (for json output), then sort descending by textIDCount\n topics = [{'name': topic['name'], 'count': topic['count'],\n 'verbatims': list(topic['verbatims']), 'textIDs': list(topic['textIDs']),\n 'textIDCount': topic['textIDCount'], 'rank': topic['rank'],\n 'children': '' if 'children' not in topic else topic['children']}\n for topic_id, topic in self.topics.items()]\n topics = sorted(topics, key=lambda topic: topic['textIDCount'], reverse=True)\n\n for i, topic in enumerate(topics):\n # Note that 'rank' is from topic, not child.\n topic['children'] = [{'name': child['name'], 'count': child['count'], 'rank': topic['rank'],\n 'verbatims': list(child['verbatims']), 
'textIDs': list(child['textIDs']),\n 'textIDCount': child['textIDCount']}\n for _, child in topic['children'].items()]\n\n topic['children'] = sorted(topic['children'], key=lambda lemma: lemma['textIDCount'], reverse=True)\n\n # If the subtopic count is greater than the topic count, than calc a multiplier to size each subtopic\n child_count = sum([child['textIDCount'] for child in topic['children']])\n child_count_multiplier = 1 if child_count < topic['textIDCount'] else topic['textIDCount'] / child_count\n\n for child in topic['children']:\n child['size'] = child['textIDCount'] * child_count_multiplier\n\n topic['size'] = topic['textIDCount'] - (child_count * child_count_multiplier)\n\n # Prune topics over max_topics (default ~40): we stopped calc'ing rank over the max_topics\n self.model_output[\"children\"] = [topic for topic in topics]\n\n # Build file name and save\n if self.data_date:\n date = datetime.strptime(self.data_date, \"%Y-%m-%d\").strftime('%d') # from YYYY-MM-DD to DD\n file_name = '{}-{}-Topics.txt'.format(self.corpus_name, date)\n else:\n file_name = '{}-Topics.txt'.format(self.corpus_name)\n\n with open(config.OUTPUT_DIR + file_name, 'w') as file:\n json.dump(self.model_output, file)", "def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame:\n data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question'])\n print(data_frame_pandas)\n return data_frame_pandas", "def test_topic_model_generator_dimensions( ):\n N = 100\n D = 1000\n K = 10\n W = 100\n\n tm = TopicModel.generate( K, D )\n assert( tm.topics.shape == (D, K) )\n assert( tm.weights.shape == (K,) )\n\n docs = tm.sample( N, words = W )\n # Each document is a column\n assert( docs.shape == (N, D) ) \n # Each doc should have 100 words\n assert( sc.all(docs.sum(1) == W) )", "def analysis_3_result(primary_person_df,output_folder_path):\n rankWindowSpec = Window.orderBy(col(\"MAX_FEMALE_COUNT\").desc())\n state_with_female_involved_df = primary_person_df \\\n .filter(col(\"PRSN_GNDR_ID\") == \"FEMALE\") \\\n .groupBy(\"DRVR_LIC_STATE_ID\", \"PRSN_GNDR_ID\") \\\n .agg(count(\"*\").alias(\"MAX_FEMALE_COUNT\"))\\\n .withColumn(\"RANK\", row_number().over(rankWindowSpec))\\\n .filter(\"RANK=1\") \\\n .drop(\"RANK\",\"PRSN_GNDR_ID\")\\\n .select(col(\"DRVR_LIC_STATE_ID\").alias(\"STATE\"),\"MAX_FEMALE_COUNT\")\n print(\"Analysis 3:\\nState with highest number of accidents in which females are involved is: \")\n state_with_female_involved_df.show()\n write_df_to_csv(state_with_female_involved_df,output_folder_path+\"analysis_3_result\")", "def group_topics(sent_topics_sorteddf):\n new_topics=pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0).sum()\n plt.pie(new_topics,labels=new_topics.index,autopct='%.0f',pctdistance=.8)\n plt.title('Topic Share %');\n\n new_topic_words = pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 
'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0)['Keywords'].sum()\n [print(f'{topic}: ' + words) for topic,words in zip(new_topic_words.index,new_topic_words)]", "def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")", "def property_count_function(listOfProperties):\n\n property_count = {} # Empty dict, is gonna look like this: property_count{property : count}\n for lists in listOfProperties:\n try:\n for properties in lists:\n property_count[properties] = property_count.get(properties, 0) + 1\n except TypeError as e:\n print(e)\n\n # Converts the dictionary to a dataframe\n property_dataframe = pd.DataFrame(list(property_count.items()), columns=['Property', 'Frequency'])\n # property_dataframe = property_dataframe.set_index(\"Property\")\n property_dataframe = property_dataframe.sort_values(by=['Frequency'], ascending=False)\n\n return property_dataframe", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def run_gender_freq(corpus):\n novels = corpus._load_novels()\n c = len(novels)\n loops = c//10 + 1\n\n num = 0\n\n while num < loops:\n dictionary = {}\n for novel in novels[num * 10: min(c, num * 10 + 9)]:\n d = {'he': novel.get_word_freq('he'), 'she': novel.get_word_freq('she')}\n d = get_comparative_word_freq(d)\n lst = [d[\"he\"], d[\"she\"]]\n 
book = novel.title[0:20] + \"\\n\" + novel.author\n dictionary[book] = lst\n display_gender_freq(dictionary, str(num))\n num += 1", "def process_family_frequencies(self, parent, family):\n if not family:\n return None\n counts = {}\n n_tokens = 0\n for description_array in family:\n seen = set([])\n for token in description_array:\n counts[token] = counts.get(token, 0) + 1\n n_tokens += 1\n\n self.word2tf[token] = self.word2tf.get(token, 0) + 1\n self.n_words += 1\n if token not in seen:\n self.word2df[token] = self.word2df.get(token, 0) + 1\n seen.add(token)\n\n self.family2tf[parent] = {tok: count/n_tokens for tok, count in counts.items()}\n self.data.extend(family)\n return True", "def age_12_count() :\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import itertools\n\n train = pd.read_csv('./data/train.csv')\n \n # Split data to contain ages only up to 12 months\n train_12_months = train.loc[train['Age'] < 13, ['State','Type', 'Age', 'AdoptionSpeed']]\n\n # Divide by dog (Type = 1) and cat (Type = 2)\n dog_df = train_12_months.loc[train_12_months['Type'] == 1, :]\n cat_df = train_12_months.loc[train_12_months['Type'] == 2, :]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_count = []\n \n cat_age_labels = []\n cat_count = []\n \n # Find dog count for each age\n for i in range(dog_min_age, dog_max_age + 1) :\n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_count.append(count)\n dog_age_labels.append(i)\n\n # Find cat count for each age\n for i in range(cat_min_age, cat_max_age + 1) :\n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_count.append(count)\n cat_age_labels.append(i)\n \n # Plot bar graphs\n plt.figure()\n index = np.arange(len(dog_age_labels))\n plt.bar(index, dog_count)\n plt.xlabel('Age in Months')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Dogs Up to 12 Months of Age')\n plt.savefig('dog12.png', bbox_inches='tight')\n \n \n plt.figure()\n index = np.arange(len(cat_age_labels))\n plt.bar(index, cat_count)\n plt.xlabel('Age in Months')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Cats Up to 12 Months of Age')\n plt.savefig('cat12.png', bbox_inches='tight')", "def task2(self, doc) -> dict:\n country_count = {}\n match_records = []\n for entry in self.records:\n if (entry['event_type'] =='read'):\n if entry['subject_doc_id'] == doc:\n match_records.append(entry)\n for rec in match_records:\n if (rec['visitor_country'] in country_count):\n country_count[rec['visitor_country']] += 1\n else:\n country_count[rec['visitor_country']] = 1\n print(country_count)\n return country_count", "def analyze_data(df, sentiment_col, tweet_col, path):\n\n # create empty dictionaries to store all encountered words and their frequencies\n all_dict = {}\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n # initialize counters to counter total number of tweets based on their emotion\n pos_count = 0\n neg_count = 0\n neu_count = 0\n\n # iterate through each row of the df\n for index, row in df.iterrows():\n if row[sentiment_col] == \"positive\":\n pos_count = iterate_words(\n pos_count, row[tweet_col], all_dict, pos_dict)\n\n if row[sentiment_col] == \"negative\":\n neg_count = iterate_words(\n neg_count, row[tweet_col], all_dict, neg_dict)\n\n if row[sentiment_col] == \"neutral\":\n neu_count = iterate_words(\n neu_count, 
row[tweet_col], all_dict, neu_dict)\n\n # visualize statistics\n visualize_stats(all_dict, 'all_plot.png', 'all_cloud.png',\n 'Word frequency in all tweets', path)\n visualize_stats(pos_dict, 'pos_plot.png', 'pos_cloud.png',\n 'Word frequency in positive tweets', path)\n visualize_stats(neg_dict, 'neg_plot.png', 'neg_cloud.png',\n 'Word frequency in negative tweets', path)\n visualize_stats(neu_dict, 'neu_plot.png', 'neu_cloud.png',\n 'Word frequency in neutral tweets', path)\n\n # make plot for emotion frequency\n emotions = ('Positive', 'Negative', 'Neutral')\n freq = [pos_count, neg_count, neu_count]\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.xaxis.grid(False)\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.bar(range(len(emotions)), freq, align='center',\n color=['forestgreen', 'firebrick', 'goldenrod'])\n plt.xticks(range(len(emotions)), emotions)\n plt.title('Tweet frequency based on emotion')\n plt.savefig(path + 'emotion_plot.png')\n plt.close()\n\n # make pie for emotion frequency\n sizes = [pos_count / len(df.index), neg_count /\n len(df.index), neu_count / len(df.index)]\n colors = ['forestgreen', 'firebrick', 'goldenrod']\n plt.pie(sizes, labels=emotions, colors=colors,\n autopct='%1.1f%%', startangle=140)\n plt.title('Tweet frequency based on emotion')\n plt.axis('equal')\n plt.savefig(path + 'emotion_pie.png')\n plt.close()", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def get_data(self):\n\n self.cur.execute('SELECT year, sex, education, score from vocabulary_scores;')\n scores = dict()\n education = dict()\n count = dict()\n\n for row in self.cur :\n if row[0] in scores:\n if row[1] in scores[row[0]]:\n scores[row[0]][row[1]] += int(row[3])\n education[row[0]][row[1]] += int(row[2])\n count[row[0]][row[1]] += 1\n else:\n scores[row[0]][row[1]] = int(row[3])\n education[row[0]][row[1]] = int(row[2])\n count[row[0]][row[1]] = 1\n else:\n # scores[year] = {gender: score}\n scores[row[0]] = {row[1]: int(row[3])}\n education[row[0]] = {row[1]: int(row[2])}\n count[row[0]] = {row[1]: 1}\n\n scores, education = self.average_scores(scores, education, count)\n\n return scores, education", "def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)", "def get_paper_count_per_topic(topic_model, start_year, end_year, debug=False):\n papers_count = get_papers_per_topic(topic_model, None, start_year, end_year, None, debug=debug)\n return sorted(papers_count.items(), key=operator.itemgetter(1), reverse=True)", "def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n 
stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])", "def model_topics(df):\n\n data = df.text.values.tolist()\n data_words = list(sent_to_words(data))\n\n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n\n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words_nostops,bigram_mod)\n\n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n nlp = spacy.load('en', disable=['parser', 'ner'])\n\n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n\n # Create Corpus\n texts = data_lemmatized\n\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n # Perform Topic Modeling for number of topics ranging from 5 to 50 in steps of 5\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=5, limit=50, step=5)\n\n return model_list,coherence_values,corpus,id2word", "def counts_table(data, attr):\n pd.options.mode.chained_assignment = None # default='warn'\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n # expanding a table to have all variable options in a column with their \n # parent attribute\n allvariables = attr.apply(lambda x: pd.Series(x['vars']),axis=1).stack().reset_index(level=1, drop=True)\n allvariables.name='var'\n allvariables = attr.drop('vars', axis=1).join(allvariables)\n av = allvariables.drop(attr.index[-1])\n # populate the table with counts\n for c in classlist:\n clist = []\n count = 0\n for i, row in av.iterrows():\n att = row['attr']\n var = row['var']\n sub = data[[att,'class']]\n sub = sub[sub[att]==var]\n if not sub.empty:\n ssub = sub[sub['class']==c]\n if not ssub.empty:\n count = len(ssub)\n else:\n count = 0\n clist.append(count)\n av[c] = clist\n\n return av", "def get_mentions(self, column, list_of_types, total=False, average=False):\n for mbti_type in list_of_types:\n self.df[mbti_type + '_mentions'] = [sum([x.casefold().count(mbti_type.casefold()) for x in post]) for post in self.df[column]]\n if total == True:\n mention_cols = [col for col in self.df.columns if 'mentions' in col]\n self.df['total_mentions'] = self.df.filter(mention_cols).sum(axis=1)\n if average == True:\n self.df['avg_mentions_per_post'] = self.df['total_mentions'] / self.df['count_posts']", "def create_topic_columns(videos, topics):\n \n # Clear values\n videos['relevant'] = False\n\n # Create masks for each topic so we can later 
filter videos by topics\n topic_masks = []\n for _, topic in topics.iterrows():\n videos[topic['slug']] = False # Clear values\n pattern = get_pattern(topic)\n topic_mask = videos.apply(lambda video: is_relevant(video, pattern), axis=1)\n topic_masks.append(topic_mask)\n videos[topic['slug']] = topic_mask\n\n # Mark video as 'relevant' if it mentions any of the topics\n videos['relevant'] = np.any(np.column_stack(topic_masks), axis=1)", "def user_stats(df):\n ##task4: user info\\type\\gender&birth year if available for the city\n ##user type data is avalable for all cities\n ## count of each type, frequency is printed in a table \"only i New York city 3 types!\n # TO DO: Display counts of user types\n user_counts = df['User Type'].value_counts().to_frame()\n user_percentage = df['User Type'].value_counts(normalize=True)*100\n user_count_stat=pd.concat([user_counts,user_percentage],axis=1,join=\"inner\")\n user_count_stat.columns=['User_Count', 'User_Frequency%'] \n print(\"There are {} types of users : \".format(len(user_counts)))\n for i in range (len(user_counts)):\n print(user_counts.index.values[i])\n print(\"\\n\\n\",\"*\"* 40)\n print(\"The count and percentage of each type is:\")\n print(\"*\"* 40)\n print(user_count_stat.round(2))\n print(\"*\"* 40)\n\n #Task 4.2: TO DO: Display counts of gender\n ##functions to calculate gender distriburion\"only for NY and washington\"\n ## start with if statement to check if the data is in dataseries\n ## if data is apended in the future no need to change the code\n ## display gender count and frequency in every dataset\n if 'Gender' in df.columns :\n print(\"\\n Gender data is available for this city\")\n user_gender = df['Gender'].value_counts()\n user_gender_percentage = df['Gender'].value_counts(normalize=True)*100\n gender_stat=pd.concat([user_gender,user_gender_percentage],axis=1,join=\"inner\")\n gender_stat.columns=['Gender_Count', 'Gender_Frequency%']\n print(\"The Number of users from each Gender and their percentage is given by\")\n print(\"#\"* 40)\n print(gender_stat.round(2))\n print(\"#\"* 40)\n \n else :\n print(\"\\n Gender data is not available for this city\")\n\n # task 4.2: TO DO: Display earliest, most recent, and most common year of birth\n ## functions to calculate year of birth statistics\n ## condition to check if data is available for the current city\n # calculate the most common city, earliest and latest\n if 'Birth Year' in df.columns :\n print(\"\\n Birth Year data is available for this city\")\n birth_year = df['Birth Year']\n popular_year =df['Birth Year'].mode()[0]\n print(\"Some statistics related to year of birth\")\n print(\"-\"* 40)\n print(\"The earliest year of birth is : ****\"+\"{:.0f}\".format(min(birth_year)),\"***\");\n print(\"The latest year of birth is : *** \"+\"{:.0f}\".format(max(birth_year)),\"***\");\n print(\"The most common year of birth is : ***\"+\"{:.0f}\".format(popular_year),\"***\");\n \n else :\n print(\"\\n Birth Year data is not available for this city\")\n start_time = time.time()\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def train_ngrams(dataset):\n trigram_counts = dict()\n bigram_counts = dict()\n unigram_counts = dict()\n token_count = 0\n ### YOUR CODE HERE\n raise NotImplementedError\n ### END YOUR CODE\n return trigram_counts, bigram_counts, unigram_counts, token_count", "def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n 
self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), \"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])", "def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def get_initial_student_count(self):\n initial_date = self.start_date - timedelta(1)\n # date cannot be earlier than the school year start\n school_year_start_date = \\\n SchoolDB.models.SchoolYear.school_year_start_for_date()\n if (initial_date < school_year_start_date):\n initial_date = school_year_start_date\n section_roster_changes = self.section.get_section_roster_changes()\n student_dict = \\\n section_roster_changes.get_single_day_information(\n self.students, initial_date)\n male_count = 0\n female_count = 0\n for student in student_dict.values():\n if (student.gender == \"Male\"):\n male_count += 1\n else:\n female_count +=1\n #do not return 0 because it will generate a divide by 
zero in\n #the calling function\n if not male_count:\n male_count = 1\n if not female_count:\n female_count = 1\n return male_count, female_count", "def listTopicRelevance(request):\n if request.method == 'GET':\n user = request.user;\n data = [];\n for topic in Topic.objects.all():\n row = {};\n\n topicSerializer = TopicNestedSerializer(topic)\n topicSerializer.Meta.depth = 1;\n #row['topic'] = topicSerializer.data;\n user_visits = topic.visits.filter(user=user)\n visitSerializer = VisitSerializer(user_visits, many=True)\n #visitSerializer.Meta.depth = 1;\n row['visit_count'] = len(user_visits);\n if row['visit_count'] > 0:\n row['last_visit'] = user_visits.order_by('-visit_date')[0].visit_date\n else:\n row['last_visit'] = topic.created_at\n\n neighbor_visits = Visit.objects.filter(user=user, topic__relates_to__topic_to=topic)\n\n row['neighbor_visits_count'] = len(neighbor_visits);\n if row['neighbor_visits_count'] > 0:\n row['last_neighbor_visit'] = neighbor_visits.order_by('-visit_date')[0].visit_date;\n else:\n row['last_neighbor_visit'] = topic.created_at\n\n row['post_count'] = len(topic.posts.filter(user=user))\n row['like_count'] = len(topic.posts.filter(votes__user=user))\n row['relevance_score'] = 5*row['neighbor_visits_count'] - (timezone.now()-row['last_neighbor_visit']).total_seconds()/3600\n row['recommendation'] = row['relevance_score'] + topic.hotness\n\n data.append(row)\n\n print(data)\n return Response(data)", "def test_topic_reduction(reduced_topics):\n base_bertopic = BERTopic(bert_model='distilbert-base-nli-mean-tokens', verbose=False)\n nr_topics = reduced_topics + 2\n base_bertopic.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents.copy(), topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents.copy(), c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(base_bertopic.mapped_topics, dict)\n assert not set(base_bertopic.get_topics_freq().Topic).difference(set(new_documents.Topic))\n assert base_bertopic.mapped_topics", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n #Display counts of user types\n counts_of_user_types = df['User Type'].value_counts()\n print('counts of user types :',counts_of_user_types )\n \n \n\n\n # Display counts of gender\n try:\n counts_of_gender = df['Gender'].value_counts()\n print('counts of gender :',counts_of_gender)\n except KeyError:\n print('counts of gender : no data available')\n \n\n \n #Display earliest, most recent, and most common year of birth\n try:\n earliest=df['Birth Year'].min()\n most_recent=df['Birth Year'].max()\n most_common_year_of_birth=df['Birth Year'].mode()[0]\n print('earliest : ', earliest)\n print('most recent : ', most_recent)\n print( 'most common year of birth : ', most_common_year_of_birth)\n except KeyError:\n print('counts of gender : no data available') \n \n \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n \n\n #display_raw_data\n #please detrmine if you want to see some lines of raw data \n \n i=0\n while 
True:\n display_data = input('would you like to see (5) lines/next (5) lines of raw data ? , yes or no : ').lower()\n \n if display_data == 'yes':\n print(df[i:i+5])\n i+=5\n else:\n break", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return", "def infertopics(self):\n\n # Iterate over nodes missing topic attribute (only occurs for new nodes)\n for uid in self.scan(attribute=\"updated\"):\n # Remove updated attribute\n self.removeattribute(uid, \"updated\")\n\n # Get list of neighboring nodes\n ids = self.edges(uid)\n\n # Infer topic\n topic = Counter(self.attribute(x, \"topic\") for x in ids).most_common(1)[0][0] if ids else None\n if topic:\n # Add id to topic list and set topic attribute\n self.topics[topic].append(uid)\n self.addattribute(uid, \"topic\", topic)\n\n # Set topic rank\n self.addattribute(uid, \"topicrank\", len(self.topics[topic]) - 1)\n\n # Infer category\n category = Counter(self.attribute(x, \"category\") for x in ids).most_common(1)[0][0]\n self.addattribute(uid, \"category\", category)", "def filter_by_gender(self, gender):\n\n return self.subcorpus('author_gender', gender)", "def process_frequencies(df_corpus, wdir, min_MFF, max_MFF, mode, names_MFF):\n # Normalization of the frequencies by the sum of the text\n df_corpus = df_corpus.loc[:].div(df_corpus.sum(axis='columns'), axis=\"index\")\n if mode == \"train\":\n # If we are doing a training corpus, it is easier\n \n # The dataframe gets a new summatory column that we use to order the df \n df_corpus = df_corpus.T\n df_corpus[\"sum\"]=df_corpus.sum(axis=\"columns\")\n df_corpus = df_corpus.sort_values(by=\"sum\", ascending=False)\n \n # Only a given amount of words is taken\n df_corpus = df_corpus[min_MFF:max_MFF]\n # Summatory column is deleted and the df goes back to its normal format\n del df_corpus['sum']\n df_corpus = df_corpus.T\n print(mode, \" last feature: \", df_corpus.columns[-1])\n \n elif mode == \"eval\" or mode == \"test\":\n # If we create the evaluation or the test corpus, we have to check first the features of the train corpus because the 5000 MFW of the train corpus are NOT the 5000 MFW of the test corpus.\n # TODO: I don't know if that is the best way to do it. 
Maybe we should calculate the total amount of features in the different corpora, get the list of the n MFF and then fill the diferent matrixs with this features.\n df_corpus = df_corpus.reindex_axis(names_MFF, axis=1)\n # Only a given amount of words is taken\n df_corpus = df_corpus.T\n df_corpus = df_corpus[min_MFF:max_MFF]\n df_corpus = df_corpus.T\n print(mode, \" last feature: \", df_corpus.columns[-1])\n\n df_corpus = df_corpus.fillna(0)\n \n # The table is saved as csv\n df_corpus.to_csv(wdir+\"freq_table.csv\", sep='\\t', encoding='utf-8', index=True)\n\n return df_corpus", "def compute_subj_metrics():\n\t# each participant does task 1,2,3 with method A,B = 3*2*N\n\t# likert data includes age, gender, Q1 - Q10 = 2+10\n\n\t# set up data structure\n\tsubj_metrics = {}\n\tfor ID in range(10):\n\t\tfor method in [\"A\",\"B\"]:\n\t\t\t# sanity checks\n\t\t\tif ID not in subj_metrics:\n\t\t\t\tsubj_metrics[ID] = {}\n\t\t\tif method not in subj_metrics[ID]:\n\t\t\t\tsubj_metrics[ID][method] = [None]*12\n\n\there = os.path.dirname(os.path.realpath(__file__))\n\tsubdir = \"/data/experimental/\"\n\tdatapath = here + subdir + \"Likert_Responses.csv\"\n\n\tdata = {}\n\tfirstline = True\n\twith open(datapath, 'r') as f:\n\t\tfor line in f:\n\t\t\tif firstline:\n\t\t\t\tfirstline = False\n\t\t\t\tcontinue\n\t\t\tvalues = line.split(',')\n\t\t\tinfo = values[1:len(values)]\n\t\t\tID = int(info[9])\n\t\t\tmethod = info[10]\n\t\t\tage = info[11]\n\t\t\tgender = info[12]\n\t\t\t# store age\n\t\t\tsubj_metrics[ID][method][0] = age\n\t\t\tsubj_metrics[ID][method][1] = gender\n\t\t\t# parse likert data\n\t\t\tfor i in range(8):\n\t\t\t\tsubj_metrics[ID][method][i+2] = info[i]\n\t\t\tsubj_metrics[ID][method][10] = info[14]\n\t\t\tsubj_metrics[ID][method][11] = info[15]\n\n\treturn subj_metrics", "def get_topics(self):\n topics = self.word_topics\n return topics / topics.sum(axis=1)[:, None]", "def test_question_topics(self):\n p = ProductFactory()\n t1 = TopicFactory(slug='doesnotexist', product=p)\n t2 = TopicFactory(slug='cookies', product=p)\n t3 = TopicFactory(slug='sync', product=p)\n\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t3)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def user_stats(df):\n try: \n df = load_data(city, month, day)\n # TO DO: Display counts of user types\n count_user_type = df.groupby(['User Type']).count()\n print('Count of user type: ', count_user_type)\n\n # TO DO: Display counts of gender\n count_gender = df.groupby(['Gender']).count()\n print('Count of gender: ', count_gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n earliest = df['Birth Year'].min()\n print('Earliest birth year: ', earliest)\n\n recent = df['Birth Year'].max()\n print('Most recent birth year: ', recent)\n\n common = df['Birth Year'].mode()[0]\n print('Most common birth year: ', common)\n \n except KeyError:\n df = load_data(city, month, day)\n # TO DO: Display counts of user types\n count_user_type = df.groupby(['User Type']).count()\n print('Count of user type: ', count_user_type)\n \n \n return user_stats", "def query_transition_counts(ordered_pitch_types, query_filter):\n transition_counts = {}\n\n for first_pitch_type in ordered_pitch_types:\n 
transition_counts[first_pitch_type] = {}\n\n for second_pitch_type in ordered_pitch_types:\n count_query = PitchTransition.select().where(\n PitchTransition.first_pitch_type == first_pitch_type).where(\n PitchTransition.second_pitch_type == second_pitch_type)\n\n if query_filter:\n count_query = count_query.where(query_filter)\n\n transition_counts[first_pitch_type][second_pitch_type] = count_query.count()\n\n return transition_counts", "def test_IsCorrectGender(self):\n \n # load data\n ilist, flist = load_data()\n\n # test case 01\n for fm in flist:\n result, msg = IsCorrectGender(fm.WifeID,fm.HusbandID, ilist)\n self.assertTrue(result)\n\n # test case 02\n ilist[0].Gender = 'F'\n ilist[1].Gender = 'M'\n result, msg = IsCorrectGender(flist[0].WifeID,flist[0].HusbandID, ilist)\n self.assertFalse(result)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type = df['User Type'].value_counts()\n print('The types of users are: \\n{}' .format(user_type))\n\n # TO DO: Display counts of gender\n while True:\n try:\n gender_type = df['Gender'].value_counts()\n print('The users classified by gender are: \\n{}' .format(gender_type))\n max_birth_year = df.groupby(['Gender'])['Birth Year'].max()\n min_birth_year = df.groupby(['Gender'])['Birth Year'].min()\n mode_birth_year = df['Birth Year'].mode()\n print ('1. The most recent year of birth is: {}\\n2. The most earliest year of birth is: {}\\n3. The most common year of birth is: {}'.format(max_birth_year, min_birth_year, mode_birth_year))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n break\n except:\n print('\\n Sorry, the genre field does not exist in the selected file, so no results are shown.')\n break", "def assign_topics(self):\n\n # Load the dataset which has topic tags for each sentence\n dt, dt_less = self.assign_topics_to_sentences()\n \n dt_copy = dt\n \n # Minimum number of tags needed to tag the overall response\n dt['min_num_tags'] = dt['num_sent'].apply(lambda x: math.ceil(0.3*x))\n \n # Final dataset with full survey response and its tags\n final_dt = dt.groupby(self.id_col_name).agg({'tags': sum\n , 'num_sent': min\n , 'min_num_tags': min\n# , 'sentences': lambda x: \"%s\" % '. 
'.join(x)\n , self.col_name: min})\n final_dt.reset_index(level = 0, inplace = True)\n final_dt['topics'] = final_dt.apply(lambda x: set([i for i in x.tags if x.tags.count(i) >= x.min_num_tags])\n , axis = 1)\n\n final_dt_less = final_dt[[self.id_col_name, self.col_name, 'topics']]\n\n return dt_copy, dt_less, final_dt, final_dt_less", "def get_tops(name_file,number,gender):\r\n tops = []\r\n num = 0\r\n f = open(name_file)\r\n reader = csv.reader(f)\r\n for row in reader:\r\n if row.__contains__(gender.upper()):\r\n row[2] = int(row[2])\r\n tops.append(row)\r\n num = num + 1\r\n if num == number:\r\n break\r\n return tops", "def count_persons(self):\n\n # full path of personData file\n path = self.pretty_print_path()\n\n data = self.person_data[[\"person_id\", \"telestatus\"]].copy()\n\n conditions = [(data[\"telestatus\"] == \"Not a Worker\"),\n (data[\"telestatus\"].isin([\"No Telecommute\",\n \"1 day a week\",\n \"2-3 days a week\",\n \"4+ days a week\",\n \"Work from Home\"]))]\n choices = [\"Non-worker\", \"Worker\"]\n data[\"telestatus\"] = np.select(conditions, choices, default=data[\"telestatus\"])\n\n counts = data.groupby([\"telestatus\"]).count()\n counts.loc['Total'] = counts.sum()\n counts.reset_index(inplace=True)\n counts['File'] = path # add file name\n\n results = counts.pivot(index=\"File\", columns=\"telestatus\", values=\"person_id\")\n\n # add percentage share\n counts[\"Total\"] = data.person_id.count()\n counts[\"share\"] = counts.person_id / counts.Total\n percent_share = counts.pivot(index=\"File\", columns=\"telestatus\", values=\"share\")\n\n results = results.append(percent_share, ignore_index=False, sort=True)\n\n cols = [\"Worker\",\n \"Non-worker\",\n \"Total\"]\n\n return results[cols]", "def find_dominant_topic(df_topic_sents_keywords):\n\n # Format\n df_dominant_topic = df_topic_sents_keywords.reset_index()\n df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n # Group top 5 sentences under each topic\n sent_topics_sorteddf = pd.DataFrame()\n\n sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n start = time.time()\n for i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf = pd.concat([sent_topics_sorteddf, \n grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], \n axis=0)\n print(f'Group done. 
Total time {time.time() - start} seconds.')\n\n # Reset Index \n sent_topics_sorteddf.reset_index(drop=True, inplace=True)\n\n # Format\n sent_topics_sorteddf.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Text\"]\n return sent_topics_sorteddf", "def count_mentioned_pol_figures(data):\n figures_mentioned = {}\n figures = get_political_figures()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for figure in figures:\n if figure + \" \" in (subject_words + message_words):\n if figure in figures_mentioned:\n figures_mentioned[figure] += 1\n else:\n figures_mentioned[figure] = 0\n\n return pd.DataFrame(figures_mentioned)", "def get_tweets_by_topic(topic, start_date, end_date):\n try:\n query = f\"select tweet, sentence, polarity, subjectivity from {db_schema}.{db_table_tweet} t, {db_schema}.{db_table_pred} tp where t.id_tweet=tp.id_tweet and topic='{topic}' and tweet_date between str_to_date('{start_date}', '%Y-%m-%d') and str_to_date('{end_date}', '%Y-%m-%d')\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweets = cur.fetchall()\n columns = [col[0] for col in cur.description]\n logger.info(f'TOPIC: {topic}, N° TWEETS: {len(tweets)}') \n return tweets, columns\n \n except Exception as ex:\n logger.exception(ex)\n return f'Exception: {ex}'", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_count = df['User Type'].value_counts()\n print(f'{user_count}\\n')\n\n # Display counts of gender if only the data contains that column\n if 'Gender' in df:\n gender_count = df['Gender'].value_counts()\n print(gender_count, '\\n')\n\n # Display earliest, most recent, and most common year of birth if \"Birth Year\" column is found\n earliest = df['Birth Year'].min()\n print('Most earliest birth year:', int(earliest))\n recent = df['Birth Year'].max()\n print('Most recent birth year', int(recent))\n common = df['Birth Year'].value_counts().idxmax()\n print('Most common birth year', int(common))\n\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)", "def topic_table(topic):\r\n table = Table(['property', 'value'])\r\n table.align['property'] = 'r'\r\n table.align['value'] = 'l'\r\n\r\n table.add_row(['name', topic['name']])\r\n table.add_row(['tags', listing(topic['tags'] or [])])\r\n return table", "def split_dataset_for_one_attribute(gender_confident_data, attribute='text_cleaned'):\n\n # Converting text strings into a matrix of word token counts.\n cv = CountVectorizer()\n inputString = cv.fit_transform(gender_confident_data[attribute])\n\n # Encodes class labels from 0 to Num_of_classes-1\n le = LabelEncoder()\n outputString = le.fit_transform(gender_confident_data['gender'])\n\n # Splitting the data such that 66% of the data is assigned as training data and the rest as the test data set.\n input_train, input_test, output_train, output_test = train_test_split(inputString, outputString, train_size=0.66)\n\n return input_train, output_train, input_test, output_test", "def expectation_step(self,number_of_topics):\n createdDataMat = np.zeros([self.number_of_documents, number_of_topics, self.vocabulary_size], dtype=np.float)\n print(\"E step:\")\n\t\t#print(\"Initial value of createdDataMat >>>>>\"+createdDataMat)\n \n \t\n for docValue in range(0, self.number_of_documents):\n for wordValue in range(0, self.vocabulary_size):\n recordSum 
= 0\n for topicValue in range(0, number_of_topics):\n self.topic_prob[docValue][topicValue][wordValue] = (self.document_topic_prob[docValue][topicValue] * self.topic_word_prob[topicValue][wordValue])\n recordSum = recordSum + self.topic_prob[docValue][topicValue][wordValue]\n for topicValue in range(0, number_of_topics):\n self.topic_prob[docValue][topicValue][wordValue] = self.topic_prob[docValue][topicValue][wordValue]/recordSum", "def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]" ]
[ "0.6474987", "0.63504976", "0.6320572", "0.62746453", "0.627228", "0.6258495", "0.61637425", "0.6114557", "0.5977991", "0.58644605", "0.5836324", "0.5831244", "0.5673213", "0.5647648", "0.5610889", "0.5574155", "0.5492381", "0.54264843", "0.53719985", "0.53717935", "0.5295619", "0.52562594", "0.5255951", "0.52543795", "0.522742", "0.52179205", "0.519302", "0.51659757", "0.51119727", "0.51042306", "0.5091851", "0.5090571", "0.50634915", "0.50474554", "0.50215715", "0.5015411", "0.5008505", "0.49809456", "0.49774876", "0.49774876", "0.49541524", "0.49456522", "0.4938621", "0.4927697", "0.49162355", "0.4905255", "0.49034148", "0.4901713", "0.48947382", "0.48945874", "0.48576233", "0.4853687", "0.48180065", "0.4796382", "0.47956917", "0.47933", "0.47849742", "0.47778904", "0.4776961", "0.47758076", "0.47752246", "0.47732186", "0.47640452", "0.47639966", "0.475554", "0.4753862", "0.47459054", "0.47447842", "0.47386268", "0.4736431", "0.4734776", "0.47332802", "0.4733193", "0.4702847", "0.47012332", "0.47006556", "0.46998748", "0.46925905", "0.46923918", "0.46828443", "0.46812356", "0.4676416", "0.46734202", "0.46649432", "0.46560308", "0.46494696", "0.4647872", "0.4646516", "0.46460456", "0.46407625", "0.46205994", "0.46184963", "0.46179497", "0.46134374", "0.4610658", "0.45986965", "0.4596978", "0.45967084", "0.45942363", "0.45926726" ]
0.80123305
0
From all the data, it takes the TopicID column and counts the topics based on ethnicity
def get_data_frame_count_black_ethnicity_by_topic(data_frame: DataFrame) -> pb.DataFrame:
    data_frame_topic = data_frame \
        .filter(data_frame["Stratification1"].contains("Black, non-Hispanic")) \
        .distinct() \
        .groupBy("TopicID") \
        .count() \
        .sort("TopicID")
    print("The following table represent the number of black ethnicity people group by the topic: ")
    data_frame_topic.show()
    data_frame_pandas = data_frame.toPandas()
    return data_frame_pandas
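A minimal usage sketch for the function above; the SparkSession setup and the CSV file name are assumptions for illustration and are not part of the dataset record — the only requirement taken from the code itself is that the input contains the Stratification1 and TopicID columns.

# Minimal usage sketch -- SparkSession setup and file name are assumptions.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("topic-counts").getOrCreate()

# Assumed input: a CSV with at least the columns Stratification1 and TopicID.
data_frame = spark.read.csv("indicators.csv", header=True, inferSchema=True)

# As written above, the function prints the per-topic counts via .show()
# and returns the full input frame converted to pandas.
result = get_data_frame_count_black_ethnicity_by_topic(data_frame)
print(result.head())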
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data_frame_count_male_gender_by_topic(data_frame: DataFrame) -> pb.DataFrame:\n data_frame_topic = data_frame \\\n .filter(data_frame[\"Stratification1\"].contains(\"Male\")) \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n\n print(\"The following table represent the number of men group by the topic: \")\n data_frame_topic.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def topic_count():\n # get the number topics and their counts as tuples: ('Topic', 123)\n query = peewee.RawQuery(Post, \"select topic, count(topic) from post group by topic\").tuples()\n\n # turn the result of the query object into a list of tuples\n tuple_result = []\n for each_tuple in query:\n tuple_result.append(each_tuple)\n\n # sort by the the second element, which is value, of each tuple in the list\n tuple_result = sorted(tuple_result, key=lambda x: x[1], reverse=True)\n\n # separate the topic and count into two lists for graphing purpose\n topics = []\n counts = []\n\n for each_tuple in tuple_result:\n topics.append(each_tuple[0])\n counts.append(each_tuple[1])\n\n return counts, topics", "def topic_stats(df_topic_sents_keywords):\n\n # Number of Documents for Each Topic\n topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()\n\n # Percentage of Documents for Each Topic\n topic_contribution = round(topic_counts/topic_counts.sum(), 4)\n\n # Topic Number and Keywords\n topic_num_keywords = df_topic_sents_keywords[['Dominant_Topic', 'Topic_Keywords']]\n\n # Concatenate Column wise\n df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)\n\n # Change Column names\n df_dominant_topics.columns = ['Dominant_Topic', 'Topic_Keywords', 'Num_Documents', 'Perc_Documents']\n\n # Show\n df_dominant_topics", "def get_data_frame_count_type_of_topic(data_frame: DataFrame) -> pb.DataFrame:\n try:\n data_frame = data_frame \\\n .select(\"TopicID\", \"Question\") \\\n .distinct() \\\n .groupBy(\"TopicID\") \\\n .count() \\\n .sort(\"TopicID\")\n except Py4JError:\n raise AnalysisException('One columns is incorrect')\n print(\"The following table represent the number of the type of each topic\")\n data_frame.show()\n data_frame_pandas = data_frame.toPandas()\n return data_frame_pandas", "def count_topic_dist(self):\n if len(self.representants) == 0:\n self.log_writer(\"Representants not set. 
Cannot make topic dist.\")\n return\n for key, value in self.representants.items():\n self.topic_distributions.append(len(value)/len(self.training_docs))\n self.topic_numbers.append(key)", "def test_extract_topics():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model = BERTopic()\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def get_diseases(self):\n self.diseases = self.data.groupby('topic')['topic'].count()", "def get_paper_counter_per_topic_id(all_topic_assignments):\n counter = {}\n for topic_assignment in all_topic_assignments:\n for topic_index, topic_value in topic_assignment:\n if topic_index not in counter:\n counter[topic_index] = 0\n\n counter[topic_index] += 1\n\n return counter", "def get_rdd_count_type_of_topy(rdd: list) -> pb.DataFrame:\n data_frame_pandas = pb.DataFrame(rdd, columns=['Topic', 'Question'])\n print(data_frame_pandas)\n return data_frame_pandas", "def test_extract_topics(base_bertopic):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(documents)\n c_tf_idf = base_bertopic._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def trainCount(\n trainData, \n questionType,\n questionDict,\n questionIdict, \n objDict, \n objIdict,\n numAns):\n count_wa = np.zeros((len(objIdict), numAns))\n count_a = np.zeros((numAns))\n objIds = extractObjId(\n trainData[0], \n questionType, \n questionDict, \n questionIdict)\n for i in range(objIds.shape[0]):\n objId = objIds[i]\n obj = questionIdict[objId - 1]\n ansId = trainData[1][i, 0]\n objId2 = objDict[obj]\n count_wa[objId2, ansId] += 1\n count_a[ansId] += 1\n # Add UNK count\n count_a[-1] += 1\n return count_wa, count_a", "def connect_topic_id_to_topics(self, model):\n confidence = []\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key, tp_num, val / len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2), reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n 
associated_topics.append(conf[0])\n self.log_writer.add_log(\n 'Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],\n conf[1],\n conf[2]))\n self.topic_indexes[conf[0]] = conf[1]\n\n for key, value in self.topic_indexes.items():\n self.topics_of_index[value] = [key]", "def ngram_detection(self, min_topic_count=5, min_text_id_count=4):\n\n for text_id, text in self.texts.items():\n # single-word topics act a bit different (no zips or comprehensions)\n # store data in self.topics, not zip_grams\n for word in text['doc']:\n word_lemma = word.text.lower() if word.lemma_ == '-PRON-' else word.lemma_\n\n if {word.text}.intersection(self.punct) or {word.lemma_}.intersection(self.stop_words):\n continue\n\n if not (word.pos in self.nouns or word.ent_type in self.entities):\n continue\n\n if word_lemma in self.topics:\n self.topics[word_lemma][\"count\"] += 1\n self.topics[word_lemma][\"textIDs\"] |= {text_id}\n self.topics[word_lemma][\"verbatims\"] |= {word.text.lower()}\n else:\n self.topics[word_lemma] = {\"name\": word_lemma,\n \"count\": 1,\n \"textIDs\": {text_id},\n \"verbatims\": {word.text.lower()},\n \"subtopics\": {}}\n\n # Populate self.ngrams and self.topics\n for text_id, text in self.texts.items():\n doc = text['doc']\n\n # Find pentagrams - ngrams with 5 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:], doc[4:]):\n self._ngram_counter(ngram, 5, text_id, doc)\n\n # Find pentagrams - ngrams with 4 words\n for ngram in zip(doc, doc[1:], doc[2:], doc[3:]):\n self._ngram_counter(ngram, 4, text_id, doc)\n\n for ngram in zip(doc, doc[1:], doc[2:]):\n self._ngram_counter(ngram, 3, text_id, doc)\n\n for ngram in zip(doc, doc[1:]):\n self._ngram_counter(ngram, 2, text_id, doc)\n\n\n # Add text_id_count (the number of texts that the topic occurs in; so a topic might occur 50 times,\n # but it's only mentioned in 3 different texts, we'd show 3.\n for _, topic in self.topics.items():\n topic['textIDCount'] = len(topic['textIDs'])\n for _, ngram in self.ngrams.items():\n ngram['textIDCount'] = len(ngram['textIDs'])\n\n # Eliminate rarely occurring topics and ngrams.\n self.topics = {k: v for k, v in self.topics.items() if\n v['textIDCount'] >= min_text_id_count and v['count'] >= min_topic_count}\n self.ngrams = {k: v for k, v in self.ngrams.items() if\n v['textIDCount'] >= min_text_id_count}\n\n # Loop through each ngram pair: outer loop is all ngrams, inner loop is all ngrams\n for ngram_lemma, ngram in self.ngrams.items():\n for ngram_plus_lemma, ngram_plus in self.ngrams.items():\n # only stay in this loop if the inner ngram is one word longer than the outer loop and if the\n # inner loop lemma contains the outer group lemma (avoid partial word matches like man in woman)\n # r'\\b' + ngram_lemma + r'\\b' --> does the ngram lemma fit in ngram_plus lemma (\\b is word boundary)\n if ngram['n'] + 1 != ngram_plus['n']:\n continue\n\n if not re.search(r'\\b' + ngram_lemma + r'\\b', ngram_plus_lemma):\n continue\n\n # Is the absolute count of occurrences and the count of text_id occurrences both big enough to use it\n # instead of the other loop?\n if ngram_plus['count'] + 3 >= ngram['count'] and ngram_plus['textIDCount'] + 3 >= ngram['textIDCount']:\n # TODO: Is this the right action (deleting shorter, but not much more explanatory) phrase?\n # TODO: Is this enough? 
Or will I end up double explaining things sometimes?\n ngram['count'] = -1\n\n # Eliminate newly demoted items\n self.ngrams = {ngram_lemma: ngram for ngram_lemma, ngram in self.ngrams.items() if ngram['count'] > 0}", "def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)", "def assign_topics_to_sentences(self):\n \n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # Read the data - id and reponse column\n dt = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n\n\n \n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n \n # Split into sentences\n dt['sentences'] = self.getSentences(dt[self.col_name])\n \n \n \n\n \n \n\n # Store number of sentences in each response as a column\n dt['num_sent'] = dt['sentences'].apply(lambda x: len(x))\n\n # Split each row into multiple rows - one row for each sentence\n dt = (dt\n .set_index([self.id_col_name, self.col_name, 'num_sent'])['sentences']\n .apply(pd.Series)\n .stack()\n .reset_index()\n .drop('level_3', axis = 1)\n .rename(columns = {0:'sentences'}))\n\n\n # Clean the sentences\n dt['sentences_cleaned'] = self.cln.clean(dt['sentences'], typo = self.typo_ind)\n\n # Remove useless sentences\n dt['sentences_cleaned'] = self.getValid(dt['sentences_cleaned'])\n\n # Remove rows with NA values\n dt = self.removeEmptyData(dt)\n\n # Tokenize words in the cleaned sentences\n responses = list(self.sent_to_words(dt['sentences_cleaned'].values.tolist()))\n\n\n # Call the lexicon function\n topic_lexicons = self.prepare_lexicons()\n\n # Lists to store results\n count_topic_all = []\n actual_topic_all = []\n\n # Tag each response into a topic\n for response in responses:\n\n count_topic = []\n actual_topic = []\n\n for topic in topic_lexicons:\n\n # Count occurance of each word in word stock in the response\n temp = sum(dict((x, response.count(x)) for x in topic).values())\n count_topic.append(temp)\n\n\n for index, value in enumerate(count_topic):\n\n # Consider the topic if atleast one(?) 
word from its word-stock occurs in the response\n if value > 0:\n actual_topic.append(topic_dict[index])\n\n\n # If more than 3 topics are tagged for single sentence, refine by increasing\n # cutoff to at least 2 words instead of 1\n if len(actual_topic) > 3:\n\n actual_topic = []\n for index, value in enumerate(count_topic):\n\n if value > 1: # Increase cutoff\n actual_topic.append(topic_dict[index])\n\n count_topic_all.append(count_topic)\n actual_topic_all.append(actual_topic)\n\n\n dt['tags'] = actual_topic_all\n dt['num_tags'] = count_topic_all\n\n\n # Select only the most important columns\n dt_less = dt[[self.id_col_name, 'sentences', 'tags']]\n\n return dt, dt_less", "def test_extract_topics_custom_cv():\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n\n cv = CountVectorizer(ngram_range=(1, 2))\n model = BERTopic(vectorizer=cv)\n model._update_topic_size(documents)\n model._extract_topics(documents)\n freq = model.get_topic_freq()\n\n assert model.c_tf_idf.shape[0] == 5\n assert model.c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def test_topic_reduction(reduced_topics):\n model = BERTopic()\n nr_topics = reduced_topics + 2\n model.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents.copy())\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents.copy())\n new_freq = model.get_topic_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(model.mapped_topics, dict)\n assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))\n assert model.mapped_topics", "def uncategorized(df):\n\n counter = 0\n for movie in df.index:\n if len(df.loc[movie, 'imdbGenres']) == 1 and\\\n df.loc[movie, 'Political'] == 0:\n counter += 1\n\n return counter", "def group_topics(sent_topics_sorteddf):\n new_topics=pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0).sum()\n plt.pie(new_topics,labels=new_topics.index,autopct='%.0f',pctdistance=.8)\n plt.title('Topic Share %');\n\n new_topic_words = pd.concat([sent_topics_sorteddf.groupby('Topic_Num').head()[['Keywords']],\n topic_contribution.sort_index(),\n pd.Series(['Economy','Immigration','Environment','Event',\n 'Civil Rights','Civil Rights','Healthcare',\n 'Defense','Trump','Community','Event','Event',\n 'Thanks','Legislation','Trump','Community',\n 'Community','Trump','Defense',\n 'Legislation','Thanks','Economy','Thanks','Healthcare',\n 'Legislation'])],axis=1).groupby(0)['Keywords'].sum()\n [print(f'{topic}: ' + words) for topic,words in 
zip(new_topic_words.index,new_topic_words)]", "def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), \"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])", "def test_topic_reduction_edge_cases():\n model = BERTopic()\n nr_topics = 5\n model.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n model._update_topic_size(old_documents)\n model._extract_topics(old_documents)\n old_freq = model.get_topic_freq()\n\n new_documents = model._reduce_topics(old_documents)\n new_freq = model.get_topic_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def export_topics(self):\n\n # format as a list (for json output), then sort descending by textIDCount\n topics = [{'name': topic['name'], 'count': topic['count'],\n 'verbatims': list(topic['verbatims']), 'textIDs': list(topic['textIDs']),\n 'textIDCount': topic['textIDCount'], 'rank': topic['rank'],\n 'children': '' if 'children' not in topic else topic['children']}\n for topic_id, topic in self.topics.items()]\n topics = sorted(topics, key=lambda topic: topic['textIDCount'], reverse=True)\n\n for i, topic in 
enumerate(topics):\n # Note that 'rank' is from topic, not child.\n topic['children'] = [{'name': child['name'], 'count': child['count'], 'rank': topic['rank'],\n 'verbatims': list(child['verbatims']), 'textIDs': list(child['textIDs']),\n 'textIDCount': child['textIDCount']}\n for _, child in topic['children'].items()]\n\n topic['children'] = sorted(topic['children'], key=lambda lemma: lemma['textIDCount'], reverse=True)\n\n # If the subtopic count is greater than the topic count, than calc a multiplier to size each subtopic\n child_count = sum([child['textIDCount'] for child in topic['children']])\n child_count_multiplier = 1 if child_count < topic['textIDCount'] else topic['textIDCount'] / child_count\n\n for child in topic['children']:\n child['size'] = child['textIDCount'] * child_count_multiplier\n\n topic['size'] = topic['textIDCount'] - (child_count * child_count_multiplier)\n\n # Prune topics over max_topics (default ~40): we stopped calc'ing rank over the max_topics\n self.model_output[\"children\"] = [topic for topic in topics]\n\n # Build file name and save\n if self.data_date:\n date = datetime.strptime(self.data_date, \"%Y-%m-%d\").strftime('%d') # from YYYY-MM-DD to DD\n file_name = '{}-{}-Topics.txt'.format(self.corpus_name, date)\n else:\n file_name = '{}-Topics.txt'.format(self.corpus_name)\n\n with open(config.OUTPUT_DIR + file_name, 'w') as file:\n json.dump(self.model_output, file)", "def find_dominant_topic(df_topic_sents_keywords):\n\n # Format\n df_dominant_topic = df_topic_sents_keywords.reset_index()\n df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\n\n # Group top 5 sentences under each topic\n sent_topics_sorteddf = pd.DataFrame()\n\n sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')\n start = time.time()\n for i, grp in sent_topics_outdf_grpd:\n sent_topics_sorteddf = pd.concat([sent_topics_sorteddf, \n grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], \n axis=0)\n print(f'Group done. 
Total time {time.time() - start} seconds.')\n\n # Reset Index \n sent_topics_sorteddf.reset_index(drop=True, inplace=True)\n\n # Format\n sent_topics_sorteddf.columns = ['Topic_Num', \"Topic_Perc_Contrib\", \"Keywords\", \"Text\"]\n return sent_topics_sorteddf", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous label to current label in transition_count_table.\n All \"rare words\" (those words that appear less than 3 times) are replaced by <UNK>.\n I also add label|START counts.\n \"\"\"\n # Build feature_count_table of V x labels and transition_count_table of labels x labels\n for instance in instance_list: # Set of <(w, pos), l>\n index = 0\n for t in instance.data: # Tuple of (w, pos)\n index = instance.data.index(t)\n # print t[0] # word\n # print instance.label[index] # label\n if t in self.V:\n self.feature_count_table[self.V.index(t)][self.labels.index(instance.label[index])] +=1\n else:\n self.feature_count_table[self.V.index('<UNK>')][self.labels.index(instance.label[index])] +=1\n if index > 0:\n self.transition_count_table[self.labels.index(instance.label[index-1])][self.labels.index(instance.label[index])] += 1\n else:\n self.transition_count_table[len(self.labels)][self.labels.index(instance.label[index])] += 1", "def summarize_corpus():\n\t\n\t# get metadata\n\t#get_metadata.from_TEIP5(wdir, corpus_inpath, \"metadata\", md_mode)\n\t\n\t# visualize some metadata\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"author-continent\")\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"author-country\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"language\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_hist\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"subgenre_x\")\n\tvisualize_metadata.plot_pie(wdir, md_csv, \"subgenre\")\n\n\tvisualize_metadata.describe_corpus(wdir, md_csv, \"subgenre\")\n\t#visualize_metadata.describe_corpus(wdir, md_csv, \"gender\")\n\t\n\t# make some counts\n\tmd_table = pd.DataFrame.from_csv(os.path.join(wdir, md_csv), header=0)\n\tnum_texts = len(md_table)\n\t#num_language = len(md_table.groupby([\"language\"]))\n\t#num_continent = len(md_table.groupby([\"author-continent\"]))\n\t#num_countries = len(md_table.groupby([\"author-country\"]))\n\t#num_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_authors = len(md_table.groupby([\"author-name\"]))\n\tnum_subgenre = len(md_table.groupby([\"subgenre\"]))\n\t#num_subgenre_x = len(md_table.groupby([\"subgenre_x\"]))\n\t#fr_subgenre_hist = md_table.groupby([\"subgenre_hist\"]).count()\n\t#num_historical = fr_subgenre_hist[\"idno\"][\"historical\"]\n\t#num_not_historical = fr_subgenre_hist[\"idno\"][\"not_historical\"]\n\t\n\t\n\td = {\"texts\":[num_texts], \n\t#\"languages\":[num_language],\n\t#\"continents\":[num_continent],\n\t#\"countries\":[num_countries],\n\t\"authors\":[num_authors],\n\t#\"subgenre_x\":[num_subgenre_x],\n\t\"subgenre\":[num_subgenre]}\n\t#\"num_historical\":[num_historical],\n\t#\"num_not_historical\":[num_not_historical]}\n\t\n\t\n\t\n\tcount_fr = pd.DataFrame(d)\n\tcount_fr.to_csv(os.path.join(wdir, \"corpus-description.csv\"), sep=\",\", header=True)\n\tprint(\"Done: summarize corpus\")", "def __rank_topics(self, found_topics, explanation):\n max_value = 0\n scores = []\n for _,topic in found_topics.items():\n topic[\"score\"] = topic[\"times\"] * len(topic['grams'].keys())\n 
scores.append(topic[\"score\"])\n if topic[\"score\"] > max_value:\n max_value = topic[\"score\"]\n\n for _,topic in found_topics.items():\n if \"syntactic\" in topic:\n topic[\"score\"] = max_value\n\n\n\n\n # Selection of unique topics\n unique_topics = {}\n for t_p,topic in found_topics.items():\n prim_label = self.cso.get_primary_label_wu(t_p)\n if prim_label in unique_topics:\n if unique_topics[prim_label] < topic[\"score\"]:\n unique_topics[prim_label] = topic[\"score\"]\n else:\n unique_topics[prim_label] = topic[\"score\"]\n\n # ranking topics by their score. High-scored topics go on top\n sort_t = sorted(unique_topics.items(), key=lambda v: v[1], reverse=True)\n #sort_t = sorted(found_topics.items(), key=lambda k: k[1]['score'], reverse=True)\n\n\n # perform\n vals = []\n for t_p in sort_t:\n vals.append(t_p[1]) #in 0, there is the topic, in 1 there is the info\n\n\n #### suppressing some warnings that can be raised by the kneed library\n warnings.filterwarnings(\"ignore\")\n try:\n x_vals = range(1,len(vals)+1)\n t_kn = KneeLocator(x_vals, vals, direction='decreasing')\n if t_kn.knee is None:\n #print(\"I performed a different identification of knee\")\n t_kn = KneeLocator(x_vals, vals, curve='convex', direction='decreasing')\n except ValueError:\n pass\n\n ##################### Pruning\n\n try:\n knee = int(t_kn.knee)\n except TypeError:\n knee = 0\n except UnboundLocalError:\n knee = 0\n\n if knee > 5:\n try:\n knee += 0\n except TypeError:\n print(\"ERROR: \",t_kn.knee,\" \",knee, \" \", len(sort_t))\n\n else:\n try:\n if sort_t[0][1] == sort_t[4][1]:\n top = sort_t[0][1]\n test_topics = [item[1] for item in sort_t if item[1]==top]\n knee = len(test_topics)\n\n else:\n knee = 5\n except IndexError:\n knee = len(sort_t)\n\n final_topics = []\n final_topics = [self.cso.get_topic_wu(sort_t[i][0]) for i in range(0,knee)]\n self.reset_explanation()\n self.explanation = {self.cso.topics_wu[sort_t[i][0]]: explanation[sort_t[i][0]] for i in range(0,knee)}\n\n return final_topics", "def get_result_table_and_info(cls):\n winning_dict = cls.get_winning_topics()\n winning_topics = winning_dict['winning_topics']\n runoff_poll_warning = winning_dict['runoff_poll_warning']\n\n # Create table\n result_table = []\n all_categories = sorted(Category.objects.all(), key=attrgetter('sum_of_votes', 'weight'), reverse=True)\n for category in all_categories:\n category_hoechstzahls = filter(lambda hoechstzahl: hoechstzahl.topic.category == category, cls.all_hoechstzahls)\n category_hoechstzahls.sort(key=lambda hoechstzahl: hoechstzahl.value, reverse=True)\n runoff_poll_warning = second_runoff_poll_check(runoff_poll_warning, category_hoechstzahls, winning_topics)\n category_hoechstzahls += (max(config['openslides_topicvoting_posts'], 3) - len(category_hoechstzahls)) * [None]\n result_table.append(category_hoechstzahls)\n\n # Return table and flags as dictionary\n return {'result_table': result_table,\n 'winning_topics': winning_topics,\n 'runoff_poll_warning': runoff_poll_warning,\n 'topic_post_warning': winning_dict['topic_post_warning']}", "def test_extract_topics_custom_cv(base_bertopic_custom_cv):\n nr_topics = 5\n documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic_custom_cv._update_topic_size(documents)\n c_tf_idf = base_bertopic_custom_cv._extract_topics(documents, topic_reduction=False)\n freq = base_bertopic_custom_cv.get_topics_freq()\n\n assert c_tf_idf.shape[0] == 
5\n assert c_tf_idf.shape[1] > 100\n assert isinstance(freq, pd.DataFrame)\n assert nr_topics == len(freq.Topic.unique())\n assert freq.Count.sum() == len(documents)\n assert len(freq.Topic.unique()) == len(freq)", "def display_topics(df, n_rows=10, n_cols=12):\n\n exemplar_scores, hovers = topic_exemplars(df)\n top_columns = sorted(range(len(exemplar_scores)),\n key=lambda i: exemplar_scores[i],\n reverse=True)[:n_cols]\n #I comented this line Im not 100% sure what was the purpuse of this\n # topics = df.pivot(index='pos', columns='topic',values='word*').replace([None], [''], regex=True)\n topics = df.pivot(index='pos', columns='topic',values='word*')\n\n topics_display = topics[top_columns].head(n_rows)\n\n return topics_display, top_columns", "def count_mentioned_countries(data):\n countries_mentioned = {}\n countries = get_countries()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for country in countries:\n if country in (subject_words + message_words):\n if country in countries_mentioned:\n countries_mentioned[country] += 1\n else:\n countries_mentioned[country] = 1\n\n return pd.DataFrame.from_dict(countries_mentioned, orient=\"index\")", "def get_paper_count_per_topic(topic_model, start_year, end_year, debug=False):\n papers_count = get_papers_per_topic(topic_model, None, start_year, end_year, None, debug=debug)\n return sorted(papers_count.items(), key=operator.itemgetter(1), reverse=True)", "def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])", "def return_topic_figures(n_topics=5):\n\n ### import data ###\n\n data = return_keywords()\n data_for_topics = data[\"abstract_kw\"].apply(\n lambda x: list(ast.literal_eval(x).keys())\n )\n\n ### Build topic model ###\n\n # parameters\n n_topics = n_topics\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_for_topics)\n\n # Create Corpus: Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in data_for_topics]\n\n # Build LDA model\n lda_model = gensim.models.ldamodel.LdaModel(\n corpus=corpus,\n id2word=id2word,\n num_topics=n_topics,\n random_state=100,\n update_every=1,\n chunksize=10,\n passes=10,\n alpha=\"symmetric\",\n iterations=100,\n per_word_topics=True,\n )\n\n topics = lda_model.show_topics(formatted=False)\n data_flat = [w for w_list in data_for_topics for w in w_list]\n counter = Counter(data_flat)\n out = []\n for i, topic in topics:\n for word, weight in topic:\n out.append([word, i, weight, counter[word]])\n df = pd.DataFrame(out, columns=[\"word\", \"topic_id\", \"importance\", \"word_count\"])\n\n specs = np.full((ceil(n_topics / 2), 2), 
{\"secondary_y\": True})\n topic_bar_charts = make_subplots(\n rows=ceil(n_topics / 2),\n cols=2,\n specs=specs.tolist(),\n horizontal_spacing=0.1,\n vertical_spacing=0.15,\n )\n row, col = (0, 0)\n for topic in range(n_topics):\n if (topic % 2) != 0:\n col = 2\n else:\n col = 1\n row += 1\n color = px.colors.qualitative.Vivid[topic]\n topic_bar_charts.add_trace(\n go.Bar(\n x=df.loc[df.topic_id == topic, \"word\"],\n y=df.loc[df.topic_id == topic, \"word_count\"],\n width=0.5,\n opacity=0.3,\n marker_color=color,\n name=(\"Topic \" + str(topic) + \" word count\"),\n ),\n secondary_y=False,\n row=row,\n col=col,\n )\n topic_bar_charts.add_trace(\n go.Bar(\n x=df.loc[df.topic_id == topic, \"word\"],\n y=df.loc[df.topic_id == topic, \"importance\"],\n width=0.2,\n marker_color=color,\n name=(\"Topic \" + str(topic) + \" weight\"),\n ),\n secondary_y=True,\n row=row,\n col=col,\n )\n topic_bar_charts.update_layout(barmode=\"overlay\")\n\n topic_bar_charts.update_layout(\n height=800, width=1000, margin=dict(l=50, r=50, t=50, b=100)\n )\n\n # append all charts\n figures = [dict(data=topic_bar_charts)]\n\n return figures", "def topic(df, num_topics=5):\r\n# X, y = df[df.columns[:-1]], df[df.columns[-1]]\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "def connect_topic_id_to_topics(model, representants, log_writer):\n # t = model.get_topics()\n topic_indexes = {}\n topics_of_index = {}\n confidence = []\n for key, value in representants.items():\n connection_results = {}\n for article in value:\n try:\n # get most possible index\n topic_index = max(model.analyse_text(article), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\") # TODO replace with if\n continue\n # add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n # find index that occured mostly\n print(connection_results)\n for tp_num, val in connection_results.items():\n confidence.append([key,tp_num,val/len(value)])\n confidence = sorted(confidence, key=operator.itemgetter(2),reverse=True)\n associated_indexes = []\n associated_topics = []\n for conf in confidence:\n if conf[1] in associated_indexes or conf[0] in associated_topics:\n continue\n associated_indexes.append(conf[1])\n associated_topics.append(conf[0])\n log_writer.add_log('Connecting topic {} to model index {} based on highest unused confidence of {}'.format(conf[0],conf[1],conf[2]))\n topic_indexes[conf[0]] = conf[1]\n\n for key, value in topic_indexes.items():\n topics_of_index[value] = [key]\n print(topic_indexes)\n print(topics_of_index)\n return topic_indexes, topics_of_index", "def check_intersections(db, topics, papers_by_topic):\n\n\t# Print the distribution of \"number of topics\"\n\tnum_subjects = []\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tnum_subjects.append(len(p.subject))\n\t\telse:\n\t\t\tnum_subjects.append(0)\n\tnum_subjects = np.array(num_subjects)\n\n\tfor i in range(np.max(num_subjects)+1):\n\t\tprint(\"Number of papers with\", i, \"topics:\", \n\t\t\tlen(np.where(num_subjects==i)[0]))\n\n\t# Figure out what's going on with triple-tagged guys (nothing weird)\n\t\"\"\"\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tif len(p.subject) > 
2:\n\t\t\t\tprint(\"\\n\",p.title,\"\\n\\t\",p.container_title,\"\\n\\t\", p.subject)\n\t\t\t\t\n\t\t\t\tfor topic, topic_words in topics.items():\n\t\t\t\t\tprint(\"\\tCheck against '\" + topic + \"':\")\n\t\t\t\t\tfor journal in p.container_title:\n\t\t\t\t\t\tcheck_words(journal, topic_words, verbose=True)\n\t\"\"\"\n\n\t# Look in more detail at double-tagged guysfor p_hash, p in db.all_papers.items():\n\tcombos = defaultdict(int)\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tif len(p.subject) == 2:\n\t\t\t\tcombos[frozenset(p.subject)] += 1\n\t\t\t\t#print(\"\\n\",p.title,\"\\n\\t\",p.container_title,\"\\n\\t\", p.subject)\n\t\t\t\tif p.subject == {'Computer Science', 'Biology'}:\n\t\t\t\t\t#print(\"\\n\",p.title,\"\\n\\t\",p.container_title)#,\"\\n\\t\", p.subject)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbio_words = set()\n\t\t\t\t\tCS_words = set()\n\t\t\t\t\tfor journal in p.container_title:\n\t\t\t\t\t\tfor word in topics['Biology']:\n\t\t\t\t\t\t\tif journal.find(word) >= 0:\n\t\t\t\t\t\t\t\tbio_words.add(word)\n\t\t\t\t\t\tfor word in topics['Computer Science']:\n\t\t\t\t\t\t\tif journal.find(word) >= 0:\n\t\t\t\t\t\t\t\tCS_words.add(word)\n\n\t\t\t\t\t#print(\"\\tBiology words:\", bio_words)\n\t\t\t\t\t#print(\"\\tCS words:\", CS_words)\n\t\n\tfor k, v in combos.items():\n\t\tprint(k, v)", "def testArticleCount(self):\n\n self.articleCount(17)", "def analysis():\r\n data_frame = load_from_mysql('core', 'BDFMHQAA_D')\r\n data_frame.registerTempTable('business')\r\n gd = data_frame.select('AA03CSNO', 'AA08PRON')\r\n\r\n def merge_count(a, b):\r\n r = {}\r\n for p, c in a.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n for p, c in b.items():\r\n if p in r:\r\n r[p] += c\r\n else:\r\n r[p] = c\r\n return r\r\n result = gd.map(lambda row: (row.AA03CSNO, {row.AA08PRON: 1})).reduceByKey(merge_count)\r\n pron_count = gd.map(lambda row: (row.AA08PRON, 1)).reduceByKey(lambda a, b: a + b)\r\n\r\n # result = gd.map(lambda row: (row.AA03CSNO, row.AA08PRON))\r\n print(result.take(10))\r\n print('----------------pron count-----------------')\r\n print(pron_count.collect())\r\n\r\n print(gd)", "def connect_topic_id_to_topics_old(self, model):\n #t = model.get_topics()\n for key, value in self.representants.items():\n connection_results = {}\n for article in value:\n try:\n #get most possible index\n topic_index = max(model.analyse_text(article[1]), key=lambda item: item[1])[0]\n except ValueError:\n print(\"No topic index returned continuing\")#TODO replace with if\n continue\n #add most possible index for this article to counter\n if topic_index not in connection_results:\n connection_results[topic_index] = 1\n else:\n connection_results[topic_index] += 1\n #find index that occured mostly\n best_candidates = max(connection_results.items(), key=operator.itemgetter(1))\n print(best_candidates)\n self.log_writer.add_log(\"Best candidate with index {} is connected to topic {} with {}% accuracy\".format(best_candidates[0], key, (connection_results[best_candidates[0]]/len(value))*100))\n #create connection between topic id and model topic index\n self.topic_indexes[key] = best_candidates[0]\n #creat connection in opposite direction if there already is some connection add found index to that connection (some model topic index can represent more than one real topic)\n if best_candidates[0] not in self.topics_of_index:\n self.topics_of_index[best_candidates[0]] = [key]\n else:\n self.topics_of_index[best_candidates[0]].append(key)\n\n self.log_writer.add_log(\"Out of 
{} real topics only {} were learned\".format(len(self.representants), len(self.topics_of_index)))", "def topic(df, num_topics=5):\r\n\r\n lda = LatentDirichletAllocation(n_topics=num_topics,\r\n max_iter=5,\r\n learning_method='online',\r\n learning_offset=50.,\r\n random_state=0)\r\n return lda.fit_transform(df)", "def test_topic_reduction(reduced_topics):\n base_bertopic = BERTopic(bert_model='distilbert-base-nli-mean-tokens', verbose=False)\n nr_topics = reduced_topics + 2\n base_bertopic.nr_topics = reduced_topics\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents.copy(), topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents.copy(), c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert old_freq.Count.sum() == new_freq.Count.sum()\n assert len(old_freq.Topic.unique()) == len(old_freq)\n assert len(new_freq.Topic.unique()) == len(new_freq)\n assert isinstance(base_bertopic.mapped_topics, dict)\n assert not set(base_bertopic.get_topics_freq().Topic).difference(set(new_documents.Topic))\n assert base_bertopic.mapped_topics", "def infertopics(self):\n\n # Iterate over nodes missing topic attribute (only occurs for new nodes)\n for uid in self.scan(attribute=\"updated\"):\n # Remove updated attribute\n self.removeattribute(uid, \"updated\")\n\n # Get list of neighboring nodes\n ids = self.edges(uid)\n\n # Infer topic\n topic = Counter(self.attribute(x, \"topic\") for x in ids).most_common(1)[0][0] if ids else None\n if topic:\n # Add id to topic list and set topic attribute\n self.topics[topic].append(uid)\n self.addattribute(uid, \"topic\", topic)\n\n # Set topic rank\n self.addattribute(uid, \"topicrank\", len(self.topics[topic]) - 1)\n\n # Infer category\n category = Counter(self.attribute(x, \"category\") for x in ids).most_common(1)[0][0]\n self.addattribute(uid, \"category\", category)", "def basic_statistics_of_email(data):\n word_counts = []\n character_count = 0\n\n for ind, row in data.iterrows():\n tokenizer = RegexpTokenizer(r'\\w+')\n real_words = tokenizer.tokenize(row[\"RawText\"].lower())\n\n character_count += sum(map(len, real_words))\n word_counts.append(len(real_words))\n\n return character_count, pd.Series(word_counts)", "def assign_topics(self):\n\n # Load the dataset which has topic tags for each sentence\n dt, dt_less = self.assign_topics_to_sentences()\n \n dt_copy = dt\n \n # Minimum number of tags needed to tag the overall response\n dt['min_num_tags'] = dt['num_sent'].apply(lambda x: math.ceil(0.3*x))\n \n # Final dataset with full survey response and its tags\n final_dt = dt.groupby(self.id_col_name).agg({'tags': sum\n , 'num_sent': min\n , 'min_num_tags': min\n# , 'sentences': lambda x: \"%s\" % '. 
'.join(x)\n , self.col_name: min})\n final_dt.reset_index(level = 0, inplace = True)\n final_dt['topics'] = final_dt.apply(lambda x: set([i for i in x.tags if x.tags.count(i) >= x.min_num_tags])\n , axis = 1)\n\n final_dt_less = final_dt[[self.id_col_name, self.col_name, 'topics']]\n\n return dt_copy, dt_less, final_dt, final_dt_less", "def preprocess(df):\n \n # drop the following columns - irrelevant now\n DROP_COLUMNS = ['id', 'original_title', 'release_date'\n , 'tmdbId', 'popularity', 'year']\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n # drop all of the language columns\n DROP_COLUMNS = [col for col in df.columns if col[:3]==\"lan\"]\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n\n # loop through the columns we want to aggregate\n for col_type in [\n \"original_language_\"\n , \"prod_comp_cntry_\"\n , \"prod_comp_names_\"\n , \"writers_\"\n , \"actors_\"\n , \"genres_\"\n , \"director_\"\n ]:\n # create a dictionary of each unique value and its frequency\n val_freq = {}\n for col in df.columns:\n if col.startswith(col_type):\n val_freq[col] = df[col].sum()\n\n # create a dataframe from this dictionary; sort by count\n counts = pd.DataFrame.from_dict(\n val_freq\n , orient='index'\n , columns=['count']\n ).sort_values('count', ascending=False)\n counts['frac'] = counts['count'].apply(lambda x: 100*x / df.shape[0])\n\n # handle special case of production company country\n if col_type == \"prod_comp_cntry_\":\n DROP_COLUMNS = [col for col in counts.index][3:]\n\n # handle special case of directors\n elif col_type == \"director_\":\n DIRECTOR_COLS = [col for col in df.columns\n if col.startswith(\"director_\")\n and col!=\"director_pop\"]\n df['established_director'] = df[DIRECTOR_COLS].max(axis=1)\n DROP_COLUMNS = DIRECTOR_COLS\n\n # handle special case of actors\n elif col_type == \"actors_\":\n ACTORS_COLS = [col for col in df.columns if \"actors\" in col]\n df['num_top_100_actors'] = df[ACTORS_COLS].sum(axis=1)\n DROP_COLUMNS = ACTORS_COLS\n\n # handle all the other cases\n else:\n DROP_COLUMNS = [col for col in counts.query('frac < 2').index]\n\n\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n ##########################################################################\n # adjust the data for inflation\n CPI_tf = df['CPIAUCSL'].max()\n df['budget'] = df[['budget', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n df['revenue'] = df[['revenue', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n # no longer need CPI data\n df.drop('CPIAUCSL', axis=1, inplace=True)\n \n ########################################################################## \n # add in useful features about the cast and crew \n df['cast_crew_sum_pop'] = (\n df['director_pop']\n + df['avg_actor_pop']\n + df['avg_writer_pop']\n )\n df['cast_crew_product_pop'] = (\n df['director_pop']\n * df['avg_actor_pop']\n * df['avg_writer_pop']\n )\n df['runtime'].replace(to_replace=0, value=df['runtime'].median(), inplace=True)\n df = df.query('10000 <= revenue').copy()\n df = df.query('100000 <= budget').copy()\n df.drop('sum_actor_pop', axis=1, inplace=True)\n df.drop('min_writer_pop', axis=1, inplace=True)\n\n # code to transform columns\n for col in [\n \"budget\", \"director_pop\", \"avg_writer_pop\"\n , \"max_writer_pop\", \"avg_actor_pop\", \"max_actor_pop\"\n , \"min_actor_pop\", 'cast_crew_sum_pop'\n , 'cast_crew_product_pop'\n ]:\n df['log10_'+col] = df[col].apply(lambda x: math.log10(x))\n df.drop(col, axis=1, inplace=True)\n \n return df", "def 
calc_topic_mode_log_stats(user_exercise_graph, topic_id,\n just_earned_proficiency):\n topic = topic_models.Topic.get_by_id(topic_id)\n topic_exercises = topic.get_exercises()\n\n total_exercises = len(topic_exercises)\n count_proficient = len(set(ex.name for ex in topic_exercises) &\n set(user_exercise_graph.proficient_exercise_names()))\n just_completed = (just_earned_proficiency and total_exercises ==\n count_proficient)\n\n return {\n 'total_exercises': total_exercises,\n 'count_proficient': count_proficient,\n 'just_completed': just_completed,\n }", "def append_counting(dict):\n row_c = []\n # for nuc in NUC: #Scans all the elements and adds it to the table.\n # row_c.append(dict[nuc])\n for mot in MOT:\n row_c.append(dict[mot])\n for nuc_nr in NUC_NR :\n row_c.append(dict[nuc_nr + \"_NR\"])\n # #row.extend([dict[\"AA_NR\"], dict[\"TT_NR\"], dict[\"CC_NR\"], dict[\"GG_NR\"]])\n return row_c", "def topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df", "def gender_word_counts(data):\n\n # We use the stopwords package from NLTK corpus.\n stop_words = set(stopwords.words('english'))\n data['tweet_words'] = data['text_cleaned'].str.split()\n # Ignoring all the stop words\n data['tweet_words'] = data['tweet_words'].apply(lambda tweet: [word for word in tweet if word not in stop_words])\n\n # Separating Male, Female and Brand profiles.\n male_profiles = data[data['gender'] == 'male']\n female_profiles = data[data['gender'] == 'female']\n brand_profiles = data[data['gender'] == 'brand']\n\n print(\"Top 20 most frequent words used by Men\")\n all_male_tweets = ' '.join(male_profiles['tweet_words'].astype(str))\n Male_words = pd.Series(all_male_tweets.split(\" \")).value_counts()[:20]\n print(Male_words)\n print()\n\n print(\"Top 20 most frequent words used by Women\")\n all_female_tweets = ' '.join(female_profiles['tweet_words'].astype(str))\n Female_words = pd.Series(all_female_tweets.split(\" \")).value_counts()[:20]\n print(Female_words)\n print()\n\n print(\"Top 20 most frequent words used by Brands\")\n all_brand_tweets = ' '.join(brand_profiles['tweet_words'].astype(str))\n Brand_words = pd.Series(all_brand_tweets.split(\" \")).value_counts()[:20]\n print(Brand_words)\n\n # Plotting horizontal bar graphs showing Top 20 tweet words used Vs. 
the word frequency.\n mp = Male_words.plot(kind='barh', stacked=True, colormap='plasma', title=\"Top 20 most frequently words used by Men\")\n mp.set_ylabel(\"Tweet words used by Males\")\n mp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n fp = Female_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Women\")\n fp.set_ylabel(\"Tweet words used by Females\")\n fp.set_xlabel(\"Word Frequency\")\n plt.show()\n\n bp = Brand_words.plot(kind='barh', stacked=True, colormap='plasma',\n title=\"Top 20 most frequently words used by Brands\")\n bp.set_ylabel(\"Tweet words used by Brands\")\n bp.set_xlabel(\"Word Frequency\")\n plt.show()", "def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)", "def test_mead_summary_length(self):\n topics = {'PUP1A': [Document('TST_ENG_20190101.0001'), Document('TST_ENG_20190101.0002'),\n Document('TST20190201.0001'), Document('TST20190201.0002')],\n 'WAR2A': [Document('TST_ENG_20190301.0001'), Document('TST_ENG_20190301.0002'),\n Document('TST20190401.0001'), Document('TST20190401.0002')]}\n WordMap.create_mapping()\n vec = Vectors()\n vec.create_freq_vectors(topics)\n idf = MeadSummaryGenerator(self.doc_list, MeadContentSelector(), self.args).get_idf_array()\n max_length = 100\n\n for topic_id, documents in topics.items():\n generator = MeadSummaryGenerator(documents, MeadContentSelector(), self.args)\n generator.select_content(idf)\n generator.order_information()\n realized_content = generator.realize_content()\n realized_content = [w for w in realized_content.split(\" \") if not \" \"]\n content_length = len(realized_content)\n self.assertLessEqual(content_length, max_length)", "def metadata_summary(idx):\n tax_per_cluster = []\n genomes_per_tax = []\n genes_per_genome = []\n for cluster_id,v in idx.items():\n tax_per_cluster.append(len(v.keys()))\n for tax,vv in v.items():\n genomes_per_tax.append(len(vv.keys()))\n for genomeID,gene_ids in vv.items():\n genes_per_genome.append(len(set(gene_ids)))\n sum_stats(tax_per_cluster, 'Clades per cluster')\n sum_stats(genomes_per_tax, 'Gemomes per clade')\n sum_stats(genes_per_genome, 'Genes per genome')", "def count_interests(rows: List[Row]) -> int:\n return len([row for row in rows if row[\"interest\"] is not None])", "def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;", "def main_topic_doc(ldamodel, corpus=corpus): \n \n doc_topics = 
pd.DataFrame()\n\n for i, row in enumerate(ldamodel[corpus]):\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\n\n for j, (topic_num, prop_topic) in enumerate(row):\n if j == 0:\n wp = ldamodel.show_topic(topic_num)\n topic_keywords = \"' \".join([word for word, prop in wp])\n doc_topics = doc_topics.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\n else:\n break\n doc_topics.columns = ['Dominant_Topic', 'Percent_Contrib', 'Topic_keywords']\n return doc_topics", "def explore_topic_nouns(topic_number, topn=25, model=10):\n #\n if model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n if dfff[dfff['nouns']==term].empty: ## dfff is loaded from pilot_path/bow_nouns.csv\n pass\n else:\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)", "def model_topics(df):\n\n data = df.text.values.tolist()\n data_words = list(sent_to_words(data))\n\n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n\n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words_nostops,bigram_mod)\n\n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n nlp = spacy.load('en', disable=['parser', 'ner'])\n\n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n\n # Create Corpus\n texts = data_lemmatized\n\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n # Perform Topic Modeling for number of topics ranging from 5 to 50 in steps of 5\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=5, limit=50, step=5)\n\n return model_list,coherence_values,corpus,id2word", "def explore_topic(topic_number, topn=25, model=10):\n #\n if model==25:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_25'))\n topicname=topic_names_25[topic_number]\n gensimSTR=''\n elif model==15:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_15'))\n topicname=topic_names_15[topic_number]\n gensimSTR=''\n elif model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n 
dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)\n ##", "def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)", "def get_tweets_by_topic(topic, start_date, end_date):\n try:\n query = f\"select tweet, sentence, polarity, subjectivity from {db_schema}.{db_table_tweet} t, {db_schema}.{db_table_pred} tp where t.id_tweet=tp.id_tweet and topic='{topic}' and tweet_date between str_to_date('{start_date}', '%Y-%m-%d') and str_to_date('{end_date}', '%Y-%m-%d')\"\n logger.info(f'QUERY: {query}') \n with MysqlCursor() as cur:\n cur.execute(query)\n tweets = cur.fetchall()\n columns = [col[0] for col in cur.description]\n logger.info(f'TOPIC: {topic}, N° TWEETS: {len(tweets)}') \n return tweets, columns\n \n except Exception as ex:\n logger.exception(ex)\n return f'Exception: {ex}'", "def property_count_function(listOfProperties):\n\n property_count = {} # Empty dict, is gonna look like this: property_count{property : count}\n for lists in listOfProperties:\n try:\n for properties in lists:\n property_count[properties] = property_count.get(properties, 0) + 1\n except TypeError as e:\n print(e)\n\n # Converts the dictionary to a dataframe\n property_dataframe = pd.DataFrame(list(property_count.items()), columns=['Property', 'Frequency'])\n # property_dataframe = property_dataframe.set_index(\"Property\")\n property_dataframe = property_dataframe.sort_values(by=['Frequency'], ascending=False)\n\n return property_dataframe", "def categorize_data(data, top_count):\n sorted_by_tcp = sorted(\n data, key=lambda x: x['TCP Utilization'], reverse=True\n )[0:top_count]\n sorted_by_udp = sorted(\n data, key=lambda x: x['UDP Utilization'], reverse=True\n )[0:top_count]\n\n print(f\"\\nTOP-{top_count} port flooders by TCP\")\n print(tabulate(sorted_by_tcp, headers='keys', tablefmt=\"psql\"))\n print(f\"\\nTOP-{top_count} port flooders by UDP\")\n print(tabulate(sorted_by_udp, headers='keys', tablefmt=\"psql\"))", "def get_num_topics(soup):\n try:\n #num = int(re.search(\"Suche ergab (\\\\d+)\", 
str(soup)).group(1)) \n num = int(re.search(\"(\\\\d+) Treffer\", str(soup)).group(1)) \n except: \n num = 0\n print(traceback.format_exc()) \n\n return num", "def get_top_keywords_from_articles(self, kwords_list):\n _all_keywords = []\n for a in kwords_list:\n if a != []:\n for w in a:\n _all_keywords.append([w['keyword'],w['weight'],w['label']])\n _df_g = pd.DataFrame(_all_keywords, columns=[\"Keyword\", \"Count\",\"Label\"])\n _df_g.sort_values(by=\"Count\", inplace=True, ascending=False)\n _df_g.reset_index(drop=True, inplace=True)\n _df_g.to_csv('test.csv')\n print(len(_df_g))\n\n _df_g['Keyword'] = _df_g['Keyword'].apply(self.remove_repeat_words)\n _df_g.dropna(axis=0, inplace=True)\n p1,p2 = self.pos_taggers(_df_g)\n _df_g['c_POS'] = p1\n _df_g['s_POS'] = p2\n _df_g['c_POS_score'] = _df_g['c_POS'].apply(self.combine_pos_score)\n _df_g['s_POS_score'] = _df_g['s_POS'].apply(self.specific_pos_score)\n _df_g['Count'] = _df_g['Count'] + _df_g['c_POS_score'] + _df_g['s_POS_score'] \n print(len(_df_g))\n _df_g.sort_values(by='Count',inplace=True, ascending=False)\n print(len(_df_g))\n _df_g = _df_g.reset_index(drop=True)\n _df_g = _df_g[:10]\n response_dict = dict()\n response_dict['nc'] = \", \".join(_df_g['Keyword'].to_list())\n return response_dict", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters 
count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df", "def get_topics(self):\n topics = self.word_topics\n return topics / topics.sum(axis=1)[:, None]", "def get_topic_quality():\n model.eval() \n with torch.no_grad():\n alpha = model.mu_q_alpha\n beta = model.get_beta(alpha) \n print('beta: ', beta.size())\n\n print('\\n')\n print('#'*100)\n print('Get topic diversity...')\n num_tops = 25\n\n TD_all = _diversity_helper(beta, num_tops) \n \n TD = np.mean(TD_all)\n print('Topic Diversity is: {}'.format(TD))\n\n print('\\n')\n print('Get topic coherence...')\n print('train_tokens: ', train_tokens[0])\n \n TC_all, cnt_all = get_topic_coherence(beta.cpu().detach().numpy(), train_tokens, vocab)\n\n TC_all = torch.tensor(TC_all)\n cnt_all = torch.tensor(cnt_all)\n TC_all = TC_all / cnt_all\n TC_all[TC_all<0] = 0\n\n TC = TC_all.mean().item()\n print('Topic Coherence is: ', TC)\n print('\\n')\n\n print('Get topic quality...')\n TQ = TC * TD\n print('Topic Quality is: {}'.format(TQ))\n print('#'*100)\n\n return TQ, TC, TD", "def get_topics_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'topics')", "def test_count_publications(self):\n pass", "def test_question_topics(self):\n p = ProductFactory()\n t1 = TopicFactory(slug='doesnotexist', product=p)\n t2 = TopicFactory(slug='cookies', product=p)\n t3 = TopicFactory(slug='sync', product=p)\n\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t3)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def query_transition_counts(ordered_pitch_types, query_filter):\n transition_counts = {}\n\n for first_pitch_type in ordered_pitch_types:\n transition_counts[first_pitch_type] = {}\n\n for second_pitch_type in ordered_pitch_types:\n count_query = PitchTransition.select().where(\n PitchTransition.first_pitch_type == first_pitch_type).where(\n PitchTransition.second_pitch_type == second_pitch_type)\n\n if query_filter:\n count_query = count_query.where(query_filter)\n\n transition_counts[first_pitch_type][second_pitch_type] = count_query.count()\n\n return transition_counts", "def init():\n\n # Reading the data from 
the CSV file using the latin1 encoding.\n data_read = pd.read_csv(\"gender-classifier-DFE-791531.csv\", encoding='latin1') # Dataset Size = 20050\n\n # If all the attribute values are empty for any of the rows, we drop them.\n data = data_read.dropna(how='all') # After dropping, data set size is still 20050\n\n # Checking the names of the columns/attributes which contains at least one null value\n columns_containing_missing_values = data.columns[data.isnull().any()].tolist()\n print(\"Column names which has missing values\")\n print(columns_containing_missing_values)\n\n # Since 'gender' is our target variable, we would like to have values for it.\n # So, dropping all the rows which have no values for the 'gender' attribute.\n data = data[data['gender'].notnull()] # After dropping, dataset size = 19953 rows\n # Also, dropping all the rows which have values as 'unknown' for the 'gender' attribute\n data = data[data['gender'] != 'unknown'] # After dropping, dataset size = 18836 rows\n\n male_profile_count = len(data[data['gender'] == 'male'])\n print(\"Male Profile Count \" + str(male_profile_count))\n female_profile_count = len(data[data['gender'] == 'female'])\n print(\"Female Profile Count \" + str(female_profile_count))\n brand_profile_count = len(data[data['gender'] == 'brand'])\n print(\"Brand Profile Count \" + str(brand_profile_count))\n\n return data", "def __find_topics(self, concepts):\n\n # Set up\n found_topics = dict() # to store the matched topics\n explanation = dict()\n\n # finding matches\n for concept in concepts:\n evgrams = everygrams(concept.split(), 1, 3) # list of unigrams, bigrams, trigrams\n for grams in evgrams:\n gram = \"_\".join(grams)\n gram_without_underscore = \" \".join(grams)\n #### Finding similar words contained in the model\n\n list_of_matched_topics = []\n\n if self.fast_classification:\n list_of_matched_topics = self.__get_similar_words_from_cached_model(gram,grams)\n else:\n list_of_matched_topics = self.__get_similar_words_from_full_model(gram, grams)\n\n\n for topic_item in list_of_matched_topics:\n\n topic = topic_item[\"topic\"]\n str_sim = topic_item[\"sim_t\"]\n wet = topic_item[\"wet\"]\n sim = topic_item[\"sim_w\"]\n\n\n if str_sim >= self.min_similarity and topic in self.cso.topics_wu:\n\n\n if topic in found_topics:\n #tracking this match\n found_topics[topic][\"times\"] += 1\n\n found_topics[topic][\"gram_similarity\"].append(sim)\n\n #tracking the matched gram\n if gram in found_topics[topic][\"grams\"]:\n found_topics[topic][\"grams\"][gram] += 1\n else:\n found_topics[topic][\"grams\"][gram] = 1\n\n #tracking the most similar gram to the topic\n if str_sim > found_topics[topic][\"embedding_similarity\"]:\n found_topics[topic][\"embedding_similarity\"] = str_sim\n found_topics[topic][\"embedding_matched\"] = wet\n\n else:\n #creating new topic in the result set\n found_topics[topic] = {'grams': {gram:1},\n 'embedding_matched': wet,\n 'embedding_similarity': str_sim,\n 'gram_similarity':[sim],\n 'times': 1,\n 'topic':topic}\n\n\n\n if sim == 1:\n found_topics[topic][\"syntactic\"] = True\n\n\n\n primary_label_topic = self.cso.get_primary_label_wu(topic)\n if primary_label_topic not in explanation:\n explanation[primary_label_topic] = set()\n\n explanation[primary_label_topic].add(gram_without_underscore)\n\n return found_topics, explanation", "def topic_table(topic):\r\n table = Table(['property', 'value'])\r\n table.align['property'] = 'r'\r\n table.align['value'] = 'l'\r\n\r\n table.add_row(['name', topic['name']])\r\n 
table.add_row(['tags', listing(topic['tags'] or [])])\r\n return table", "def __len__(self):\n count = 0\n topics = set(six.iterkeys(self._topics))\n while topics:\n event_type = topics.pop()\n try:\n listeners = self._topics[event_type]\n count += len(listeners)\n except KeyError:\n pass\n return count", "def test_data_counts(self):\n model = PoincareModel(self.data)\n self.assertEqual(len(model.all_relations), 5)\n self.assertEqual(len(model.node_relations[model.kv.vocab['kangaroo.n.01'].index]), 3)\n self.assertEqual(len(model.kv.vocab), 7)\n self.assertTrue('mammal.n.01' not in model.node_relations)", "def contentcheck_categorical():\n filename = \"Analysis.txt\"\n temp_line = \"\"\n count = 0\n for line in open(filename, 'r'):\n temp_line = temp_line + line\n if \"VALUE COUNTS\" in temp_line:\n count = count + 1\n if \"DATA INFORMATION\" in temp_line:\n count = count + 1\n if \"MEAN, MEDIAN AND MODE:\" in temp_line:\n count = count + 1\n if \"Correlation\" in temp_line:\n count = count + 1\n if \"Normality Tests\" in temp_line:\n count = count + 1\n return count", "def count_mentioned_pol_figures(data):\n figures_mentioned = {}\n figures = get_political_figures()\n\n for ind, row in data.iterrows():\n subject_words = row[\"MetadataSubject\"].lower()\n message_words = row[\"RawText\"].lower()\n\n for figure in figures:\n if figure + \" \" in (subject_words + message_words):\n if figure in figures_mentioned:\n figures_mentioned[figure] += 1\n else:\n figures_mentioned[figure] = 0\n\n return pd.DataFrame(figures_mentioned)", "def counts_table(data, attr):\n pd.options.mode.chained_assignment = None # default='warn'\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n # expanding a table to have all variable options in a column with their \n # parent attribute\n allvariables = attr.apply(lambda x: pd.Series(x['vars']),axis=1).stack().reset_index(level=1, drop=True)\n allvariables.name='var'\n allvariables = attr.drop('vars', axis=1).join(allvariables)\n av = allvariables.drop(attr.index[-1])\n # populate the table with counts\n for c in classlist:\n clist = []\n count = 0\n for i, row in av.iterrows():\n att = row['attr']\n var = row['var']\n sub = data[[att,'class']]\n sub = sub[sub[att]==var]\n if not sub.empty:\n ssub = sub[sub['class']==c]\n if not ssub.empty:\n count = len(ssub)\n else:\n count = 0\n clist.append(count)\n av[c] = clist\n\n return av", "def _transform_idoc(df):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n df['comcnty'] = ((df['comcnty'] + 1) / 2).astype(int)\n df.columns = ['year', 'fk_simplecount_county'] + df.columns.tolist()[2:]\n\n indicator_list = [1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1620, 1621]\n \n c_nc = df['admtypo3'] == 1\n c_tv = df['admtypo3'] == 2\n c_pers = df['offtype2'] == 1 # df['offtype'] == 1\n c_prop = df['offtype2'] == 2 # df['offtype'] == 2\n c_sex = df['offtype2'] == 4 # df['offtype'] == 4\n c_drug = df['offtype2'].isin([3.1, 3.2, 3.3, 3.4, 3.5, 3.6]) # df['offtype'] == 3\n c_other = df['offtype2'].isin([0, 3, 5, 7]) # df['offtype'] == 7\n c_viol = df['offtype'] == 1\n c_male = df['sex'] == 'M'\n c_female = ~c_male\n\n c_first2 = [c_nc, c_tv]\n c_others = [c_pers, c_prop, c_sex, c_drug, c_other, c_viol, c_male, c_female]\n \n def helper(c, indicator_id, first2):\n df['fk_simplecount_indicator'] = indicator_id\n g = ['fk_simplecount_indicator', 'year', 'fk_simplecount_county']\n if first2:\n return df[c].groupby(g).size().reset_index(name='value')\n 
else:\n return df[c_nc & c].groupby(g).size().reset_index(name='value')\n\n out = pd.DataFrame()\n for i in range(2):\n out = out.append(helper(c_first2[i], indicator_list[i], first2=True))\n \n for i in range(len(c_others)):\n out = out.append(helper(c_others[i], indicator_list[i+2], first2=False))\n\n out = out.loc[out['fk_simplecount_county'].isin(range(1,102+1))]\n return out[_SIMPLECOUNT_COLUMNS]\n except:\n raise", "def test_topic_reduction_edge_cases(base_bertopic):\n\n nr_topics = 5\n base_bertopic.nr_topics = 100\n old_documents = pd.DataFrame({\"Document\": newsgroup_docs,\n \"ID\": range(len(newsgroup_docs)),\n \"Topic\": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})\n base_bertopic._update_topic_size(old_documents)\n c_tf_idf = base_bertopic._extract_topics(old_documents, topic_reduction=True)\n old_freq = base_bertopic.get_topics_freq()\n\n new_documents = base_bertopic._reduce_topics(old_documents, c_tf_idf)\n new_freq = base_bertopic.get_topics_freq()\n\n assert not set(old_documents.Topic).difference(set(new_documents.Topic))\n pd.testing.assert_frame_equal(old_documents, new_documents)\n pd.testing.assert_frame_equal(old_freq, new_freq)", "def get_winning_topics(cls):\n results_generator = cls.get_results()\n winning_hoechstzahls = []\n for i in range(config['openslides_topicvoting_posts']):\n try:\n winning_hoechstzahls.append(results_generator.next())\n except StopIteration:\n topic_post_warning = True\n runoff_poll_warning = False\n break\n else:\n topic_post_warning = False\n try:\n first_looser_hoechstzahl = results_generator.next()\n except StopIteration:\n runoff_poll_warning = False\n else:\n # First runoff poll check: Check equal hoechstzahls between the categories.\n if (first_looser_hoechstzahl.value == winning_hoechstzahls[-1].value and\n first_looser_hoechstzahl.topic.category.weight == winning_hoechstzahls[-1].topic.category.weight):\n runoff_poll_warning = True\n else:\n runoff_poll_warning = False\n winning_topics = map(lambda hoechstzahl: hoechstzahl.topic, winning_hoechstzahls)\n return {'winning_topics': winning_topics,\n 'topic_post_warning': topic_post_warning,\n 'runoff_poll_warning': runoff_poll_warning}", "def manual_community_topic_labels(date='2019_01'):\n comm_size = get_blob(bucket_name=date, blob_name=f\"community_modularity.csv\", df=True)\n comm_size.name = 'size'\n comm_size.index.name = 'community'\n comm_size = comm_size.reset_index()\n labels = {\n 0:'images/discussion',\n 1:'discussion/tv',\n 2:'runescape',\n 3:'funny/images',\n 4:'gaming/tech',\n 5:'pol/geo',\n 6:'generalist',\n 7:'porn',\n 8:'music',\n 9:'sports',\n 10:'SE/DK',\n 11:'SP/IT/PT',\n 12:'NL'\n }\n\n membership = load_membership()\n order_keys = membership.drop_duplicates('community').set_index('community')['size_order'].to_dict()\n\n comm_size['size_order'] = comm_size['community'].map(lambda x: order_keys[x])\n comm_size = comm_size.sort_values('size_order').reset_index(drop=True)\n\n comm_size['labels']=comm_size['community'].map(lambda x: labels[x])\n comm_size.to_csv(tables_path(f\"{date}/comm_size.csv\"))", "def create_topic_columns(videos, topics):\n \n # Clear values\n videos['relevant'] = False\n\n # Create masks for each topic so we can later filter videos by topics\n topic_masks = []\n for _, topic in topics.iterrows():\n videos[topic['slug']] = False # Clear values\n pattern = get_pattern(topic)\n topic_mask = videos.apply(lambda video: is_relevant(video, pattern), axis=1)\n topic_masks.append(topic_mask)\n videos[topic['slug']] = topic_mask\n\n # 
Mark video as 'relevant' if it mentions any of the topics\n videos['relevant'] = np.any(np.column_stack(topic_masks), axis=1)", "def posts_per_topic_all(request, pk):\n #update is_expired in all posts\n update_posts_expiration()\n #get all posts with a certain topic\n posts = Post.objects.filter(topic=pk)\n serializer = ViewPostSerializer(posts, many=True)\n return Response(serializer.data)", "def analysis_5_result(primary_person_df,units_df,output_folder_path):\n df1 = primary_person_df.distinct().select(\"CRASH_ID\",\"UNIT_NBR\",\"PRSN_ETHNICITY_ID\")\n df2 = units_df.distinct().select(\"CRASH_ID\",\"UNIT_NBR\",\"VEH_BODY_STYL_ID\")\n join_res_df = df1.join(broadcast(df2), [\"CRASH_ID\",\"UNIT_NBR\"])\n rank_window_spec = Window.partitionBy(\"VEH_BODY_STYL_ID\").orderBy(col(\"COUNT\").desc())\n top_ethnic_unique_body_df = join_res_df \\\n .groupBy(\"VEH_BODY_STYL_ID\",\"PRSN_ETHNICITY_ID\") \\\n .agg(count(\"*\").alias(\"COUNT\")) \\\n .withColumn(\"RANK\", dense_rank().over(rank_window_spec)) \\\n .filter(\"RANK == 1\")\\\n .drop(\"RANK\")\n\n print(\"Analysis 5: \\nMention the top ethnic user group of each unique body style\")\n top_ethnic_unique_body_df.show(truncate=False)\n write_df_to_csv(top_ethnic_unique_body_df, output_folder_path+\"analysis_5_result\")", "def get_topic_measures(dict_clusters, dict_groundtruth, exact_match=True, cluster_n=None, coverage_percentage=None,\n coverage_n=None, keep_cluster_duplicates=False):\n total_tp = 0\n total_fp = 0\n total_n = get_total_time_frame_count(dict_groundtruth)\n\n dict_time_frame_measures = dict()\n for time_frame in dict_clusters.keys():\n tp = 0\n fp = 0\n keyword_n = 0\n if time_frame not in dict_groundtruth:\n fp += 1\n # total_fp += 1\n else:\n groundtruth = dict_groundtruth[time_frame]\n clusters = dict_clusters[time_frame]\n\n keyword_n, keyword_tp, keyword_fp, matched_events = eval_clusters(clusters, groundtruth, exact_match,\n cluster_n,\n keep_cluster_duplicates=keep_cluster_duplicates)\n\n temp_tp = 0\n temp_fp = 0\n for k in matched_events.keys():\n matched_event = matched_events[k]\n\n # if coverage percentage is given\n if coverage_percentage:\n if matched_event.event_TP / matched_event.event_n >= coverage_percentage:\n temp_tp += 1\n else:\n temp_fp += 1\n\n # if coverage count is given\n elif coverage_n:\n if matched_event.event_TP >= coverage_n:\n temp_tp += 1\n else:\n temp_fp += 1\n\n else:\n if matched_event.event_TP == matched_event.event_n:\n temp_tp += 1\n else:\n temp_fp += 1\n\n # if all events belong to the time frame are identified, count time frame as a TP\n if temp_tp == len(groundtruth):\n tp += 1\n else:\n fp += 1\n\n dict_time_frame_measures[time_frame] = [len(groundtruth), temp_tp, temp_fp]\n total_tp += tp\n total_fp += fp\n\n return total_n, total_tp, total_fp, dict_time_frame_measures", "def filter_by_topic(tmp_df, keep_top_n_topics=0, min_count_threshold=0):\n\n topic_count_df = tmp_df[\"variety\"].value_counts()\n\n # filter by top n number of topics if specified\n if keep_top_n_topics is not None:\n topics_to_keep = topic_count_df.head(keep_top_n_topics).index\n tmp_df = tmp_df[tmp_df[\"variety\"].isin(topics_to_keep)]\n\n # filter out any topics that doesn't meet the minimum count threshold\n if min_count_threshold >= 0:\n topics_to_keep = topic_count_df[\n topic_count_df > min_count_threshold].index\n tmp_df = tmp_df[tmp_df[\"variety\"].isin(topics_to_keep)]\n\n return tmp_df", "def get_word_frequencies(topic_description):\n frequencies = {w:f for w,f in topic_description}\n return 
frequencies", "def get_stories(df):\n categories = df.get_categorical().columns\n continuous = df.get_numerical().columns\n\n stories = []\n cat_copy = list(categories)\n for col in categories:\n # Remove the current col\n if col in cat_copy:\n cat_copy.remove(col)\n try:\n # Get comparison variable\n x = cat_copy.pop()\n d = pd.pivot_table(df.data, index=(col), values=[x],\\\n aggfunc='count').reset_index().sort_values(by=x, ascending=False)\n stories.append({\n 'question': \"%s with high count of %s\" %(col, x),\n 'question_html': \"<span class='tag is-primary is-light'>%s</span>\\\n with high count of <span class='tag is-success is-light'>%s</span>\" % (col, x),\n 'answer': d[col].head(1).values[0],\n 'misc': d\n })\n except IndexError as e:\n pass\n \n for num in continuous:\n d = pd.pivot_table(df.data, index=[col], values=[num],\\\n aggfunc=np.sum).reset_index().sort_values(by=num, ascending=False)\n stories.append({\n 'question': \"%s with sum of %s\" % (col, num),\n 'question_html': \"<span class='tag is-primary is-light'>%s</span>\\\n with sum of <span class='tag is-success is-light'>%s</span>\" % (col, num),\n 'answer': round(d[num].head(1).values[0]),\n 'misc': d\n })\n\n return stories", "def getTopicPmi(self, folderpath, numTopic):\n # get tweets\n # helper = Utility.Helper(self.rootpath)\n folderPath = os.path.join(folderpath, 'final')\n tweets = self.helper.getTweet(folderPath)\n with codecs.open(os.path.join(self.rootpath, folderPath,\n \"tweets_line.txt\"),\n \"w\", encoding='utf8') as fp:\n for tweet in tweets:\n # print (type(tweet.text.encode('utf8')))\n c1 = self.preprocessData.cleanTweet(tweet.text)\n fp.write(c1 + '\\n')\n\n # get topic with pmi\n corpus_path = os.path.join(self.rootpath, folderPath, \"tweets_line.txt\"\n )\n n_topics = numTopic\n n_top_words = 5\n preprocessing_steps = ['tag']\n n_cand_labels = 100\n label_min_df = 5\n label_tags = ['NN,NN', 'JJ,NN']\n n_labels = 3\n lda_random_state = 12345\n lda_n_iter = 10000\n\n labels, words, dist = label_topic.get_topic_labels(corpus_path,\n n_topics,\n n_top_words,\n preprocessing_steps,\n n_cand_labels,\n label_min_df,\n label_tags,\n n_labels,\n lda_random_state,\n lda_n_iter)\n print(\"\\nTopical labels:\")\n print(\"-\" * 20)\n topics = defaultdict(list)\n for i, labels in enumerate(labels):\n topics[i] = map(lambda l: ' '.join(l), labels)\n print(u\"Topic {}: {}\".format(i, ', '.join(map(lambda l:\n ' '.join(l),\n labels))\n )\n )\n self.helper.dumpJson(folderPath, \"tweets_label_words_pmi.json\",\n labels)\n print(\"tweets_topic_label_pmi.json has been saved.\")\n self.helper.dumpJson(folderPath, \"tweets_topic_words_pmi.json\",\n words)\n print(\"tweets_topic_words_pmi.json has been saved.\")\n return dist", "def iterate_data(self):\n if \"single\" in self.dataset_name:\n # Index 0 for list of sentence lengths, index 1 for list of token lengths\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for answer_id in self.data:\n summary = self.data[answer_id]['summary']\n articles = self.data[answer_id]['articles']\n question = self.data[answer_id]['question']\n if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(articles, 'article')\n self._get_token_cnts(question, 'question')\n self._write_stats(\"token_counts\")\n\n if \"multi\" in self.dataset_name:\n self.stat_dict = {'question': [[], []], 'summary': [[], []], 'article': [[], []]}\n for q_id in self.data:\n summary = self.data[q_id]['summary']\n question = self.data[q_id]['question']\n 
if args.tokenize:\n self._get_token_cnts(summary, 'summary')\n self._get_token_cnts(question, 'question')\n question = self.data[q_id]['question']\n for answer_id in self.data[q_id]['articles']:\n articles = self.data[q_id]['articles'][answer_id][0]\n if args.tokenize:\n self._get_token_cnts(articles, 'article')\n self._write_stats(\"token_counts\")\n\n if self.dataset_name == \"complete_dataset\":\n self.stat_dict = {'urls': [], 'sites': []}\n article_dict = {}\n print(\"Counting answers, sites, unique urls, and tokenized counts of unique articles\")\n answer_cnt = 0\n for q_id in self.data:\n for a_id in self.data[q_id]['answers']:\n answer_cnt += 1\n url = self.data[q_id]['answers'][a_id]['url']\n article = self.data[q_id]['answers'][a_id]['article']\n if url not in article_dict:\n article_dict[url] = article\n self.stat_dict['urls'].append(url)\n assert \"//\" in url, url\n site = url.split(\"//\")[1].split(\"/\")\n self.stat_dict['sites'].append(site[0])\n print(\"# of Answers:\", answer_cnt)\n print(\"Unique articles: \", len(article_dict)) # This should match up with count written to file\n self._write_stats(\"full collection\")\n\n # Get token/sent averages of unique articles\n if args.tokenize:\n self.stat_dict = {'article': [[], []]}\n for a in article_dict:\n self._get_token_cnts(article_dict[a], 'article')\n self._write_stats(\"token_counts\")", "def document_distribution_per_topic(df_dominant_topic):\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'\n\n\n fig, axes = plt.subplots(2,2,figsize=(16,14), dpi=160, sharex=True, sharey=True)\n\n for i, ax in enumerate(axes.flatten()):\n df_dominant_topic_sub = df_dominant_topic.loc[df_dominant_topic.Dominant_Topic == i, :]\n doc_lens = [len(d) for d in df_dominant_topic.Text]\n ax.hist(doc_lens, bins = 1000, color=cols[i])\n ax.tick_params(axis='y', labelcolor=cols[i], color=cols[i])\n sns.kdeplot(doc_lens, color=\"black\", shade=False, ax=ax.twinx())\n ax.set(xlim=(0, 1000), xlabel='Document Word Count')\n ax.set_ylabel('Number of Documents', color=cols[i])\n ax.set_title('Topic: '+str(i), fontdict=dict(size=16, color=cols[i]))\n\n fig.tight_layout()\n fig.subplots_adjust(top=0.90)\n plt.xticks(np.linspace(0,1000,9))\n fig.suptitle('Distribution of Document Word Counts by Dominant Topic', fontsize=22)\n plt.show()", "def scrapeTopic(self, topic, num_articles, sources=list(site.all_sites)): \n pass", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. 
z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def validate_new_curriculum_topics(self, curriculum_topics):\n\n for cur in curriculum_topics:\n # check to make sure its in the general topics table\n self.db_cursor.execute(\"\"\"SELECT COUNT(*) FROM Topic WHERE name = %s\"\"\", (cur,))\n ct = self.db_cursor.fetchone()\n ct = ct[0]\n if ct == 0:\n print(\"topic does not exist, we must create new one or cancel\") # todo\n\n return True", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def prepare_titanic_data(df):\n\n df.embark_town.fillna('Other', inplace=True)\n\n # Drop deck and embarked_town\n df.drop(columns=['deck', 'embark_town'], inplace=True)\n\n # Encoding: Objects (Categorical Variables) to Numeric\n # Use sklearn's LabelEncoder\n encoder = LabelEncoder()\n\n # Set Unknown and encode Embarked column to numbers\n # 2 == \"S\" == Southampton == 644 people\n # 0 == \"C\" == Cherbourg == 168 people\n # 1 == \"Q\" == Queenstown == 77 people\n # 3 == \"Unknown\" == 2 people\n df.embarked.fillna('Unknown', inplace=True)\n encoder.fit(df.embarked)\n df.embarked = encoder.transform(df.embarked)\n\n # Encode the Class (first class, second, etc...)\n # First class == 0\n # Second class == 1\n # Third class == 2\n encoder.fit(df[\"class\"])\n df[\"class_encoded\"] = encoder.transform(df[\"class\"])\n\n # Encode gender\n # male == 1 == 577 records\n # female == 0 == 314 records\n encoder.fit(df.sex)\n df[\"sex_encoded\"] = encoder.transform(df.sex)\n\n # Handle the 177 records with missing age values\n average_age = df.age.mean()\n df.age.fillna(average_age, inplace=True)\n\n scaler = MinMaxScaler()\n scaler.fit(df[['fare']])\n df[\"fare_scaled\"] = scaler.transform(df[['fare']])\n\n scaler = MinMaxScaler()\n scaler.fit(df[['age']])\n df[\"age_scaled\"] = scaler.transform(df[['age']])\n\n # Set the index to the passenger id\n df = df.set_index(\"passenger_id\")\n return df", "def tweet_df(n):\n # Retrieve the tweet contents\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Compute the compound score for obtaining a sentiment class\n compound_score_first_tweet = sentiment_logic((list(sa_first_tweet.values())[list(sa_first_tweet.keys()).index('compound')] ))\n compound_score_second_tweet = 
sentiment_logic((list(sa_second_tweet.values())[list(sa_second_tweet.keys()).index('compound')] )) \n compound_score_third_tweet = sentiment_logic((list(sa_third_tweet.values())[list(sa_third_tweet.keys()).index('compound')] ))\n compound_score_fourth_tweet = sentiment_logic((list(sa_fourth_tweet.values())[list(sa_fourth_tweet.keys()).index('compound')] ))\n compound_score_fifth_tweet = sentiment_logic((list(sa_fifth_tweet.values())[list(sa_fifth_tweet.keys()).index('compound')] ))\n compound_score_sixth_tweet = sentiment_logic((list(sa_sixth_tweet.values())[list(sa_sixth_tweet.keys()).index('compound')] ))\n compound_score_seventh_tweet = sentiment_logic((list(sa_seventh_tweet.values())[list(sa_seventh_tweet.keys()).index('compound')] ))\n compound_score_eighth_tweet = sentiment_logic((list(sa_eighth_tweet.values())[list(sa_eighth_tweet.keys()).index('compound')] ))\n compound_score_nineth_tweet = sentiment_logic((list(sa_nineth_tweet.values())[list(sa_nineth_tweet.keys()).index('compound')] ))\n compound_score_tenth_tweet = sentiment_logic((list(sa_tenth_tweet.values())[list(sa_tenth_tweet.keys()).index('compound')] ))\n \n # Create a new temporary dataframe for the tweet contents and sentiment\n compound_score_list = [compound_score_first_tweet, compound_score_second_tweet,\n compound_score_third_tweet, compound_score_fourth_tweet,\n compound_score_fifth_tweet, compound_score_sixth_tweet, \n compound_score_seventh_tweet, compound_score_eighth_tweet,\n compound_score_nineth_tweet, compound_score_tenth_tweet]\n \n \n first_col = [first_tweet, second_tweet,\n third_tweet, fourth_tweet,\n fifth_tweet, sixth_tweet,\n seventh_tweet, eighth_tweet,\n nineth_tweet, tenth_tweet]\n \n second_col = compound_score_list\n \n tmp_df = pd.DataFrame(data = {'Tweets' : first_col, \n 'Sentiment' : second_col})\n \n \n return tmp_df.to_json(date_format = 'iso', orient = 'split')" ]
[ "0.670414", "0.6682198", "0.6473323", "0.63052845", "0.6284073", "0.5883223", "0.58540165", "0.5768975", "0.56314296", "0.5595589", "0.55580306", "0.5541051", "0.5484164", "0.54825205", "0.5471465", "0.5443077", "0.53968257", "0.5374345", "0.5362987", "0.5333066", "0.5320533", "0.5308043", "0.53065175", "0.52841324", "0.5240766", "0.52356154", "0.52329373", "0.52273995", "0.52240163", "0.521626", "0.5211933", "0.5203348", "0.5189752", "0.5181925", "0.5180661", "0.5169465", "0.515654", "0.5126955", "0.5111875", "0.511152", "0.51057273", "0.5102154", "0.50883985", "0.5079168", "0.507327", "0.5064662", "0.5062901", "0.5061368", "0.5060122", "0.50509924", "0.50500023", "0.5049223", "0.5037668", "0.5037464", "0.5037276", "0.5029944", "0.50166714", "0.50112253", "0.5011054", "0.50106376", "0.50070673", "0.5005642", "0.5005098", "0.50037766", "0.4995084", "0.4995084", "0.49873996", "0.4977276", "0.4969586", "0.49690676", "0.4965407", "0.49626452", "0.4960137", "0.4946523", "0.49440113", "0.49387252", "0.49378192", "0.49370906", "0.49369827", "0.49368608", "0.49324876", "0.49308643", "0.492595", "0.49245626", "0.49127477", "0.4910924", "0.4900158", "0.4898201", "0.48939243", "0.48889497", "0.48882478", "0.4888174", "0.4884081", "0.4881955", "0.48597437", "0.4858623", "0.48481187", "0.48480734", "0.48430032", "0.48367956" ]
0.7382751
0
Plot a data frame with bar type
def plot_type_of_topic(data_frame: pb.DataFrame) -> None:
    plt.interactive(False)
    plt.figure()
    data_frame.plot(kind='bar', x= data_frame['TopicID'])
    plt.show()
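A minimal, self-contained sketch of the same bar-plot call through the standard pandas API; the sample data and the y column here are illustrative, while 'TopicID' mirrors the column used in the snippet above (pandas normally expects x to be a column label rather than a Series):

import pandas as pd
import matplotlib.pyplot as plt

# Illustrative data; 'TopicID' mirrors the column name from the snippet above.
df = pd.DataFrame({'TopicID': ['T1', 'T2', 'T3'], 'Count': [10, 4, 7]})

# DataFrame.plot with kind='bar' draws one bar per row; x names the label column.
df.plot(kind='bar', x='TopicID', y='Count')
plt.show()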
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n \n x = df[y].loc[data_pt]\n \n # Here we modify the tickangle of the xaxis, resulting in rotated labels.\n #title={'text': \"<b>Comparing features with Golden for Cycle {}\".format(cycle),\n # 'y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'}\n\n \n trace = {'type': 'bar',\n 'orientation':'h',\n 'x' : x,\n 'y' : y}\n data = Data([trace])\n layout = {'title' : \"<b>Reconstruction error in each dimension for cycle{}\".format(data_pt),\n 'titlefont':{'size' : 20},\n 'xaxis' : {'title': '<b>Reconstruction Error',\n 'titlefont':{'size' : 20},\n 'tickangle': -45, 'tickfont': {'size':15} ,},\n \n 'yaxis' : {'title': '<b>Features',\n 'titlefont':{'size' : 20},\n 'tickfont': {'size':15},},\n 'margin' : {'l':100, 'r' : 1, 'b': 200, 't': 100, 'pad' : 1},\n 'height' : 600, 'width' : 800,\n }\n \n fig = Figure(data = data, layout = layout)\n \n return pyo.iplot(fig)", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def bar_plot(df, field_name, graph_title, threshold_value, x_axis_label, y_axis_label):\n\n x = df[field_name].value_counts().sort_values()\n x[x > threshold_value].plot(kind='barh', figsize=(12, 8), title=graph_title, x=x_axis_label, y=y_axis_label)\n return", "def bar(\n df,\n x=None,\n y=\"value\",\n bars=\"variable\",\n order=None,\n bars_order=None,\n orient=\"v\",\n legend=True,\n title=True,\n ax=None,\n cmap=None,\n **kwargs,\n):\n\n # default x-axis to time-col attribute from an IamDataFrame, else use \"year\"\n x = x or time_col_or_year(df)\n\n # cast to DataFrame if necessary\n # TODO: select only relevant meta columns\n if not isinstance(df, pd.DataFrame):\n df = df.as_pandas()\n\n for col in set(SORT_IDX) - set([x, bars]):\n if len(df[col].unique()) > 1:\n msg = \"Can not plot multiple {}s in bar plot with x={}, bars={}\"\n raise ValueError(msg.format(col, x, bars))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # long form to one column per bar group\n _df = reshape_mpl(df, x, y, bars, **{x: order, bars: bars_order})\n\n # explicitly get colors\n defaults = default_props(reset=True, 
num_colors=len(_df.columns), colormap=cmap)[\n \"color\"\n ]\n rc = run_control()\n color = []\n for key in _df.columns:\n c = next(defaults)\n if \"color\" in rc and bars in rc[\"color\"] and key in rc[\"color\"][bars]:\n c = rc[\"color\"][bars][key]\n color.append(c)\n\n # change year to str to prevent pandas/matplotlib from auto-ordering (#474)\n if _df.index.name == \"year\":\n _df.index = map(str, _df.index)\n\n # plot data\n kind = \"bar\" if orient.startswith(\"v\") else \"barh\"\n _df.plot(kind=kind, color=color, ax=ax, **kwargs)\n\n # add legend\n ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5))\n if not legend:\n ax.legend_.remove()\n\n # add default labels if possible\n if orient == \"v\":\n ax.set_xlabel(x.capitalize())\n else:\n ax.set_ylabel(x.capitalize())\n units = df[\"unit\"].unique()\n if len(units) == 1 and y == \"value\":\n if orient == \"v\":\n ax.set_ylabel(units[0])\n else:\n ax.set_xlabel(units[0])\n\n # build a default title if possible\n _title = []\n for var in [\"model\", \"scenario\", \"region\", \"variable\"]:\n values = df[var].unique()\n if len(values) == 1:\n _title.append(\"{}: {}\".format(var, values[0]))\n if title and _title:\n title = \" \".join(_title) if title is True else title\n ax.set_title(title)\n\n return ax", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()", "def featuresBarPlot(barNames,barValues):\n plt.bar(range(0,len(barNames)),barValues)\n plt.xticks(range(0,len(barNames)), barNames,rotation='vertical')\n plt.show()", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... 
box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()", "def BarPlot(data,colormap='Paired',ax=None,headers='show',value_max=None,x_ticklabels_rotation=90,**kws):\r\n if ax is None:\r\n ax=plt.subplot(111)\r\n\r\n if value_max is None:\r\n value_max=data.sum(1).max()\r\n\r\n data.plot(kind='bar', stacked=True,colormap=colormap, ax=ax,**kws)\r\n ax.set_ylim((0,value_max))\r\n\r\n\r\n #reverse legend order\r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(reversed(handles),reversed(data.columns),bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n #AXES\r\n if (headers is None or headers=='hide'):\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_xaxis().set_ticks([])\r\n elif headers=='show':\r\n plt.setp(ax.get_xticklabels(),rotation=x_ticklabels_rotation)\r\n ax.set_xlabel(None,visible=False)\r\n\r\n\r\n #plt.tight_layout()\r\n\r\n\r\n return ax", "def bar_chart(\n df,\n orientation='v',\n bar_width=None,\n opacity=0.9,\n textpos=None,\n linewidth=1,\n linecolor='#2C3347',\n marker_color=None,\n **kwargs):\n\n traces = []\n rng = df.index.size if orientation == 'v' else df.columns.size\n otn = orientation\n for i in range(rng):\n x = [str(x) for x in df.columns] if otn == 'v' else df.iloc[:, i]\n y = df.iloc[i] if otn == 'v' else [str(x) for x in df.index]\n text = df.iloc[i] if otn == 'v' else df.iloc[:, i]\n name = df.iloc[i].name if otn == 'v' else df.columns[i]\n\n preset_args = dict(\n x=x,\n y=y,\n text=text,\n textposition=textpos,\n marker=dict(\n opacity=opacity,\n color=marker_color,\n line=dict(\n color=linecolor,\n width=linewidth)),\n name=name,\n width=bar_width,\n orientation=orientation\n )\n\n all_args = {**preset_args, **kwargs}\n bar = go.Bar(all_args)\n traces.append(bar)\n\n return traces", "def plot_bar_chart(objects, data, title='', ylabel='', bar_color = 'blue'):\n y_pos = np.arange(len(objects))\n\n plt.bar(y_pos, data, align='center', alpha=0.5)\n plt.xticks(y_pos, objects, rotation='vertical')\n plt.ylabel(ylabel, fontsize=12)\n plt.title(title, fontsize=12)\n plt.ylim([0,1300])\n plt.bar(range(len(data)), data, color=bar_color)\n\n return plt.show()", "def bar_plot(data, xtitle, title):\n label = list(set(data))\n height = count_elements(data)\n height = [height[i] for i in label]\n plt.bar(label, height=height, width=0.8)\n plt.ylabel('frequency')\n plt.xlabel(xtitle)\n plt.xticks(label)\n plt.savefig('./figures/{}.png'.format(title))\n plt.close()", "def drawStackedBarPlot(df, column, hue):\n plt.style.use('default')\n plt.style.use('dark_background')\n p_table = pd.pivot_table(df, index=column, \n columns=hue, aggfunc='size')\n p_table = p_table.div(p_table.sum(axis=1), axis=0)\n p_table.plot.bar(stacked=True, figsize=(14,7))\n plt.xlabel('Spekraltyp')\n plt.ylabel('Anteil')\n plt.show()", "def barplot(bars, title='', upColor='blue', downColor='red'):\n import pandas as pd\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n from matplotlib.patches import Rectangle\n\n if isinstance(bars, pd.DataFrame):\n ohlcTups = [\n tuple(v) for v in bars[['open', 'high', 'low', 'close']].values]\n elif bars and hasattr(bars[0], 'open_'):\n ohlcTups = [(b.open_, b.high, b.low, b.close) for b in bars]\n else:\n ohlcTups = [(b.open, b.high, b.low, b.close) for b in 
bars]\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.grid(True)\n fig.set_size_inches(10, 6)\n for n, (open_, high, low, close) in enumerate(ohlcTups):\n if close >= open_:\n color = upColor\n bodyHi, bodyLo = close, open_\n else:\n color = downColor\n bodyHi, bodyLo = open_, close\n line = Line2D(\n xdata=(n, n),\n ydata=(low, bodyLo),\n color=color,\n linewidth=1)\n ax.add_line(line)\n line = Line2D(\n xdata=(n, n),\n ydata=(high, bodyHi),\n color=color,\n linewidth=1)\n ax.add_line(line)\n rect = Rectangle(\n xy=(n - 0.3, bodyLo),\n width=0.6,\n height=bodyHi - bodyLo,\n edgecolor=color,\n facecolor=color,\n alpha=0.4,\n antialiased=True\n )\n ax.add_patch(rect)\n\n ax.autoscale_view()\n return fig", "def plot_data_stats(data):\n sns.set_style(\"dark\")\n f, ax = plt.subplots(figsize=(6, 15))\n\n ax = sns.barplot(x='tag', y='count', data=tags_freqs)\n\n ax.axes.set_title(\"POS Tags Frequencies\",fontsize=20)\n ax.set_xlabel(\"POS Tags\", fontsize=16)\n ax.set_ylabel(\"Counts\", fontsize=16)\n ax.tick_params(labelsize=12)\n\n plt.show()", "def barplot(data, field_name, field_categories):\n\n\tcategories, counts = np.unique(data[field_name], return_counts=True)\n\n\tfig = plt.figure(figsize=(4, 3))\n\taxes = fig.add_axes([0, 0, 1, 1]) # left, bottom, width, height (range 0 to 1)\n\taxes.bar(range(len(categories)), counts, fc=\"gray\") # fc is the face color\n\n\taxes.set_xlabel(\"\")\n\taxes.set_ylabel('Count')\n\taxes.set_title(field_name)\n\tfig.autofmt_xdate(rotation=45)\n\n\taxes.set_xticks(range(len(categories)))\n\taxes.set_xticklabels([field_categories[c] for c in categories]);", "def _bar_example_4(quantity_by_fruit):\n ch = chartify.Chart(x_axis_type=\"categorical\", blank_labels=True)\n ch.set_title(\"Vertical bar plot with labels\")\n ch.set_subtitle(\"Hidden y-axis\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.style.color_palette.reset_palette_order()\n ch.plot.text(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n text_column=\"quantity\",\n color_column=\"fruit\",\n )\n # Adjust the axis range to prevent clipping of the text labels.\n ch.axes.set_yaxis_range(0, 1200)\n ch.axes.hide_yaxis()\n ch.show(_OUTPUT_FORMAT)", "def plotify_bar(title, data):\n\n x, y, z, labels = [], [], [], []\n\n for d in reversed(data[:len(data) - 1]):\n x.append(f\"{d['settimana_del']:%d-%b}\\n{d['settimana_fino_al']:%d-%b}\")\n y.append(d['nuovi_positivi'])\n z.append(\"lightgrey\" if d['giorni'] < 7 else 'green' if d['delta'] <= 0 else 'red' )\n labels.append(human_format(d['nuovi_positivi']) if d['giorni'] == 7 else f\"{human_format(d['nuovi_positivi'])}\\n(in corso)\" )\n\n x_pos = np.arange(len(x))\n\n # create a new figure\n plt.figure()\n\n plt.title(title)\n\n # Create bars with different colors\n plt.bar(x_pos, y, color=z)\n\n # Create names on the x-axis\n plt.xticks(x_pos, x, rotation=40)\n\n\n # Text on the top of each bar\n x_ticks = plt.gca().get_xticks()\n for i in range(len(y)):\n text = data[i]\n plt.text(x = x_ticks[i], y = y[i]+5, s = labels[i], size = 9, horizontalalignment='center', verticalalignment='bottom')\n\n # prettify y values\n current_values = plt.gca().get_yticks()\n plt.gca().set_yticklabels(['{:n}'.format(int(x)) for x in current_values])\n\n # responsive layout\n plt.tight_layout()\n\n\n\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n\n ### Release memory\n # Clear the current axes.\n 
plt.cla() \n # Clear the current figure.\n plt.clf() \n # Closes all the figure windows.\n plt.close('all') \n # plt.close(fig)\n gc.collect()\n\n return buf", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def value_counts_plot(df):\n \n plt.figure(figsize=(15,10))\n \n #get rid of sort_index() to change the graph\n return df.value_counts().sort_index().plot(kind='bar')", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def draw_bar(x_index, data_list, xticks, title, x_label, y_label):\n pyplot.bar(x_index, data_list)\n pyplot.xlabel(x_label)\n pyplot.ylabel(y_label)\n pyplot.xticks(x_index, xticks)\n pyplot.title(title)\n pyplot.show()\n pyplot.savefig()", "def plot(*args, **params):\n if len(args) == 1: # only support data for now\n if isinstance(args[0], list):\n bar_2d = Bar2D(data=args[0])\n bar_2d.plot()\n bar_2d.fig.show()\n else:\n bar_2d = Bar2D(**params)\n bar_2d.plot()\n bar_2d.fig.show()", "def _bar_example_3(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"categorical\")\n ch.set_title(\"Horizontal bar plot\")\n ch.set_subtitle(\"Horizontal with color grouping\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def BarOverview(data):\n return dcc.Graph(id=\"BarOverview\", className=\"bar\", figure=dict(\n data=[go.Bar(\n x=data[\"frequencies\"],\n y=data[\"names\"],\n orientation='h',\n marker={\n 'color': '#ff4058'\n },\n )],\n layout=dict(\n title=\"<b>Most common Persons</b>\",\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=10, r=20, t=50, b=30),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(tick0=0, dtick=max(data[\"frequencies\"])),\n yaxis=dict(ticks='outside',\n showgrid=True,\n showline=False,\n showticklabels=False),\n annotations=[dict(xref='paper', yref='y',\n x=0, y=yd,\n font=dict(\n color=\"#000000\",\n size=19\n ),\n text=str(yd),\n showarrow=False) for xd, yd in zip(data[\"frequencies\"], data[\"names\"])]\n )\n ))", "def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return", "def barplot(self, x = \"Predictor\", color = None, opacity = 1, template = \"ggplot2\", \n has_title = True, barmode=\"stack\", is_horizontal = False, title = None, is_percent = False,\n show_num = False):\n if color: #Produce either a stacked or grouped bar plot\n df_stack = self._df.groupby([x,color]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x, color]).size().groupby(level = 0).apply(lambda \n x:100 * x/float(x.sum())).values\n df_stack.columns = [x, 
color, 'Count', 'Percentage']\n df_stack['Percentage'] = round(df_stack['Percentage'], 2)\n \n x_clean, df_clean = clean_varname(df_stack, var = x)\n color_clean, df_clean = clean_varname(df_clean, var = color)\n \n if has_title:\n if not title:\n title = f\"Bar Plot of {x_clean} and {color_clean}\"\n else:\n title = None\n \n \n # 8 different variations for how this graph can appear:\n if is_horizontal:\n if is_percent:\n if show_num: #Show percentages on stacked bar graph\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num: #Show counts on stacked bar graph:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if is_percent:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title) \n \n return fig\n \n else: #Create a basic bar plot\n df_stack = self._df.groupby([x]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x]).size().groupby(level = 0).apply(lambda", "def bar( # pylint: disable=disallowed-name\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"bar\", x=x, y=y, **kwargs)", "def bar(self, entry_type:str, x:str, labels:list=None, diff:bool=False, x_idx:int=-1):\n\n query = self._decode(x)\n\n data_points = []\n\n for idx, (log, name) in enumerate(zip(self.logs, self.log_names)):\n log = log[entry_type]\n\n candidates = []\n\n for entry in log:\n test = self._follow(entry, query)\n\n if type(test) == dict:\n candidates.append(test)\n elif type(test) == list:\n candidates.append({idx: v for idx, v in enumerate(test)})\n \n if len(candidates) > 0:\n data_points.append((name, candidates[x_idx]))\n \n if len(data_points) == 0:\n print('Warning: Nothing to show in bar chart!')\n return\n\n names = [x[0] for x in data_points]\n data_points = [x[1] for x in data_points]\n\n # Construct the labels for the data\n if labels is not None:\n data_labels = labels\n else:\n data_labels = set()\n for datum in data_points:\n for k in datum:\n data_labels.add(k)\n \n data_labels = list(data_labels)\n data_labels.sort()\n \n\n data_values = [[(datum[k] if k in datum else None) for k in data_labels] for datum in data_points]\n\n if diff:\n for idx in reversed(range(len(data_values))):\n for jdx in range(len(data_labels)):\n if data_values[0][jdx] is None or 
data_values[idx][jdx] is None:\n data_values[idx][jdx] = None\n else:\n data_values[idx][jdx] -= data_values[0][jdx]\n\n\n series_labels = names\n\n # Plot the graph now\n num_bars = len(series_labels)\n bar_width = 1 / (num_bars + 1)\n \n # Set position of bar on X axis\n positions = [np.arange(len(data_labels))]\n for _ in range(1, num_bars):\n positions.append([x + bar_width for x in positions[-1]])\n \n # Make the plot\n for idx, (series, data, pos) in enumerate(zip(series_labels, data_values, positions)):\n plt.bar(pos, data, color=self._color(idx), width=bar_width, edgecolor='white', label=series)\n \n # Add xticks on the middle of the group bars\n plt.title(x.replace('x.', entry_type + '.') + (' diff' if diff else ''))\n plt.xticks([r + bar_width for r in range(len(data_labels))], data_labels)\n \n # Create legend & Show graphic\n plt.legend()\n plt.show()", "def bar_time_series(df, title, ylabel, report):\n for col in df:\n fig, ax = plt.subplots(1, 1, figsize=(12, 4))\n plt.gcf().subplots_adjust(bottom=0.25)\n df[col].plot.bar();\n ax.set_xticklabels([v if i % 4 == 0 else '' for i, v in enumerate(df.index)])\n ax.xaxis.set_tick_params(rotation=45, length=0);\n ax.set_xlabel('Date')\n ax.set_ylabel(ylabel)\n full_title = title if df.shape[1] == 1 else '{} {}'.format(col, title)\n report.write_plot(full_title)\n plt.title(full_title)\n plt.show();\n plt.close();", "def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count = df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. 
of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()", "def plot_bar(filename, data, std=None, xlab='x', ylab='y', ylim=[0,1],yticks=np.arange(0,1.1,0.1), title='Bar-Plot', methods=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc=\"lower center\", xytick_fontsize=12, xylabel_fontsize=15, title_fontsize=15, legend_fontsize=12):\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n\n data=np.array(data)\n num_methods=len(data)\n \n # colors\n if colors is None:\n colors=['b','r','g','c','m','y','k','w'] # maximally 8 colors allowed so far\n\n ind = np.arange(num_methods) # the x locations of the bars\n width = 0.8 # the width of the bars\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n if std is None:\n ax.bar(ind,data,width,color=colors[0:num_methods],ecolor='k')\n else:\n ax.bar(ind,data,width,color=colors[0:num_methods],yerr=std,ecolor='k')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel(ylab,fontsize=xylabel_fontsize)\n ax.set_xlabel(xlab,fontsize=xylabel_fontsize)\n ax.set_title(title,fontsize=title_fontsize)\n ax.set_xticks(ind+0.5*width)\n ax.set_xticklabels( methods )\n #if ylim is None:\n # yticks=np.arange(0,1.1,0.1)\n # ylim=[0,1]\n if yticks is not None:\n ax.set_yticks(yticks)\n if ylim is not None:\n ax.set_ylim(ylim[0],ylim[1])\n plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize)\n plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)\n # shrink axis box \n #box = ax.get_position()\n #ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n #ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )\n #ax.legend(methods, loc=legend_loc, fontsize=legend_fontsize )\n #plt.show()\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)", "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def bar(variable, name, data=None, x_v=None, color_set=custom_bw,\n ax_size=(20, 6), highlight=None, ax=None):\n\n common_set_up(ax_size) # Apply basic plot style\n\n fig = sns.barplot(x=x_v, y=variable, data=data, saturation=1, ax=ax,\n color=color_set[2], label=name,\n )\n\n sns.despine(offset=2, 
trim=True, left=True, bottom=True)\n\n # Set title and axes\n title_color = '#192231'\n font_colour = '#9099A2'\n if ax is None:\n fig.set_title('{0}'.format(name),\n fontsize=20, color=title_color)\n fig.set_ylabel('Frequency',\n color=font_colour)\n fig.set_xlabel('{0}'.format(name),\n color=font_colour)\n\n if highlight:\n bars = fig.patches\n bars[highlight].set_color(color_set[1])\n\n return fig", "def barPlot2():\n n = 10\n X = np.arange(n)\n Y1 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')\n\n for x,y in zip(X,Y1):\n plt.text(x+0.2, y+0.05, '%.2f' % y, ha='center', va= 'bottom')\n\n plt.ylim(0,1.25)\n plt.show()", "def _bar_example_2(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot - Label sort\")\n ch.set_subtitle(\"Set `categorical_order_by` to sort by labels\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n categorical_order_by=\"labels\",\n categorical_order_ascending=True,\n )\n ch.show(_OUTPUT_FORMAT)", "def bar(self, row_id, col_id, label=None, offset=(350, 30), **kwargs):\n bar = BarGraph(label=label, **kwargs)\n self.pl[row_id, col_id].addItem(bar)\n\n bar.barClicked.connect(self.clickedBar)", "def plot_results(t_val, mood):\r\n N = 8\r\n theta = np.linspace(0.0, 2 * np.pi , N, endpoint=False)\r\n the_stats = [t_val['number_words'], t_val['average_character_length'], \r\n t_val['signs'], t_val['multiple_signs'], t_val['question'],\r\n t_val['exclamation'], t_val['name'], mood] \r\n \r\n width = np.pi / N \r\n\r\n plt.figure()\r\n \r\n handle = plt.subplot(111, polar=True)\r\n handle.set_xticklabels(['Word', 'AvrChar', 'Signs', '2Signs', '?', '!', 'name', 'mood'])\r\n \r\n handle.bar(theta, the_stats, width=width, bottom=1.0)\r\n \r\n plt.show()", "def img_gen_bar():\n data = pd.DataFrame(data=np.random.rand(5,1), index=range(1,6), columns=['Fred'])\n #m,n = np.shape(data)\n\n plt.clf()\n plt.bar(x=data.index.values, height=data.values.ravel(), color='k') # figsize=(10, 6))\n # Options for later from https://matplotlib.org/api/_as_gen/matplotlib.pyplot.bar.html\n # bar_width = 0.35\n # alpha = .3\n fig=plt.gcf()\n fig.set_size_inches(2.24, 2.24)\n plt.axis('off')\n fig.tight_layout()\n fig.canvas.draw()\n # grab the pixel buffer and dump it into a numpy array\n pixels = np.array(fig.canvas.renderer._renderer)[:,:,:3]\n #print(pixels.shape)\n return pixels, data.index.values + data.values.ravel()", "def _bar2df(bars,data):\n tmp = list(map(lambda x: [x[0] for _ in range(x[1])],list(enumerate(bars))))\n labels = list(itertools.chain(*tmp)) # tmp is a list of list and this line flattens it\n data[\"labels\"] = labels\n data[\"index\"] = list(range(data.shape[0]))\n aggregation_condition = {'time': ['first', 'last'], \\\n 'index':['first', 'last'], \\\n 'price': ['min', 'max', 'first', 'last']}\n res = data.groupby(\"labels\",as_index=False).agg(aggregation_condition)\n res.columns = ['_'.join(col) for col in res.columns.values]\n res = res.drop([\"labels_\"],axis=1)\n res.columns = ['start_t', 'end_t', 'start_idx', 'end_idx','low', 'high', 'open', 'close']\n return res", "def matplotlib_bar_chart() -> Tuple:\n df = read_dataset(Path('..', '..', 'iris.csv'))\n x = []\n\n for col in df.columns:\n try:\n max_val = get_column_max(df, col)\n x.append(max_val)\n except ValueError:\n pass\n \n fig, ax = a_libraries.matplotlib_bar_chart(np.array(x))\n\n return fig, ax", "def 
plot_bar(label_array, acc_array, f1_array, width=0.5, axis_label=None, graph_title=None, file_name=\"\", dpi=100):\n plt.figure(figsize=plt.figaspect(1.), dpi=dpi)\n x = np.arange(len(label_array)) # the label locations\n plt.bar(x - 0.5 * width, acc_array, width, label='Accuracy')\n plt.bar(x + 0.5 * width, f1_array, width, label='F1 score')\n plt.ylim([0, 1.1])\n plt.xticks(x, labels=label_array)\n if axis_label is None:\n axis_label = ['Set', 'Values']\n plt.xlabel(axis_label[0])\n plt.ylabel(axis_label[1])\n if graph_title is None:\n graph_title = graph_title\n plt.title(graph_title)\n plt.tight_layout()\n plt.legend()\n plt.grid()\n if file_name:\n plt.savefig(file_name, bbox_inches='tight')\n plt.show()\n return", "def PlotBayes( x=np.ones(1), bayes=np.ones(1), title=None, label=None, width=1.0, color='blue', show_values=False, ax=None, posterior=False ):\n\n if ax is None:\n fig, ax = plt.subplots( )\n ax.bar(x, bayes/bayes.max(), width, color=color )\n ax.set_title( title )\n ax.set_yscale('log')\n ax.set_xlabel( label )\n if posterior:\n ax.set_ylabel(r\"$P/P_{\\rm max}$\")\n else:\n ax.set_ylabel(r\"$\\mathcal{B}/\\mathcal{B}_{\\rm max}$\")\n# ax.set_ylabel(r\"$\\mathcal{B} = \\prod L / L_0$\")\n if show_values: ## print value on top of each bar, .... doesnt work ...\n shift = bayes.max()/bayes.min()/10\n for xx, b in zip( x, bayes ):\n ax.text( xx, b*shift, str(b), color=color, fontweight='bold' )\n\n ### assure that there are ticks at y axis\n lim = ax.get_ylim()\n ax.set_ylim(lim[0]*0.5, lim[1]*2)", "def barh_plotter(data: pd.DataFrame, variable: str):\n fig, axs = plt.subplots(2,5, gridspec_kw={'wspace': 1, 'hspace': 0.2},\n figsize=(60, 40), sharex = False)\n\n for ax, dta in zip(axs.flatten(), data.values()) :\n ax.barh(dta['club'], dta[f'{variable}'])\n ax.set_xlabel(f'{variable}', fontsize=25)\n \n \n for ax, dta in zip(axs.flatten(), data.keys()):\n ax.set_title(dta, fontsize=30)\n\n return plt.show()", "def barPlot1():\n n = 12\n X = np.arange(n)\n Y1 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n Y2 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n\n plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')\n plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')\n\n for x,y in zip(X,Y1):\n plt.text(x+0.2, y+0.05, '%.2f' % y, ha='center', va= 'bottom')\n\n for x,y in zip(X,Y2):\n plt.text(x+0.2, -y-0.1, '%.2f' % y, ha='center', va= 'bottom')\n\n plt.ylim(-1.25,+1.25)\n plt.show()", "def graphy2():\n data = pd.read_csv(\"week2.csv\")\n plot_g = pygal.Bar(fill=True, interpolate='cubic', style=LightSolarizedStyle)\n plot_g.title = \"Top Fans in Week 2\"\n plot_g.x_labels = data.GENDER\n plot_g.y_labels = map(int, range(0, 80, 10))\n plot_g.add(\"Male\", data.COUNT)\n plot_g.add(\"Female\", data.COUNT2)\n plot_g.add(\"Total\", data.COUNT3)\n plot_g.render_to_file(\"plotweek2.svg\")", "def draw_bar_plot(fig, x, y, labels):\r\n\r\n #Convert times to a displayable format\r\n (x_times, hour_mode) = times_to_axis(x)\r\n\r\n\r\n #Draw horizontal grid lines behind data\r\n fig.yaxis.grid(zorder=0)\r\n\r\n #Draw plot\r\n fig.bar(x_times, y, zorder=2)\r\n\r\n\r\n #If necessary, enable processing of \"datetime\" objects on the x-axis\r\n if not hour_mode:\r\n fig.xaxis_date()\r\n\r\n\r\n #Label and style plot\r\n set_axis_labels(fig, *labels)\r\n style_x_labels(fig)", "def graph(df):\n df.plot()\n plt.show()", "def plot_individual_bar_chart_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n vals_for_bar_chart,\r\n file_in):\r\n\r\n n_groups = 
len(vals_for_bar_chart)\r\n fig, ax = plt.subplots()\r\n index = np.arange(n_groups)\r\n bar_width = 0.9\r\n opacity = 0.4\r\n # print vals_for_bar_chart\r\n rects1 = plt.bar(index,\r\n vals_for_bar_chart,\r\n bar_width,\r\n alpha=opacity,\r\n color='b') # label='whatever'\r\n plt.xlabel('number in cluster')\r\n plt.ylabel('Count')\r\n plt.title(title+\"_barchart\")\r\n plt.legend()\r\n pylab.grid(True)\r\n ax.set_yscale('symlog')\r\n ax.set_xscale('symlog')\r\n plt.tight_layout()\r\n plt.show()\r\n pylab.savefig(file_in + \"_\" + title + '_barchart.png')\r\n plt.close()\r\n pylab.close()", "def show_graph(d:dict):\n x = []\n y = []\n for key, value in d.items():\n x.append(str(key))\n y.append(value)\n\n x_pos = [i for i, _ in enumerate(x)]\n plt.figure()\n plt.bar(x_pos, y, color='green')\n plt.xlabel(\"Size\")\n plt.ylabel(\"Number of images\")\n plt.title(\"Count by size\")\n plt.xticks(x_pos, x)", "def plot_type_of_two_topic(data_frame1: pb.DataFrame, data_frame2: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame1.plot(kind='bar', x= data_frame['TopicID'])\n data_frame2.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def plotBarPlots(self, stage, strain, strainID, count, p, xm, ym, xn, yn):\n\t\t\n\t\ttimeList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n\t\t\n\t\tind = np.arange(len(timeList))\n\t\t\n\t\tplt.style.use('bmh')\n\t\tcolors = ['#b2182b', '#238b45', '#3690c0', '#023858']\n\t\t\n\t\tfig = plt.figure(figsize=(15,15), frameon=False)\n\t\tax1 = fig.add_subplot(111)\n\t\tfig.suptitle(strain + ' - ' + stage, fontsize=20, fontweight='bold')\n\t\n\t\tax1.spines['top'].set_visible(False)\n\t\tax1.spines['right'].set_visible(False)\n\t\tax1.spines['bottom'].set_visible(False)\n\t\tax1.spines['left'].set_visible(False)\n\t\tax1.yaxis.set_ticks_position('left')\n\t\tax1.xaxis.set_ticks_position('bottom')\n\t\twidth = 0.6\n\t\t\n\t\tax1.bar(ind+width/2., count[timeList,1], width, color=colors[0], edgecolor = \"none\")\n\t\t\n\t\tax1.plot(*p.linspace(), c='k', linewidth=3.0)\n\t\t\n\t\tfor point in range(len(xm)):\n\t\t\tax1.plot(xm[point], ym[point], marker='*', markersize=30, color=\"blue\")\n\t\tfor point in range(len(xn)):\n\t\t\tax1.plot(xn[point], yn[point], marker='*', markersize=30, color=\"blue\")\t\t\t\n\t\t\n\t\tax1.set_ylabel('Frequency as %', fontweight='bold', fontsize=20)\n\t\tax1.set_xlabel('Time', fontweight='bold', fontsize=20)\n\t\txTickMarks = ['%s' %str(j) for j in timeList]\n\t\tax1.set_xticks(ind+width/2)\n\t\txtickNames = ax1.set_xticklabels(xTickMarks, fontweight='bold')\n\t\t\n\t\tplt.setp(xtickNames, fontsize=15)\n\t\tax1.xaxis.set_ticks_position('none')\n\t\tax1.yaxis.set_ticks_position('none')\t\t\n\t\tax1.set_yticklabels(ax1.get_yticks(), fontweight='bold', fontsize=17)\n\t\tax1.set_xlim([6, len(timeList)-6])\n\t\t\t\t\t\n\t\tfname = 'strain%d' %strainID + stage + '.png' \n\t\t\n\t\tfig.savefig(fname, transparent=True, dpi=100)\n\t\tplt.close(fig)", "def diagram_plugs(data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large,\r\n er_no, er_little,\r\n er_means,\r\n er_great,\r\n er_large_enough,\r\n er_super_large):\r\n\r\n\r\n plt.bar(range(6), [data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large],\r\n width=0.1, color='black',\r\n yerr=[er_no, er_little, er_means,\r\n er_great, er_large_enough,\r\n er_super_large],\r\n ecolor='black', 
capsize=10)\r\n\r\n\r\n plt.xticks(range(6), ['non', 'petit', 'moyen',\r\n 'grand', 'assez grand', 'tres grand'])\r\n\r\n\r\n plt.ylabel('Taux de pollution en AQI')\r\n plt.title(\"Taux de pollution selon les bouchons\")\r\n\r\n nouveau = new()\r\n print(nouveau)\r\n plt.savefig(nouveau, transparent=True)\r\n plt.clf()\r\n plt.close()\r\n\r\n shutil.move(nouveau, '/app/static/popo')\r\n\r\n return nouveau", "def barplot(self, name: str, y_label: str, img_title: str):\n path = C.TEST_DIR\n\n sns.set(style='whitegrid')\n sns.set_palette(sns.color_palette(C.IRT_COLORS))\n df = pd.read_csv(path + name + '.csv')\n ax = sns.barplot(data=df)\n ax.set(ylabel=y_label, title=img_title)\n\n self.save_plot(name)\n plt.show()", "def plot_bar_chart(log, frame_seconds=-1):\n\tif frame_seconds == -1:\n\t\tframe_seconds = (log.content[-1].timestamp - log.content[0].timestamp).total_seconds() / timedelta(seconds=100).total_seconds()\n\tlines,dates,areas = log.give_plot_data_bar(frame_seconds=frame_seconds)\n\tfig, ax = plt.subplots(figsize=(11,6))\n\tfig.autofmt_xdate()\n\tmyFmt = DateFormatter(\"%Y %d.%b %H:%M:%S\")\n\tax.xaxis.set_major_formatter(myFmt)\n\tax.set_xlabel('timestamps in UTC')\n\tax.set_ylabel('amount of entries')\n\tplt.title('Analysis of the file \\\"'+log.name+'\\\" \\n with frame size ' + str(frame_seconds) +' seconds')\n\n\twidth = timedelta(seconds=frame_seconds).total_seconds() / timedelta(days=1).total_seconds()\n\tnormal = ax.bar(dates, areas, width, label='entries', edgecolor='k')\n\n\tplt.legend()\n\tplt.subplots_adjust(left=0.15, bottom=0.2, right=0.9, top=0.9)\n\tfig.canvas.mpl_connect('key_press_event', _quit_figure)", "def create_bar_graph(plot_df, title=\"\", x_title=\"\", y_title=\"\"):\n plot_df[\"quarter\"] = pd.PeriodIndex(pd.to_datetime(plot_df.iloc[:, 0]), freq=\"Q\")\n fig_data = [\n go.Bar(\n x=plot_df[\"quarter\"].astype(str),\n y=plot_df.iloc[:, 1],\n text=plot_df.iloc[:, 1],\n marker={\"color\": color_palette[0]},\n hoverinfo=\"x+y\",\n showlegend=False,\n )\n ]\n\n fiq_layout = build_bar_layout(title, x_title=x_title, y_title=y_title)\n\n return dict(data=fig_data, layout=fiq_layout)", "def oneNumBar(df, colName):\n bins = pd.qcut(x=df[colName[0]], q=15, duplicates='drop')\n ax = bins.value_counts()\n bins = bins.cat.as_ordered()\n bins = bins.cat.categories\n bounds = bins.left \n bounds = list(bounds)\n bounds.append(bins[len(bounds)-1].right)\n texts = []\n for x,y in zip(bounds[0::],bounds[1::]):\n texts.append(\"(\" + str(x) + \", \" + str(y) + \"]\") \n barData = [go.Bar(x=texts, \n y=ax,\n marker=dict(\n color = '#92c5de',\n opacity=0.8)\n )] \n layout = go.Layout(\n title=\"Bar Plot Showing Count of Values for \" + str(colName[0]),\n xaxis=dict(\n title= colName[0]\n ),\n yaxis=dict(\n title= \"NUMBER OF RECORDS\", \n )\n )\n fig = go.Figure(data=barData, layout=layout)\n return {\"label\":\"Frequency\", \"plot\":fig}", "def bar_plot(self,\n x: str,\n y: str,\n x_label: str=None,\n y_label: str=None,\n title: str='Bar Plot',\n **kwargs) -> Figure:\n x_label = x if x_label is None else x_label\n y_label = y if y_label is None else y_label\n fig = px.bar(self.df, x=x, y=y,\n labels={x: x_label, y: y_label},\n title=title, **kwargs)\n return fig", "def setBarType(bartype='vertical'):\n dislin.bartyp(bardict[bartype])", "def draw_bar(df=data):\n pt = {\n 1: 'Credit card',\n 2: 'Cash',\n 3: 'No charge',\n 4: 'Dispute',\n 5: 'Unknown',\n 6: 'Voided trip',\n }\n df['payment_type'] = df['payment_type'].replace(pt)\n gr = df.groupby(['payment_type', 'weekday']) 
\\\n .agg(total_amount=('total_amount', 'sum')) \\\n .reset_index(drop=False)\n return px.bar(gr, x='weekday', y='total_amount', color='payment_type', barmode='group') \\\n .update_layout(\n template='plotly_dark',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n paper_bgcolor='rgba(0, 0, 0, 0)',\n )", "def plot_dict_bar(aDictionary):\n\t# Convert strings to float\n\tfor key in aDictionary:\n\t\taDictionary[key] = float(aDictionary[key])\n\t\t\n\t# Plot the result\n\tplt.bar(range(len(aDictionary)), aDictionary.values(), align='center')\n\tplt.xticks(range(len(aDictionary)), aDictionary.keys(), rotation=90)\n\t\n\tplt.show()", "def performanceBarCharts(): \n ##tauopathy HCS pearson\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_pearson_special_HCS_model.png\", dpi=300)\n\n ##tauopathy HCS MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_mse_special_HCS_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 
2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.075, 0.1156] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.02)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_cross_val.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.15, .2312] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.01)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_mse.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.1288, .1385] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = 
ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_trained_ablation_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.2576, .2771] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_MSE_trained_ablation_model.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI performance\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n YFP_ml_model = pickle.load(open(\"pickles/single_channel_YFP_ml_model_perf.pkl\", \"rb\"))\n DAPI_ml_model = pickle.load(open(\"pickles/single_channel_DAPI_ml_model_perf.pkl\", \"rb\"))\n y = np.array([YFP_ml_model[0], DAPI_ml_model[0]]).round(decimals=2)\n stds = [YFP_ml_model[1], DAPI_ml_model[1]]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"cornflowerblue\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\nSingle Channel Input Learning\",fontname=\"Times New Roman\", fontsize=17, y=1.01)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n 
ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI, input similarity to prediction\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n y = np.array([0.94894628, 0.98718720]).round(decimals=2)\n stds = [0.1673864, 0.039042]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"orange\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Similarity Between\\nInput Channel and Predicted Channel\",fontname=\"Times New Roman\", fontsize=17)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1.13))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning_pearson_similarity_input_and_predicted.png\", dpi=300)", "def visualize_type():\n \n data_file = parse(MY_FILE, ',')\n\n # num of incidents per category\n counter = Counter(item['Category'] for item in data_file)\n\n # Set the labels\n labels = tuple(counter.keys())\n\n # Set exactly where the labels hit the x-axis\n xlocations = na.array(range(len(labels))) + 0.5\n\n # Width of each bar\n width = 0.5\n\n # Assign data to a bar plot\n plt.bar(xlocations, counter.values(), width=width)\n\n # Assign labels and tick location to x-axis\n plt.xticks(xlocations + width / 2, labels, rotation=90)\n \n # Give some more room so the x-axis labels aren't cut off\n plt.subplots_adjust(bottom=0.4)\n\n # Make the overall graph/figure larger\n plt.rcParams['figure.figsize'] = 12, 8\n\n # save\n plt.savefig('Type.png')\n\n # close\n plt.clf()", "def get_proba_plot(data_type):\n buffer = io.BytesIO()\n plt.subplots(figsize = (25,15))\n ax = sns.barplot(x='proba', y='type', data=data_type, palette = \"Blues_r\")\n ax.set_xlabel('Probability')\n plt.yticks(fontsize = 30)\n plt.xticks(fontsize = 30)\n plt.ylabel(\"Type\", fontsize = 38)\n plt.xlabel(\"Probability\", fontsize = 38);\n plt.title(\"Model Results\", fontsize = 50)\n plt.savefig(buffer, format='png')\n# plt.show()\n plt.close()\n buffer.seek(0)\n chart_probability= Image.open(buffer).resize((512+256,512))\n return chart_probability", "def plot_bar(counter, title=\"\", filename=\"tmp.png\"):\n\n fig = plt.figure()\n axis = fig.add_subplot(111)\n\n if isinstance(counter, dict):\n frequencies = counter.values()\n names = counter.keys()\n elif isinstance(counter, list):\n frequencies = [x[1] for x in counter]\n names = [x[0] for x in counter]\n y_pos = np.arange(len(counter))\n axis.barh(y_pos, frequencies, align='center')\n axis.set_title(title)\n axis.set_yticks(y_pos)\n axis.set_yticklabels(list(names))\n axis.invert_yaxis()\n axis.set_xlabel('Frequency')\n print('going to save fig...')\n fig.savefig('png_files/' + filename.replace(\".csv\", 
\".png\"))\n\n return axis", "def custom_barchart(ax, x, y, error, xlims, ylims, error_kw, color='lightblue', width=0.75):\n\n error = [np.zeros(len(error)), error]\n\n ax.bar(x, y, color=color, width=width, yerr=error, error_kw=error_kw, align='center')\n\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n\n return ax", "def bar_plot(trj: TrajaDataFrame, bins: Union[int, tuple] = None, **kwargs) -> Axes:\n # TODO: Add time component\n\n bins = traja.trajectory._bins_to_tuple(trj, bins)\n\n X, Y, U, V = coords_to_flow(trj, bins)\n\n hist, _ = trip_grid(trj, bins, hist_only=True)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_aspect(\"equal\")\n X = X.flatten(\"F\")\n Y = Y.flatten(\"F\")\n ax.bar3d(\n X,\n Y,\n np.zeros_like(X),\n 1,\n 1,\n hist.flatten(),\n zsort=\"average\",\n shade=True,\n **kwargs,\n )\n ax.set(xlabel=\"x\", ylabel=\"y\", zlabel=\"Frames\")\n\n return ax", "def _plot_dict_bar(d, xmin=None, label=None):\n xvals, yvals = _dict2lists(d)\n if xmin == None:\n xmin = min(xvals) - 1\n else:\n xmin = min(xmin, min(xvals) - 1)\n if label:\n pylab.bar(xvals, yvals, align='center', label=label)\n pylab.xlim([xmin, max(xvals)+1])\n else:\n pylab.bar(xvals, yvals, align='center')\n pylab.xlim([xmin, max(xvals)+1])", "def plotBarChart(resultConfirmed, resultDeath, resultVaccinated):\n fig, ax = plt.subplots(3)\n\n ax[0].plot(resultConfirmed['Date'], resultConfirmed['Confirmed Cases'])\n ax[0].title.set_text('Confirmed Cases')\n \n ax[1].plot(resultDeath['Date'], resultDeath['Death Cases'])\n ax[1].title.set_text('Death Cases')\n \n ax[2].plot(resultVaccinated['Date'], resultVaccinated['Vaccinated Person'])\n ax[2].title.set_text('Vaccinated Cases')\n fig.tight_layout()\n plt.show()", "def generate_barplot(predictions, labels):\n plot = figure(x_range=labels, plot_height=300, plot_width=400)\n plot.vbar(x=labels, top=predictions, width=0.8)\n # plot.xaxis.major_label_orientation = pi / 2.\n # plot.xaxis.axis_label_text_font_size = \"40pt\"\n # plot.yaxis.axis_label_text_font_size = \"40pt\"\n\n return components(plot)", "def plot_balance_class(classes):\n unique, counts = np.unique(classes, return_counts=True)\n plt.bar(unique, counts)\n plt.title('Class Frequency')\n plt.xlabel('Class')\n plt.ylabel('Frequency')\n plt.show()", "def make_bar_plot(x, y, title):\n return plotly.graph_objs.Figure(\n data=[plotly.graph_objs.Bar(x=list(x), y=list(y))],\n layout=plotly.graph_objs.Layout(title=title)\n )", "def BarSpecific():\n return dcc.Graph(id=\"PersChart\", className=\"bar\", figure=dict(\n data=[go.Bar(\n x=[1],\n y=[\"Persons\"],\n orientation='h',\n marker={\n 'color': '#ff4058',\n },\n )],\n layout=dict(\n title=\"<b>Most similar Persons</b>\",\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=100, r=20, t=50, b=30),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(tick0=0, dtick=1),\n yaxis=dict(ticks='outside')\n )\n ))", "def plot(self):\n x = np.arange(5)\n # labels = ['temp', 'humi', 'mais', 'o2', 'co2']\n plt.bar(x - 0.35/2, self.data, 0.35, label='actual')\n plt.bar(x + 0.35/2, self.desired_values, 0.35, label='desired')\n plt.ylim(-5, 80)\n plt.legend()\n\n plt.draw()\n plt.pause(0.000001)\n plt.clf()", "def simple_bar():\n\n # Make random discrete data\n discrete_a = np.zeros((8,2))\n discrete_b = np.zeros((8,2))\n discrete_c = np.zeros((8,2))\n discrete_a[:,0] = np.arange(8)\n discrete_b[:,0] = np.arange(8)\n discrete_c[:,0] = np.arange(8)\n discrete_a[:,1] = 
np.random.rand(8)*10\n discrete_b[:,1] = np.random.rand(8)*10\n discrete_c[:,1] = np.random.rand(8)*10\n\n # Make data sets, if using multiple bar_width must be the same\n dataset_a = DataSet(discrete_a,colour='pink',bar_width=0.8,plot='bar',label='A')\n dataset_b = DataSet(discrete_b,colour='violet',bar_width=0.8,plot='bar',label='B')\n dataset_c = DataSet(discrete_c,colour='darkviolet',bar_width=0.8,plot='bar',label='C')\n\n # Make plot object and add data sets\n plot = Plot()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n plot.add_dataset(dataset_c)\n plot.set_axes(xticks=(1,1),xlim=(-0.5,7.5),ylim=(0,12))\n plot.set_legend(legend=True,location='upper right')\n plot.set_text(legend=8)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_bar',fmt='png')\n plot.display()", "def bar(*args, **kwargs):\n ax, args, kwargs = maybe_get_ax(*args, **kwargs)\n color_cycle = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors\n almost_black = '#262626'\n kwargs.setdefault('color', color_cycle[0])\n kwargs.setdefault('edgecolor', 'white')\n middle = 0.4 if 'width' not in kwargs else kwargs['width']/2.0\n\n # Check if data contains stacks\n stacked = kwargs.pop('stacked',False)\n # Check if stack text should be included\n stack_text = kwargs.pop('stack_text',False)\n # Get legend if available\n legend = kwargs.pop('legend',False)\n\n left = args[0]\n height = np.array(args[1])\n\n # Label each individual bar, if xticklabels is provided\n xtickabels = kwargs.pop('xticklabels', None)\n # left+0.4 is the center of the bar\n xticks = np.array(left) + middle\n\n # Whether or not to annotate each bar with the height value\n annotate = kwargs.pop('annotate', False)\n\n show_ticks = kwargs.pop('show_ticks', False)\n\n # If no grid specified, don't draw one.\n grid = kwargs.pop('grid', None)\n\n # Check if stacked and plot data accordingly\n if stacked:\n num_stacks, num_data = height.shape\n bottom = np.zeros(num_data)\n for i in np.arange(num_stacks):\n lst = list(args)\n lst[1] = height[i]\n args = tuple(lst)\n kwargs['color'] = set2[i]\n kwargs['bottom'] = bottom\n rectangles = ax.bar(*args, **kwargs)\n bottom += height[i]\n else:\n rectangles = ax.bar(*args, **kwargs)\n\n # add legend\n if isinstance(legend, collections.Iterable):\n ax.legend(legend,loc='upper center',bbox_to_anchor=(0.5,1.11), ncol=5)\n\n # add whitespace padding on left\n xmin, xmax = ax.get_xlim()\n xmin -= 0.2\n if stacked:\n xmax = num_data\n ax.set_xlim(xmin, xmax)\n\n # If the user is only plotting one bar, make it an iterable\n if not isinstance(height, collections.Iterable):\n height = [height]\n\n\n # If there are negative counts, remove the bottom axes\n # and add a line at y=0\n if any(h < 0 for h in height.tolist()):\n axes_to_remove = ['top', 'right', 'bottom']\n ax.hlines(y=0, xmin=xmin, xmax=xmax,\n linewidths=0.75)\n else:\n axes_to_remove = ['top', 'right']\n\n # Remove excess axes\n remove_chartjunk(ax, axes_to_remove, grid=grid, show_ticks=show_ticks)\n\n if stacked:\n data = height\n height = height.sum(axis=0)\n\n # Add the xticklabels if they are there\n if xtickabels is not None:\n ax.set_xticks(xticks)\n ax.set_xticklabels(xtickabels)\n\n if annotate or isinstance(annotate, collections.Iterable):\n annotate_yrange_factor = 0.025\n ymin, ymax = ax.get_ylim()\n yrange = ymax - ymin\n\n # Reset ymax and ymin so there's enough room to see the annotation of\n # the top-most\n if ymax > 0:\n ymax += yrange * 0.1\n if ymin < 0:\n ymin -= yrange * 0.1\n ax.set_ylim(ymin, 
ymax)\n yrange = ymax - ymin\n\n offset_ = math.log(yrange) + math.log(annotate_yrange_factor+1)\n print offset_\n print yrange * annotate_yrange_factor\n print math.log(yrange) + math.log(annotate_yrange_factor)\n if isinstance(annotate, collections.Iterable):\n annotations = map(str, annotate)\n else:\n annotations = ['%.3f' % h if type(h) is np.float_ else str(h)\n for h in height]\n\n for x, h, annotation in zip(xticks, height, annotations):\n # Adjust the offset to account for negative bars\n offset = offset_ if h >= 0 else -1 * offset_\n verticalalignment = 'bottom' if h >= 0 else 'top'\n\n # Finally, add the text to the axes\n ax.annotate(annotation, (x, h + annotate_yrange_factor), \n verticalalignment=verticalalignment,\n horizontalalignment='center',\n color=almost_black)\n\n # Text for each block of stack\n # This was partially inspired by the following article by Tableau software\n # http://www.tableausoftware.com/about/blog/2014/1/new-whitepaper-survey-data-less-ugly-more-understandable-27812\n if stack_text:\n bottom = np.zeros(num_data)\n max_h = max(height)\n for i in np.arange(num_stacks):\n for x, d, b in zip(xticks, data[i], bottom):\n if (d*100.0/max_h) > 4.0:\n ax.text(x,b+d/2.0,d, ha='center', va='center', color=almost_black)\n bottom += data[i]\n return rectangles", "def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax", "def to_bar(self):\n factor = FactorData()\n return factor", "def plot_what_if(df):\n fig = go.Figure()\n for col in df.columns:\n fig.add_trace(go.Bar(x=df[col].index, y=df[col].values, name=col))\n fig.update_xaxes(title_text=\"Gains/Losses ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=False, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=False, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Gains/Losses ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=2, zerolinecolor='grey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.25, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,\n xaxis=dict(tickmode='linear', tick0=1, dtick=1,))\n\n\n fig.write_image(join('..', 'docs', 'redistribution.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'redistribution.html'))\n fig.show()", "def draw_bar_plot():\n # Copy and modify data for monthly bar plot\n \n df_bar = df.copy()\n\n # Draw bar plot\n leglab = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n labels = [2016, 2017, 2018, 2019]\n months = np.zeros([12, 4])\n\n for i in range(12):\n for j, year in enumerate(labels):\n t = df[df.index.year == year]\n months[i][j] = t[t.index.month == i].value.mean()\n\n x = np.arange(len(labels))\n width = 0.7\n fig, ax = plt.subplots()\n fig.set_figwidth(10)\n fig.set_figheight(8)\n for i, month in enumerate(months):\n ax.bar(x - (width * (12 - i) / 12), months[i], width / 
12, label=leglab[i])\n\n ax.set_ylabel(\"Average Page Views\")\n ax.set_xlabel(\"Years\")\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend(title='Months')\n\n # Save image and return fig (don't change this part)\n fig.savefig('bar_plot.png')\n return fig", "def plot_bar(source_files, column_ids, column_names, normalize, sort, plot_difference, freq_bound, title=None,\n dtype=int):\n\n def _filter_data(raw_data, numerical):\n \"\"\" Filters plot-able data. \"\"\"\n # Retain numeric information\n legal_count_inventory = digits + '.'\n # Retain POS tags, also\n legal_entry_inventory = ascii_uppercase + '$'\n filtered_data = list()\n for data_point in raw_data:\n skip = False\n for symbol in list(str(data_point)):\n if symbol not in legal_count_inventory and symbol not in legal_entry_inventory:\n skip = True\n if not skip:\n if numerical:\n filtered_data.append(dtype(data_point))\n else:\n filtered_data.append(data_point)\n # Optionally normalize count values, resulting in a proportion plot\n if numerical and normalize:\n filtered_data = filtered_data / np.sum(filtered_data)\n return np.array(filtered_data)\n\n # Set plot parameters\n sns.set_style('whitegrid')\n sns.set_context('paper')\n\n # Compile data to be plotted within a new dataframe\n # Not necessary, but convenient when plotting with seaborn\n source_dict = dict()\n # Read in data and sort alphanumeric features (e.g. POS tags) alphabetically\n df_features = pd.read_table(source_files[0], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_features = df_features.sort_values('Tag', ascending=True)\n df_reference = pd.read_table(source_files[1], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_reference = df_reference.sort_values('Tag', ascending=True)\n # Isolate columns to be plotted\n entries = _filter_data(df_features.iloc[:, column_ids[0]].values, False)\n counts = _filter_data(df_features.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus A\n reference_counts = _filter_data(df_reference.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus B\n # Construct dataframe to be visualized\n source_dict[column_names[0]] = entries\n source_dict['reference_counts'] = reference_counts\n # Generate frequency mask to exclude low-frequency features from the plot\n # Optional; results in a clearer, better readable visualization\n frequency_mask = np.array(\n [int(counts[i] >= freq_bound or reference_counts[i] >= freq_bound) for i in range(counts.shape[0])])\n source_dict['frequency_mask'] = frequency_mask\n # Calculate per-feature count differences (i.e. target counts vs. 
reference counts), if specified\n if plot_difference:\n diffs = counts - reference_counts\n source_dict[column_names[1]] = diffs\n else:\n source_dict[column_names[1]] = counts\n features = pd.DataFrame.from_dict(source_dict)\n # Sort by count value and apply frequency mask\n if sort:\n features = features.sort_values(column_names[0], ascending=True)\n if freq_bound > 0:\n features = features.drop(features[features.frequency_mask == 0].index)\n\n # Make plot\n fig, ax = plt.subplots()\n fig.set_size_inches(8, 6)\n if plot_difference:\n colors = ['coral' if feature >= 0 else 'skyblue' for feature in features[column_names[1]]]\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette=colors)\n else:\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette='Set2')\n sns.despine()\n if title is not None:\n plt.title(title)\n plt.show()", "def bar_plot(ax, data, colors=None, total_width=0.8, single_width=1, legend=True):\n\n # Check if colors where provided, otherwhise use the default color cycle\n if colors is None:\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n # Number of bars per group\n n_bars = len(data)\n\n # The width of a single bar\n bar_width = total_width / n_bars\n\n # List containing handles for the drawn bars, used for the legend\n bars = []\n\n # Iterate over all data\n for i, (name, values) in enumerate(data.items()):\n # The offset in x direction of that bar\n x_offset = (i - n_bars / 2) * bar_width + bar_width / 2\n\n # Draw a bar for every value of that type\n for x, y in enumerate(values):\n bar = ax.bar(x + x_offset, y, width=bar_width * single_width, color=colors[i % len(colors)])\n\n # Add a handle to the last drawn bar, which we'll need for the legend\n bars.append(bar[0])\n\n # Draw legend if we need\n if legend:\n ax.legend(bars, data.keys())", "def plot_featurewise_barplot(\n utr5_counts, cds_counts, utr3_counts, ax=None, saveto=None, **kwargs\n):\n fig = None\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n barlist = ax.bar([0, 1, 2], [utr5_counts, cds_counts, utr3_counts])\n barlist[0].set_color(\"#1b9e77\")\n barlist[1].set_color(\"#d95f02\")\n barlist[2].set_color(\"#7570b3\")\n ax.set_xticks([0, 1, 2])\n ax.set_xticklabels([\"5'UTR\", \"CDS\", \"3'UTR\"])\n max_counts = np.max(np.hstack([utr5_counts, cds_counts, utr3_counts]))\n setup_axis(\n ax=ax, axis=\"y\", majorticks=max_counts // 10, minorticks=max_counts // 20\n )\n ax.set_ylabel(\"# RPFs\")\n # sns.despine(trim=True, offset=10)\n if saveto:\n fig.tight_layout()\n fig.savefig(saveto, dpi=DPI)\n return ax, fig", "def bar_grapgh(dictionary, variable):\r\n plt.clf() # Deletes the previous plot \r\n plt.hist(dictionary[variable])\r\n plt.title('Histogram of ' + variable)\r\n plt.xlabel(variable)\r\n plt.ylabel('Frequency')\r\n plt.savefig(variable)", "def visualize_type():\n\t\n\t#grab our parsed data\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in parsed data, and count how many incidents happen by category\n\tcounter = Counter(item[\"Category\"] for item in data_file)\n\t\n\t#set the labels which are based on the keys of our counter\n\t#since order doesn't matter, we can just use counter.keys()\n\tlabels = tuple(counter.keys())\n\t\n\t#set exactly where the labels should hit the x-axis\n\txlocations = np.arange(len(labels)) + 0.5\n\t\n\t#width of each bar that will be plotted\n\twidth = 0.5\n\t\n\t#assign data to a bar 
plot\n\tplt.bar(xlocations, counter.values(), width=width)\n\t\n\t#assign labels and tick location to x-axis\n\tplt.xticks(xlocations + width /2, labels, rotation=90)\n\t\n\t#give more room to the x-axis so the labels aren't cut off\n\tplt.subplots_adjust(bottom=0.4)\n\t\n\t#make the overall graph/figure larger\n\tplt.rcParams['figure.figsize'] = 12, 8\n\t\n\t#save the graph\n\tplt.savefig(\"type.png\")\n\t\n\t#close the plot figure\n\tplt.clf()", "def to_bar(self):\n group = GroupData()\n return group", "def plot_bar_chart_quantum_vs_classical(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n mapping_dict: Dict[str, str],\n categories_to_exclude: List[str] = [],\n categories_keep_only: List[str] = None,\n out_file_name: str = None,\n out_folder_path: str = None,\n horizontal: bool = False,\n map_value_since_beginning: bool = False,\n figsize: Tuple[int, int] = (10, 5),\n legend_placement: str = 'upper center'\n ):\n\n fig, ax = plt.subplots(figsize=figsize)\n\n df = expand_columns(df_bugs, column_to_inspect)\n df = df[~(df[column_to_inspect].isin(categories_to_exclude))]\n\n if categories_keep_only is not None:\n df = df[df[column_to_inspect].isin(categories_keep_only)]\n\n if map_value_since_beginning:\n df[column_to_inspect] = df[column_to_inspect].map(mapping_dict)\n\n categories_q_bugs = list(df[\n df['type'] == 'Quantum'].groupby(\n column_to_inspect).count().sort_values(\n by='type', ascending=False).index)\n\n for component in df[column_to_inspect].unique():\n if component not in categories_q_bugs:\n categories_q_bugs.append(component)\n\n args = {\n \"hue\": \"type\",\n \"data\": df,\n \"palette\": PALETTE,\n \"ax\": ax,\n \"order\": categories_q_bugs\n }\n\n if horizontal:\n sns.countplot(y=column_to_inspect, **args)\n ax.grid(axis='x')\n else:\n sns.countplot(x=column_to_inspect, **args)\n ax.grid(axis='y')\n\n if not map_value_since_beginning:\n # map the value at the latest stage, thus in the labels\n obj_labels = ax.get_xticklabels()\n for i, l in enumerate(obj_labels):\n obj_labels[i] = mapping_dict[l.get_text()]\n ax.set_xticklabels(obj_labels, rotation=60, ha='right')\n\n ax.set_xlabel(capitalize(column_to_inspect), fontsize=15)\n ax.set_ylabel(\"Count\", fontsize=15)\n plt.legend(title=\"Type of Bug\", loc=legend_placement)\n plt.tight_layout()\n\n if out_file_name is not None and out_folder_path is not None:\n fig.savefig(os.path.join(out_folder_path, out_file_name), format=\"pdf\")", "def plot_bars(self, ax, bottom=None, log=False):\n if bottom is None and log:\n bottom = np.ones_like(self.values) * min(self.values[self.values > 0 ]) * .1\n return ax.bar(self.lefts, self.values, self.widths, color=self.color, label=self.label,edgecolor=self.color, bottom=bottom, log=log, **self.options)[0]", "def _plot_group_bars(ax, xss, field, side):\n\n #translate side to index\n if(side == 'left'):\n side = 0\n else:\n side = 1\n #plot the bars\n x = range(len(xss))\n values = [xs.ROW_edge_fields[field].values[side] for xs in xss]\n ax.bar(x, values, color=[_colormap[i%len(_colormap)] for i in range(len(xss))],\n bottom=0.0, align='center', alpha=0.8, width=0.6)\n ax.set_xticks(x)\n ax.set_xticklabels([textwrap.fill(xs.sheet, 15) for xs in xss],\n rotation='vertical', fontsize=11)", "def plot_bar_important_features(important_features, title, xlabel, ylabel, fname):\r\n plt.figure(figsize=(20, 21))\r\n plt.barh(important_features.index.astype(str).tolist(), important_features.values.tolist())\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n 
plt.savefig(fname, bbox_inches='tight')\r\n plt.close()", "def plot_data(self):", "def stopword_bar(df, stop_words, ax):\n df_test = df.copy()\n df_test['prop'] = df.title.apply(stopword_proportion)\n sns.barplot(data=df_test, x='target', y='prop', ax=ax, ci=False)\n ax.set_title(\"Ratio of Stopwords Between Classes\", size=20)\n ax.set_ylim([1,2])\n ax.set_ylabel(\"Ratio\", size=20)\n ax.set_xlabel(\"Article Class\", size=20)\n plt.xticks(ticks=range(2),labels=['Normal','Clickbait'], size=20)\n return ax", "def plot_cat(df, cat_columns, hue = \"default_payment_next_month\"):\n fig = plt.figure(figsize = (20,(len(cat_columns)/2+1)*8))\n loc = 1\n for col in cat_columns:\n ax = fig.add_subplot(len(cat_columns)/2+1, 2, loc)\n df_plot = df[[col, hue, \"id\"]].groupby([col, hue]).count()\n df_plot.reset_index(inplace = True)\n sns.barplot(x=col, y= \"id\", hue = hue, data=df_plot, palette = \"GnBu_d\", ax = ax);\n plt.legend(title = \"default payment (1=yes, 0=no)\")\n plt.ylim([0.0001,15000])\n plt.ylabel(\"clients\");\n loc += 1", "def plot_bar(self, nsingular=None, nbars=20):\n if nsingular is not None:\n self.get_identifiability_dataframe(nsingular)\n\n plot_obj = plots.IdentBar(self.ident_df, nsingular=nsingular, nbars=nbars)\n plot_obj.generate()\n plot_obj.draw()\n \n return plot_obj.fig, plot_obj.ax", "def graph():\n fp = mpl.font_manager.FontProperties(family='JasmineUPC',size=24)\n x = np.arange(0,10)\n y = [386557057065, 368368395622, 242451971944, 225960095934, 161573560379, 107461232731, 89784502211, 73749349545, 54525219632, 52864743212]\n name = ['เชื้อเพลิงที่ได้จากแร่', 'เครื่องจักรและส่วนประกอบ', 'ยานยนต์และส่วนประกอบ', 'เครื่องอุปกรณ์ไฟฟ้าและส่วนประกอบ', 'เหล็กและเหล็กกล้า', 'พลาสติกและของทำด้วยพลาสติก', 'ของทำด้วยเหล็กหรือเหล็กกล้า', 'ทองแดงละของทำด้วยทองแดง', 'เคมีภัณฑ์เบ็ดเตล็ด', 'อุปกรณ์ที่ใช้ทางทัศนศาสตร์']\n ax = plt.gca(xticks=x)\n ax.set_xticklabels(name,rotation=1000,fontproperties=fp)\n plt.bar(x,y,color='g')\n plt.show()" ]
[ "0.72341543", "0.7051117", "0.7032588", "0.702026", "0.6910697", "0.6906154", "0.68671536", "0.67882943", "0.6782081", "0.6735545", "0.6699668", "0.6685477", "0.66498506", "0.663799", "0.6616477", "0.6604195", "0.6479067", "0.6469803", "0.64694345", "0.64263964", "0.6424513", "0.6419261", "0.640694", "0.64018095", "0.63064665", "0.63012624", "0.6273125", "0.6266547", "0.6259449", "0.6240214", "0.6228225", "0.6225711", "0.6224449", "0.62193334", "0.61900115", "0.618825", "0.6187623", "0.61718714", "0.61673653", "0.615912", "0.61552125", "0.61546445", "0.61543536", "0.61523354", "0.61441165", "0.61416775", "0.61191326", "0.6104422", "0.61014444", "0.60894746", "0.6074595", "0.6069581", "0.6053907", "0.6037133", "0.60273874", "0.60265684", "0.6025053", "0.6024364", "0.6023842", "0.6023384", "0.60215837", "0.6020362", "0.60189235", "0.6016994", "0.6011624", "0.5997864", "0.59918255", "0.5982423", "0.5979257", "0.59706324", "0.59666777", "0.596615", "0.59298617", "0.5924291", "0.591691", "0.5916065", "0.59147304", "0.59119755", "0.5911653", "0.5911455", "0.5908337", "0.59053963", "0.58876586", "0.5880613", "0.58777", "0.58659816", "0.58650124", "0.58643466", "0.5860246", "0.58598906", "0.585575", "0.5855504", "0.58311594", "0.5823492", "0.58207417", "0.58191276", "0.5806449", "0.5797168", "0.57748044", "0.5772498" ]
0.7253609
0
Plot a data frame with bar type
def plot_type_of_two_topic(data_frame1: pb.DataFrame, data_frame2: pb.DataFrame) -> None:
    plt.interactive(False)
    plt.figure()
    data_frame1.plot(kind='bar', x= data_frame['TopicID'])
    data_frame2.plot(kind='bar', x= data_frame['TopicID'])
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_type_of_topic(data_frame: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n \n x = df[y].loc[data_pt]\n \n # Here we modify the tickangle of the xaxis, resulting in rotated labels.\n #title={'text': \"<b>Comparing features with Golden for Cycle {}\".format(cycle),\n # 'y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'}\n\n \n trace = {'type': 'bar',\n 'orientation':'h',\n 'x' : x,\n 'y' : y}\n data = Data([trace])\n layout = {'title' : \"<b>Reconstruction error in each dimension for cycle{}\".format(data_pt),\n 'titlefont':{'size' : 20},\n 'xaxis' : {'title': '<b>Reconstruction Error',\n 'titlefont':{'size' : 20},\n 'tickangle': -45, 'tickfont': {'size':15} ,},\n \n 'yaxis' : {'title': '<b>Features',\n 'titlefont':{'size' : 20},\n 'tickfont': {'size':15},},\n 'margin' : {'l':100, 'r' : 1, 'b': 200, 't': 100, 'pad' : 1},\n 'height' : 600, 'width' : 800,\n }\n \n fig = Figure(data = data, layout = layout)\n \n return pyo.iplot(fig)", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def bar_plot(df, field_name, graph_title, threshold_value, x_axis_label, y_axis_label):\n\n x = df[field_name].value_counts().sort_values()\n x[x > threshold_value].plot(kind='barh', figsize=(12, 8), title=graph_title, x=x_axis_label, y=y_axis_label)\n return", "def bar(\n df,\n x=None,\n y=\"value\",\n bars=\"variable\",\n order=None,\n bars_order=None,\n orient=\"v\",\n legend=True,\n title=True,\n ax=None,\n cmap=None,\n **kwargs,\n):\n\n # default x-axis to time-col attribute from an IamDataFrame, else use \"year\"\n x = x or time_col_or_year(df)\n\n # cast to DataFrame if necessary\n # TODO: select only relevant meta columns\n if not isinstance(df, pd.DataFrame):\n df = df.as_pandas()\n\n for col in set(SORT_IDX) - set([x, bars]):\n if len(df[col].unique()) > 1:\n msg = \"Can not plot multiple {}s in bar plot with x={}, bars={}\"\n raise ValueError(msg.format(col, x, bars))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # long form to 
one column per bar group\n _df = reshape_mpl(df, x, y, bars, **{x: order, bars: bars_order})\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.columns), colormap=cmap)[\n \"color\"\n ]\n rc = run_control()\n color = []\n for key in _df.columns:\n c = next(defaults)\n if \"color\" in rc and bars in rc[\"color\"] and key in rc[\"color\"][bars]:\n c = rc[\"color\"][bars][key]\n color.append(c)\n\n # change year to str to prevent pandas/matplotlib from auto-ordering (#474)\n if _df.index.name == \"year\":\n _df.index = map(str, _df.index)\n\n # plot data\n kind = \"bar\" if orient.startswith(\"v\") else \"barh\"\n _df.plot(kind=kind, color=color, ax=ax, **kwargs)\n\n # add legend\n ax.legend(loc=\"center left\", bbox_to_anchor=(1.0, 0.5))\n if not legend:\n ax.legend_.remove()\n\n # add default labels if possible\n if orient == \"v\":\n ax.set_xlabel(x.capitalize())\n else:\n ax.set_ylabel(x.capitalize())\n units = df[\"unit\"].unique()\n if len(units) == 1 and y == \"value\":\n if orient == \"v\":\n ax.set_ylabel(units[0])\n else:\n ax.set_xlabel(units[0])\n\n # build a default title if possible\n _title = []\n for var in [\"model\", \"scenario\", \"region\", \"variable\"]:\n values = df[var].unique()\n if len(values) == 1:\n _title.append(\"{}: {}\".format(var, values[0]))\n if title and _title:\n title = \" \".join(_title) if title is True else title\n ax.set_title(title)\n\n return ax", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()", "def featuresBarPlot(barNames,barValues):\n plt.bar(range(0,len(barNames)),barValues)\n plt.xticks(range(0,len(barNames)), barNames,rotation='vertical')\n plt.show()", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... 
box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()", "def BarPlot(data,colormap='Paired',ax=None,headers='show',value_max=None,x_ticklabels_rotation=90,**kws):\r\n if ax is None:\r\n ax=plt.subplot(111)\r\n\r\n if value_max is None:\r\n value_max=data.sum(1).max()\r\n\r\n data.plot(kind='bar', stacked=True,colormap=colormap, ax=ax,**kws)\r\n ax.set_ylim((0,value_max))\r\n\r\n\r\n #reverse legend order\r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(reversed(handles),reversed(data.columns),bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n\r\n #AXES\r\n if (headers is None or headers=='hide'):\r\n ax.get_xaxis().set_visible(False)\r\n ax.get_xaxis().set_ticks([])\r\n elif headers=='show':\r\n plt.setp(ax.get_xticklabels(),rotation=x_ticklabels_rotation)\r\n ax.set_xlabel(None,visible=False)\r\n\r\n\r\n #plt.tight_layout()\r\n\r\n\r\n return ax", "def bar_chart(\n df,\n orientation='v',\n bar_width=None,\n opacity=0.9,\n textpos=None,\n linewidth=1,\n linecolor='#2C3347',\n marker_color=None,\n **kwargs):\n\n traces = []\n rng = df.index.size if orientation == 'v' else df.columns.size\n otn = orientation\n for i in range(rng):\n x = [str(x) for x in df.columns] if otn == 'v' else df.iloc[:, i]\n y = df.iloc[i] if otn == 'v' else [str(x) for x in df.index]\n text = df.iloc[i] if otn == 'v' else df.iloc[:, i]\n name = df.iloc[i].name if otn == 'v' else df.columns[i]\n\n preset_args = dict(\n x=x,\n y=y,\n text=text,\n textposition=textpos,\n marker=dict(\n opacity=opacity,\n color=marker_color,\n line=dict(\n color=linecolor,\n width=linewidth)),\n name=name,\n width=bar_width,\n orientation=orientation\n )\n\n all_args = {**preset_args, **kwargs}\n bar = go.Bar(all_args)\n traces.append(bar)\n\n return traces", "def plot_bar_chart(objects, data, title='', ylabel='', bar_color = 'blue'):\n y_pos = np.arange(len(objects))\n\n plt.bar(y_pos, data, align='center', alpha=0.5)\n plt.xticks(y_pos, objects, rotation='vertical')\n plt.ylabel(ylabel, fontsize=12)\n plt.title(title, fontsize=12)\n plt.ylim([0,1300])\n plt.bar(range(len(data)), data, color=bar_color)\n\n return plt.show()", "def bar_plot(data, xtitle, title):\n label = list(set(data))\n height = count_elements(data)\n height = [height[i] for i in label]\n plt.bar(label, height=height, width=0.8)\n plt.ylabel('frequency')\n plt.xlabel(xtitle)\n plt.xticks(label)\n plt.savefig('./figures/{}.png'.format(title))\n plt.close()", "def drawStackedBarPlot(df, column, hue):\n plt.style.use('default')\n plt.style.use('dark_background')\n p_table = pd.pivot_table(df, index=column, \n columns=hue, aggfunc='size')\n p_table = p_table.div(p_table.sum(axis=1), axis=0)\n p_table.plot.bar(stacked=True, figsize=(14,7))\n plt.xlabel('Spekraltyp')\n plt.ylabel('Anteil')\n plt.show()", "def barplot(bars, title='', upColor='blue', downColor='red'):\n import pandas as pd\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n from matplotlib.patches import Rectangle\n\n if isinstance(bars, pd.DataFrame):\n ohlcTups = [\n tuple(v) for v in bars[['open', 'high', 'low', 'close']].values]\n elif bars and hasattr(bars[0], 'open_'):\n ohlcTups = [(b.open_, b.high, b.low, b.close) for b in bars]\n else:\n ohlcTups = [(b.open, b.high, b.low, b.close) for b in 
bars]\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.grid(True)\n fig.set_size_inches(10, 6)\n for n, (open_, high, low, close) in enumerate(ohlcTups):\n if close >= open_:\n color = upColor\n bodyHi, bodyLo = close, open_\n else:\n color = downColor\n bodyHi, bodyLo = open_, close\n line = Line2D(\n xdata=(n, n),\n ydata=(low, bodyLo),\n color=color,\n linewidth=1)\n ax.add_line(line)\n line = Line2D(\n xdata=(n, n),\n ydata=(high, bodyHi),\n color=color,\n linewidth=1)\n ax.add_line(line)\n rect = Rectangle(\n xy=(n - 0.3, bodyLo),\n width=0.6,\n height=bodyHi - bodyLo,\n edgecolor=color,\n facecolor=color,\n alpha=0.4,\n antialiased=True\n )\n ax.add_patch(rect)\n\n ax.autoscale_view()\n return fig", "def plot_data_stats(data):\n sns.set_style(\"dark\")\n f, ax = plt.subplots(figsize=(6, 15))\n\n ax = sns.barplot(x='tag', y='count', data=tags_freqs)\n\n ax.axes.set_title(\"POS Tags Frequencies\",fontsize=20)\n ax.set_xlabel(\"POS Tags\", fontsize=16)\n ax.set_ylabel(\"Counts\", fontsize=16)\n ax.tick_params(labelsize=12)\n\n plt.show()", "def barplot(data, field_name, field_categories):\n\n\tcategories, counts = np.unique(data[field_name], return_counts=True)\n\n\tfig = plt.figure(figsize=(4, 3))\n\taxes = fig.add_axes([0, 0, 1, 1]) # left, bottom, width, height (range 0 to 1)\n\taxes.bar(range(len(categories)), counts, fc=\"gray\") # fc is the face color\n\n\taxes.set_xlabel(\"\")\n\taxes.set_ylabel('Count')\n\taxes.set_title(field_name)\n\tfig.autofmt_xdate(rotation=45)\n\n\taxes.set_xticks(range(len(categories)))\n\taxes.set_xticklabels([field_categories[c] for c in categories]);", "def _bar_example_4(quantity_by_fruit):\n ch = chartify.Chart(x_axis_type=\"categorical\", blank_labels=True)\n ch.set_title(\"Vertical bar plot with labels\")\n ch.set_subtitle(\"Hidden y-axis\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.style.color_palette.reset_palette_order()\n ch.plot.text(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n text_column=\"quantity\",\n color_column=\"fruit\",\n )\n # Adjust the axis range to prevent clipping of the text labels.\n ch.axes.set_yaxis_range(0, 1200)\n ch.axes.hide_yaxis()\n ch.show(_OUTPUT_FORMAT)", "def plotify_bar(title, data):\n\n x, y, z, labels = [], [], [], []\n\n for d in reversed(data[:len(data) - 1]):\n x.append(f\"{d['settimana_del']:%d-%b}\\n{d['settimana_fino_al']:%d-%b}\")\n y.append(d['nuovi_positivi'])\n z.append(\"lightgrey\" if d['giorni'] < 7 else 'green' if d['delta'] <= 0 else 'red' )\n labels.append(human_format(d['nuovi_positivi']) if d['giorni'] == 7 else f\"{human_format(d['nuovi_positivi'])}\\n(in corso)\" )\n\n x_pos = np.arange(len(x))\n\n # create a new figure\n plt.figure()\n\n plt.title(title)\n\n # Create bars with different colors\n plt.bar(x_pos, y, color=z)\n\n # Create names on the x-axis\n plt.xticks(x_pos, x, rotation=40)\n\n\n # Text on the top of each bar\n x_ticks = plt.gca().get_xticks()\n for i in range(len(y)):\n text = data[i]\n plt.text(x = x_ticks[i], y = y[i]+5, s = labels[i], size = 9, horizontalalignment='center', verticalalignment='bottom')\n\n # prettify y values\n current_values = plt.gca().get_yticks()\n plt.gca().set_yticklabels(['{:n}'.format(int(x)) for x in current_values])\n\n # responsive layout\n plt.tight_layout()\n\n\n\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n\n ### Release memory\n # Clear the current axes.\n 
plt.cla() \n # Clear the current figure.\n plt.clf() \n # Closes all the figure windows.\n plt.close('all') \n # plt.close(fig)\n gc.collect()\n\n return buf", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def value_counts_plot(df):\n \n plt.figure(figsize=(15,10))\n \n #get rid of sort_index() to change the graph\n return df.value_counts().sort_index().plot(kind='bar')", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def draw_bar(x_index, data_list, xticks, title, x_label, y_label):\n pyplot.bar(x_index, data_list)\n pyplot.xlabel(x_label)\n pyplot.ylabel(y_label)\n pyplot.xticks(x_index, xticks)\n pyplot.title(title)\n pyplot.show()\n pyplot.savefig()", "def plot(*args, **params):\n if len(args) == 1: # only support data for now\n if isinstance(args[0], list):\n bar_2d = Bar2D(data=args[0])\n bar_2d.plot()\n bar_2d.fig.show()\n else:\n bar_2d = Bar2D(**params)\n bar_2d.plot()\n bar_2d.fig.show()", "def _bar_example_3(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"categorical\")\n ch.set_title(\"Horizontal bar plot\")\n ch.set_subtitle(\"Horizontal with color grouping\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def BarOverview(data):\n return dcc.Graph(id=\"BarOverview\", className=\"bar\", figure=dict(\n data=[go.Bar(\n x=data[\"frequencies\"],\n y=data[\"names\"],\n orientation='h',\n marker={\n 'color': '#ff4058'\n },\n )],\n layout=dict(\n title=\"<b>Most common Persons</b>\",\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=10, r=20, t=50, b=30),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(tick0=0, dtick=max(data[\"frequencies\"])),\n yaxis=dict(ticks='outside',\n showgrid=True,\n showline=False,\n showticklabels=False),\n annotations=[dict(xref='paper', yref='y',\n x=0, y=yd,\n font=dict(\n color=\"#000000\",\n size=19\n ),\n text=str(yd),\n showarrow=False) for xd, yd in zip(data[\"frequencies\"], data[\"names\"])]\n )\n ))", "def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return", "def barplot(self, x = \"Predictor\", color = None, opacity = 1, template = \"ggplot2\", \n has_title = True, barmode=\"stack\", is_horizontal = False, title = None, is_percent = False,\n show_num = False):\n if color: #Produce either a stacked or grouped bar plot\n df_stack = self._df.groupby([x,color]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x, color]).size().groupby(level = 0).apply(lambda \n x:100 * x/float(x.sum())).values\n df_stack.columns = [x, 
color, 'Count', 'Percentage']\n df_stack['Percentage'] = round(df_stack['Percentage'], 2)\n \n x_clean, df_clean = clean_varname(df_stack, var = x)\n color_clean, df_clean = clean_varname(df_clean, var = color)\n \n if has_title:\n if not title:\n title = f\"Bar Plot of {x_clean} and {color_clean}\"\n else:\n title = None\n \n \n # 8 different variations for how this graph can appear:\n if is_horizontal:\n if is_percent:\n if show_num: #Show percentages on stacked bar graph\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num: #Show counts on stacked bar graph:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, y = x_clean, x = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if is_percent:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Percentage'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Percentage', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title)\n else:\n if show_num:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title, text = df_clean['Count'])\n else:\n fig = px.bar(df_clean, x = x_clean, y = 'Count', \n color = color_clean, template = template, barmode=barmode, \n opacity = opacity, title = title) \n \n return fig\n \n else: #Create a basic bar plot\n df_stack = self._df.groupby([x]).size().reset_index()\n df_stack['Percentage'] = self._df.groupby([x]).size().groupby(level = 0).apply(lambda", "def bar( # pylint: disable=disallowed-name\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"bar\", x=x, y=y, **kwargs)", "def bar(self, entry_type:str, x:str, labels:list=None, diff:bool=False, x_idx:int=-1):\n\n query = self._decode(x)\n\n data_points = []\n\n for idx, (log, name) in enumerate(zip(self.logs, self.log_names)):\n log = log[entry_type]\n\n candidates = []\n\n for entry in log:\n test = self._follow(entry, query)\n\n if type(test) == dict:\n candidates.append(test)\n elif type(test) == list:\n candidates.append({idx: v for idx, v in enumerate(test)})\n \n if len(candidates) > 0:\n data_points.append((name, candidates[x_idx]))\n \n if len(data_points) == 0:\n print('Warning: Nothing to show in bar chart!')\n return\n\n names = [x[0] for x in data_points]\n data_points = [x[1] for x in data_points]\n\n # Construct the labels for the data\n if labels is not None:\n data_labels = labels\n else:\n data_labels = set()\n for datum in data_points:\n for k in datum:\n data_labels.add(k)\n \n data_labels = list(data_labels)\n data_labels.sort()\n \n\n data_values = [[(datum[k] if k in datum else None) for k in data_labels] for datum in data_points]\n\n if diff:\n for idx in reversed(range(len(data_values))):\n for jdx in range(len(data_labels)):\n if data_values[0][jdx] is None or 
data_values[idx][jdx] is None:\n data_values[idx][jdx] = None\n else:\n data_values[idx][jdx] -= data_values[0][jdx]\n\n\n series_labels = names\n\n # Plot the graph now\n num_bars = len(series_labels)\n bar_width = 1 / (num_bars + 1)\n \n # Set position of bar on X axis\n positions = [np.arange(len(data_labels))]\n for _ in range(1, num_bars):\n positions.append([x + bar_width for x in positions[-1]])\n \n # Make the plot\n for idx, (series, data, pos) in enumerate(zip(series_labels, data_values, positions)):\n plt.bar(pos, data, color=self._color(idx), width=bar_width, edgecolor='white', label=series)\n \n # Add xticks on the middle of the group bars\n plt.title(x.replace('x.', entry_type + '.') + (' diff' if diff else ''))\n plt.xticks([r + bar_width for r in range(len(data_labels))], data_labels)\n \n # Create legend & Show graphic\n plt.legend()\n plt.show()", "def bar_time_series(df, title, ylabel, report):\n for col in df:\n fig, ax = plt.subplots(1, 1, figsize=(12, 4))\n plt.gcf().subplots_adjust(bottom=0.25)\n df[col].plot.bar();\n ax.set_xticklabels([v if i % 4 == 0 else '' for i, v in enumerate(df.index)])\n ax.xaxis.set_tick_params(rotation=45, length=0);\n ax.set_xlabel('Date')\n ax.set_ylabel(ylabel)\n full_title = title if df.shape[1] == 1 else '{} {}'.format(col, title)\n report.write_plot(full_title)\n plt.title(full_title)\n plt.show();\n plt.close();", "def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count = df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. 
of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()", "def plot_bar(filename, data, std=None, xlab='x', ylab='y', ylim=[0,1],yticks=np.arange(0,1.1,0.1), title='Bar-Plot', methods=None, datasets=None, figwidth=8, figheight=6, colors=None, legend_loc=\"lower center\", xytick_fontsize=12, xylabel_fontsize=15, title_fontsize=15, legend_fontsize=12):\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n\n data=np.array(data)\n num_methods=len(data)\n \n # colors\n if colors is None:\n colors=['b','r','g','c','m','y','k','w'] # maximally 8 colors allowed so far\n\n ind = np.arange(num_methods) # the x locations of the bars\n width = 0.8 # the width of the bars\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n if std is None:\n ax.bar(ind,data,width,color=colors[0:num_methods],ecolor='k')\n else:\n ax.bar(ind,data,width,color=colors[0:num_methods],yerr=std,ecolor='k')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel(ylab,fontsize=xylabel_fontsize)\n ax.set_xlabel(xlab,fontsize=xylabel_fontsize)\n ax.set_title(title,fontsize=title_fontsize)\n ax.set_xticks(ind+0.5*width)\n ax.set_xticklabels( methods )\n #if ylim is None:\n # yticks=np.arange(0,1.1,0.1)\n # ylim=[0,1]\n if yticks is not None:\n ax.set_yticks(yticks)\n if ylim is not None:\n ax.set_ylim(ylim[0],ylim[1])\n plt.setp(ax.get_xticklabels(), fontsize=xytick_fontsize)\n plt.setp(ax.get_yticklabels(), fontsize=xytick_fontsize)\n # shrink axis box \n #box = ax.get_position()\n #ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n #ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=legend_fontsize )\n #ax.legend(methods, loc=legend_loc, fontsize=legend_fontsize )\n #plt.show()\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)", "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def bar(variable, name, data=None, x_v=None, color_set=custom_bw,\n ax_size=(20, 6), highlight=None, ax=None):\n\n common_set_up(ax_size) # Apply basic plot style\n\n fig = sns.barplot(x=x_v, y=variable, data=data, saturation=1, ax=ax,\n color=color_set[2], label=name,\n )\n\n sns.despine(offset=2, 
trim=True, left=True, bottom=True)\n\n # Set title and axes\n title_color = '#192231'\n font_colour = '#9099A2'\n if ax is None:\n fig.set_title('{0}'.format(name),\n fontsize=20, color=title_color)\n fig.set_ylabel('Frequency',\n color=font_colour)\n fig.set_xlabel('{0}'.format(name),\n color=font_colour)\n\n if highlight:\n bars = fig.patches\n bars[highlight].set_color(color_set[1])\n\n return fig", "def barPlot2():\n n = 10\n X = np.arange(n)\n Y1 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')\n\n for x,y in zip(X,Y1):\n plt.text(x+0.2, y+0.05, '%.2f' % y, ha='center', va= 'bottom')\n\n plt.ylim(0,1.25)\n plt.show()", "def _bar_example_2(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot - Label sort\")\n ch.set_subtitle(\"Set `categorical_order_by` to sort by labels\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n categorical_order_by=\"labels\",\n categorical_order_ascending=True,\n )\n ch.show(_OUTPUT_FORMAT)", "def bar(self, row_id, col_id, label=None, offset=(350, 30), **kwargs):\n bar = BarGraph(label=label, **kwargs)\n self.pl[row_id, col_id].addItem(bar)\n\n bar.barClicked.connect(self.clickedBar)", "def plot_results(t_val, mood):\r\n N = 8\r\n theta = np.linspace(0.0, 2 * np.pi , N, endpoint=False)\r\n the_stats = [t_val['number_words'], t_val['average_character_length'], \r\n t_val['signs'], t_val['multiple_signs'], t_val['question'],\r\n t_val['exclamation'], t_val['name'], mood] \r\n \r\n width = np.pi / N \r\n\r\n plt.figure()\r\n \r\n handle = plt.subplot(111, polar=True)\r\n handle.set_xticklabels(['Word', 'AvrChar', 'Signs', '2Signs', '?', '!', 'name', 'mood'])\r\n \r\n handle.bar(theta, the_stats, width=width, bottom=1.0)\r\n \r\n plt.show()", "def img_gen_bar():\n data = pd.DataFrame(data=np.random.rand(5,1), index=range(1,6), columns=['Fred'])\n #m,n = np.shape(data)\n\n plt.clf()\n plt.bar(x=data.index.values, height=data.values.ravel(), color='k') # figsize=(10, 6))\n # Options for later from https://matplotlib.org/api/_as_gen/matplotlib.pyplot.bar.html\n # bar_width = 0.35\n # alpha = .3\n fig=plt.gcf()\n fig.set_size_inches(2.24, 2.24)\n plt.axis('off')\n fig.tight_layout()\n fig.canvas.draw()\n # grab the pixel buffer and dump it into a numpy array\n pixels = np.array(fig.canvas.renderer._renderer)[:,:,:3]\n #print(pixels.shape)\n return pixels, data.index.values + data.values.ravel()", "def _bar2df(bars,data):\n tmp = list(map(lambda x: [x[0] for _ in range(x[1])],list(enumerate(bars))))\n labels = list(itertools.chain(*tmp)) # tmp is a list of list and this line flattens it\n data[\"labels\"] = labels\n data[\"index\"] = list(range(data.shape[0]))\n aggregation_condition = {'time': ['first', 'last'], \\\n 'index':['first', 'last'], \\\n 'price': ['min', 'max', 'first', 'last']}\n res = data.groupby(\"labels\",as_index=False).agg(aggregation_condition)\n res.columns = ['_'.join(col) for col in res.columns.values]\n res = res.drop([\"labels_\"],axis=1)\n res.columns = ['start_t', 'end_t', 'start_idx', 'end_idx','low', 'high', 'open', 'close']\n return res", "def matplotlib_bar_chart() -> Tuple:\n df = read_dataset(Path('..', '..', 'iris.csv'))\n x = []\n\n for col in df.columns:\n try:\n max_val = get_column_max(df, col)\n x.append(max_val)\n except ValueError:\n pass\n \n fig, ax = a_libraries.matplotlib_bar_chart(np.array(x))\n\n return fig, ax", "def 
plot_bar(label_array, acc_array, f1_array, width=0.5, axis_label=None, graph_title=None, file_name=\"\", dpi=100):\n plt.figure(figsize=plt.figaspect(1.), dpi=dpi)\n x = np.arange(len(label_array)) # the label locations\n plt.bar(x - 0.5 * width, acc_array, width, label='Accuracy')\n plt.bar(x + 0.5 * width, f1_array, width, label='F1 score')\n plt.ylim([0, 1.1])\n plt.xticks(x, labels=label_array)\n if axis_label is None:\n axis_label = ['Set', 'Values']\n plt.xlabel(axis_label[0])\n plt.ylabel(axis_label[1])\n if graph_title is None:\n graph_title = graph_title\n plt.title(graph_title)\n plt.tight_layout()\n plt.legend()\n plt.grid()\n if file_name:\n plt.savefig(file_name, bbox_inches='tight')\n plt.show()\n return", "def PlotBayes( x=np.ones(1), bayes=np.ones(1), title=None, label=None, width=1.0, color='blue', show_values=False, ax=None, posterior=False ):\n\n if ax is None:\n fig, ax = plt.subplots( )\n ax.bar(x, bayes/bayes.max(), width, color=color )\n ax.set_title( title )\n ax.set_yscale('log')\n ax.set_xlabel( label )\n if posterior:\n ax.set_ylabel(r\"$P/P_{\\rm max}$\")\n else:\n ax.set_ylabel(r\"$\\mathcal{B}/\\mathcal{B}_{\\rm max}$\")\n# ax.set_ylabel(r\"$\\mathcal{B} = \\prod L / L_0$\")\n if show_values: ## print value on top of each bar, .... doesnt work ...\n shift = bayes.max()/bayes.min()/10\n for xx, b in zip( x, bayes ):\n ax.text( xx, b*shift, str(b), color=color, fontweight='bold' )\n\n ### assure that there are ticks at y axis\n lim = ax.get_ylim()\n ax.set_ylim(lim[0]*0.5, lim[1]*2)", "def barh_plotter(data: pd.DataFrame, variable: str):\n fig, axs = plt.subplots(2,5, gridspec_kw={'wspace': 1, 'hspace': 0.2},\n figsize=(60, 40), sharex = False)\n\n for ax, dta in zip(axs.flatten(), data.values()) :\n ax.barh(dta['club'], dta[f'{variable}'])\n ax.set_xlabel(f'{variable}', fontsize=25)\n \n \n for ax, dta in zip(axs.flatten(), data.keys()):\n ax.set_title(dta, fontsize=30)\n\n return plt.show()", "def barPlot1():\n n = 12\n X = np.arange(n)\n Y1 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n Y2 = (1-X/float(n)) * np.random.uniform(0.5,1.0,n)\n\n plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')\n plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')\n\n for x,y in zip(X,Y1):\n plt.text(x+0.2, y+0.05, '%.2f' % y, ha='center', va= 'bottom')\n\n for x,y in zip(X,Y2):\n plt.text(x+0.2, -y-0.1, '%.2f' % y, ha='center', va= 'bottom')\n\n plt.ylim(-1.25,+1.25)\n plt.show()", "def graphy2():\n data = pd.read_csv(\"week2.csv\")\n plot_g = pygal.Bar(fill=True, interpolate='cubic', style=LightSolarizedStyle)\n plot_g.title = \"Top Fans in Week 2\"\n plot_g.x_labels = data.GENDER\n plot_g.y_labels = map(int, range(0, 80, 10))\n plot_g.add(\"Male\", data.COUNT)\n plot_g.add(\"Female\", data.COUNT2)\n plot_g.add(\"Total\", data.COUNT3)\n plot_g.render_to_file(\"plotweek2.svg\")", "def draw_bar_plot(fig, x, y, labels):\r\n\r\n #Convert times to a displayable format\r\n (x_times, hour_mode) = times_to_axis(x)\r\n\r\n\r\n #Draw horizontal grid lines behind data\r\n fig.yaxis.grid(zorder=0)\r\n\r\n #Draw plot\r\n fig.bar(x_times, y, zorder=2)\r\n\r\n\r\n #If necessary, enable processing of \"datetime\" objects on the x-axis\r\n if not hour_mode:\r\n fig.xaxis_date()\r\n\r\n\r\n #Label and style plot\r\n set_axis_labels(fig, *labels)\r\n style_x_labels(fig)", "def graph(df):\n df.plot()\n plt.show()", "def plot_individual_bar_chart_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n vals_for_bar_chart,\r\n file_in):\r\n\r\n n_groups = 
len(vals_for_bar_chart)\r\n fig, ax = plt.subplots()\r\n index = np.arange(n_groups)\r\n bar_width = 0.9\r\n opacity = 0.4\r\n # print vals_for_bar_chart\r\n rects1 = plt.bar(index,\r\n vals_for_bar_chart,\r\n bar_width,\r\n alpha=opacity,\r\n color='b') # label='whatever'\r\n plt.xlabel('number in cluster')\r\n plt.ylabel('Count')\r\n plt.title(title+\"_barchart\")\r\n plt.legend()\r\n pylab.grid(True)\r\n ax.set_yscale('symlog')\r\n ax.set_xscale('symlog')\r\n plt.tight_layout()\r\n plt.show()\r\n pylab.savefig(file_in + \"_\" + title + '_barchart.png')\r\n plt.close()\r\n pylab.close()", "def show_graph(d:dict):\n x = []\n y = []\n for key, value in d.items():\n x.append(str(key))\n y.append(value)\n\n x_pos = [i for i, _ in enumerate(x)]\n plt.figure()\n plt.bar(x_pos, y, color='green')\n plt.xlabel(\"Size\")\n plt.ylabel(\"Number of images\")\n plt.title(\"Count by size\")\n plt.xticks(x_pos, x)", "def plotBarPlots(self, stage, strain, strainID, count, p, xm, ym, xn, yn):\n\t\t\n\t\ttimeList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n\t\t\n\t\tind = np.arange(len(timeList))\n\t\t\n\t\tplt.style.use('bmh')\n\t\tcolors = ['#b2182b', '#238b45', '#3690c0', '#023858']\n\t\t\n\t\tfig = plt.figure(figsize=(15,15), frameon=False)\n\t\tax1 = fig.add_subplot(111)\n\t\tfig.suptitle(strain + ' - ' + stage, fontsize=20, fontweight='bold')\n\t\n\t\tax1.spines['top'].set_visible(False)\n\t\tax1.spines['right'].set_visible(False)\n\t\tax1.spines['bottom'].set_visible(False)\n\t\tax1.spines['left'].set_visible(False)\n\t\tax1.yaxis.set_ticks_position('left')\n\t\tax1.xaxis.set_ticks_position('bottom')\n\t\twidth = 0.6\n\t\t\n\t\tax1.bar(ind+width/2., count[timeList,1], width, color=colors[0], edgecolor = \"none\")\n\t\t\n\t\tax1.plot(*p.linspace(), c='k', linewidth=3.0)\n\t\t\n\t\tfor point in range(len(xm)):\n\t\t\tax1.plot(xm[point], ym[point], marker='*', markersize=30, color=\"blue\")\n\t\tfor point in range(len(xn)):\n\t\t\tax1.plot(xn[point], yn[point], marker='*', markersize=30, color=\"blue\")\t\t\t\n\t\t\n\t\tax1.set_ylabel('Frequency as %', fontweight='bold', fontsize=20)\n\t\tax1.set_xlabel('Time', fontweight='bold', fontsize=20)\n\t\txTickMarks = ['%s' %str(j) for j in timeList]\n\t\tax1.set_xticks(ind+width/2)\n\t\txtickNames = ax1.set_xticklabels(xTickMarks, fontweight='bold')\n\t\t\n\t\tplt.setp(xtickNames, fontsize=15)\n\t\tax1.xaxis.set_ticks_position('none')\n\t\tax1.yaxis.set_ticks_position('none')\t\t\n\t\tax1.set_yticklabels(ax1.get_yticks(), fontweight='bold', fontsize=17)\n\t\tax1.set_xlim([6, len(timeList)-6])\n\t\t\t\t\t\n\t\tfname = 'strain%d' %strainID + stage + '.png' \n\t\t\n\t\tfig.savefig(fname, transparent=True, dpi=100)\n\t\tplt.close(fig)", "def diagram_plugs(data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large,\r\n er_no, er_little,\r\n er_means,\r\n er_great,\r\n er_large_enough,\r\n er_super_large):\r\n\r\n\r\n plt.bar(range(6), [data_no,\r\n data_little,\r\n data_means,\r\n data_great,\r\n data_large_enough,\r\n data_super_large],\r\n width=0.1, color='black',\r\n yerr=[er_no, er_little, er_means,\r\n er_great, er_large_enough,\r\n er_super_large],\r\n ecolor='black', capsize=10)\r\n\r\n\r\n plt.xticks(range(6), ['non', 'petit', 'moyen',\r\n 'grand', 'assez grand', 'tres grand'])\r\n\r\n\r\n plt.ylabel('Taux de pollution en AQI')\r\n plt.title(\"Taux de pollution selon les bouchons\")\r\n\r\n nouveau = new()\r\n print(nouveau)\r\n 
plt.savefig(nouveau, transparent=True)\r\n plt.clf()\r\n plt.close()\r\n\r\n shutil.move(nouveau, '/app/static/popo')\r\n\r\n return nouveau", "def barplot(self, name: str, y_label: str, img_title: str):\n path = C.TEST_DIR\n\n sns.set(style='whitegrid')\n sns.set_palette(sns.color_palette(C.IRT_COLORS))\n df = pd.read_csv(path + name + '.csv')\n ax = sns.barplot(data=df)\n ax.set(ylabel=y_label, title=img_title)\n\n self.save_plot(name)\n plt.show()", "def plot_bar_chart(log, frame_seconds=-1):\n\tif frame_seconds == -1:\n\t\tframe_seconds = (log.content[-1].timestamp - log.content[0].timestamp).total_seconds() / timedelta(seconds=100).total_seconds()\n\tlines,dates,areas = log.give_plot_data_bar(frame_seconds=frame_seconds)\n\tfig, ax = plt.subplots(figsize=(11,6))\n\tfig.autofmt_xdate()\n\tmyFmt = DateFormatter(\"%Y %d.%b %H:%M:%S\")\n\tax.xaxis.set_major_formatter(myFmt)\n\tax.set_xlabel('timestamps in UTC')\n\tax.set_ylabel('amount of entries')\n\tplt.title('Analysis of the file \\\"'+log.name+'\\\" \\n with frame size ' + str(frame_seconds) +' seconds')\n\n\twidth = timedelta(seconds=frame_seconds).total_seconds() / timedelta(days=1).total_seconds()\n\tnormal = ax.bar(dates, areas, width, label='entries', edgecolor='k')\n\n\tplt.legend()\n\tplt.subplots_adjust(left=0.15, bottom=0.2, right=0.9, top=0.9)\n\tfig.canvas.mpl_connect('key_press_event', _quit_figure)", "def create_bar_graph(plot_df, title=\"\", x_title=\"\", y_title=\"\"):\n plot_df[\"quarter\"] = pd.PeriodIndex(pd.to_datetime(plot_df.iloc[:, 0]), freq=\"Q\")\n fig_data = [\n go.Bar(\n x=plot_df[\"quarter\"].astype(str),\n y=plot_df.iloc[:, 1],\n text=plot_df.iloc[:, 1],\n marker={\"color\": color_palette[0]},\n hoverinfo=\"x+y\",\n showlegend=False,\n )\n ]\n\n fiq_layout = build_bar_layout(title, x_title=x_title, y_title=y_title)\n\n return dict(data=fig_data, layout=fiq_layout)", "def oneNumBar(df, colName):\n bins = pd.qcut(x=df[colName[0]], q=15, duplicates='drop')\n ax = bins.value_counts()\n bins = bins.cat.as_ordered()\n bins = bins.cat.categories\n bounds = bins.left \n bounds = list(bounds)\n bounds.append(bins[len(bounds)-1].right)\n texts = []\n for x,y in zip(bounds[0::],bounds[1::]):\n texts.append(\"(\" + str(x) + \", \" + str(y) + \"]\") \n barData = [go.Bar(x=texts, \n y=ax,\n marker=dict(\n color = '#92c5de',\n opacity=0.8)\n )] \n layout = go.Layout(\n title=\"Bar Plot Showing Count of Values for \" + str(colName[0]),\n xaxis=dict(\n title= colName[0]\n ),\n yaxis=dict(\n title= \"NUMBER OF RECORDS\", \n )\n )\n fig = go.Figure(data=barData, layout=layout)\n return {\"label\":\"Frequency\", \"plot\":fig}", "def bar_plot(self,\n x: str,\n y: str,\n x_label: str=None,\n y_label: str=None,\n title: str='Bar Plot',\n **kwargs) -> Figure:\n x_label = x if x_label is None else x_label\n y_label = y if y_label is None else y_label\n fig = px.bar(self.df, x=x, y=y,\n labels={x: x_label, y: y_label},\n title=title, **kwargs)\n return fig", "def setBarType(bartype='vertical'):\n dislin.bartyp(bardict[bartype])", "def draw_bar(df=data):\n pt = {\n 1: 'Credit card',\n 2: 'Cash',\n 3: 'No charge',\n 4: 'Dispute',\n 5: 'Unknown',\n 6: 'Voided trip',\n }\n df['payment_type'] = df['payment_type'].replace(pt)\n gr = df.groupby(['payment_type', 'weekday']) \\\n .agg(total_amount=('total_amount', 'sum')) \\\n .reset_index(drop=False)\n return px.bar(gr, x='weekday', y='total_amount', color='payment_type', barmode='group') \\\n .update_layout(\n template='plotly_dark',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n 
paper_bgcolor='rgba(0, 0, 0, 0)',\n )", "def plot_dict_bar(aDictionary):\n\t# Convert strings to float\n\tfor key in aDictionary:\n\t\taDictionary[key] = float(aDictionary[key])\n\t\t\n\t# Plot the result\n\tplt.bar(range(len(aDictionary)), aDictionary.values(), align='center')\n\tplt.xticks(range(len(aDictionary)), aDictionary.keys(), rotation=90)\n\t\n\tplt.show()", "def performanceBarCharts(): \n ##tauopathy HCS pearson\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_pearson_special_HCS_model.png\", dpi=300)\n\n ##tauopathy HCS MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_mse_special_HCS_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_perf_fold_{}.pkl\".format(fold), 
\"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.075, 0.1156] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.02)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_cross_val.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.15, .2312] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.01)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_mse.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.1288, .1385] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n 
plt.title(\"Pearson Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_trained_ablation_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.2576, .2771] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_MSE_trained_ablation_model.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI performance\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n YFP_ml_model = pickle.load(open(\"pickles/single_channel_YFP_ml_model_perf.pkl\", \"rb\"))\n DAPI_ml_model = pickle.load(open(\"pickles/single_channel_DAPI_ml_model_perf.pkl\", \"rb\"))\n y = np.array([YFP_ml_model[0], DAPI_ml_model[0]]).round(decimals=2)\n stds = [YFP_ml_model[1], DAPI_ml_model[1]]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"cornflowerblue\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\nSingle Channel Input Learning\",fontname=\"Times New Roman\", fontsize=17, y=1.01)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n 
plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI, input similarity to prediction\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n y = np.array([0.94894628, 0.98718720]).round(decimals=2)\n stds = [0.1673864, 0.039042]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"orange\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Similarity Between\\nInput Channel and Predicted Channel\",fontname=\"Times New Roman\", fontsize=17)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1.13))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning_pearson_similarity_input_and_predicted.png\", dpi=300)", "def visualize_type():\n \n data_file = parse(MY_FILE, ',')\n\n # num of incidents per category\n counter = Counter(item['Category'] for item in data_file)\n\n # Set the labels\n labels = tuple(counter.keys())\n\n # Set exactly where the labels hit the x-axis\n xlocations = na.array(range(len(labels))) + 0.5\n\n # Width of each bar\n width = 0.5\n\n # Assign data to a bar plot\n plt.bar(xlocations, counter.values(), width=width)\n\n # Assign labels and tick location to x-axis\n plt.xticks(xlocations + width / 2, labels, rotation=90)\n \n # Give some more room so the x-axis labels aren't cut off\n plt.subplots_adjust(bottom=0.4)\n\n # Make the overall graph/figure larger\n plt.rcParams['figure.figsize'] = 12, 8\n\n # save\n plt.savefig('Type.png')\n\n # close\n plt.clf()", "def get_proba_plot(data_type):\n buffer = io.BytesIO()\n plt.subplots(figsize = (25,15))\n ax = sns.barplot(x='proba', y='type', data=data_type, palette = \"Blues_r\")\n ax.set_xlabel('Probability')\n plt.yticks(fontsize = 30)\n plt.xticks(fontsize = 30)\n plt.ylabel(\"Type\", fontsize = 38)\n plt.xlabel(\"Probability\", fontsize = 38);\n plt.title(\"Model Results\", fontsize = 50)\n plt.savefig(buffer, format='png')\n# plt.show()\n plt.close()\n buffer.seek(0)\n chart_probability= Image.open(buffer).resize((512+256,512))\n return chart_probability", "def plot_bar(counter, title=\"\", filename=\"tmp.png\"):\n\n fig = plt.figure()\n axis = fig.add_subplot(111)\n\n if isinstance(counter, dict):\n frequencies = counter.values()\n names = counter.keys()\n elif isinstance(counter, list):\n frequencies = [x[1] for x in counter]\n names = [x[0] for x in counter]\n y_pos = np.arange(len(counter))\n axis.barh(y_pos, frequencies, align='center')\n axis.set_title(title)\n axis.set_yticks(y_pos)\n axis.set_yticklabels(list(names))\n axis.invert_yaxis()\n axis.set_xlabel('Frequency')\n print('going to save fig...')\n fig.savefig('png_files/' + filename.replace(\".csv\", \".png\"))\n\n return axis", "def custom_barchart(ax, x, y, error, xlims, ylims, error_kw, color='lightblue', width=0.75):\n\n error = [np.zeros(len(error)), error]\n\n ax.bar(x, y, color=color, width=width, yerr=error, 
error_kw=error_kw, align='center')\n\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n\n return ax", "def bar_plot(trj: TrajaDataFrame, bins: Union[int, tuple] = None, **kwargs) -> Axes:\n # TODO: Add time component\n\n bins = traja.trajectory._bins_to_tuple(trj, bins)\n\n X, Y, U, V = coords_to_flow(trj, bins)\n\n hist, _ = trip_grid(trj, bins, hist_only=True)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_aspect(\"equal\")\n X = X.flatten(\"F\")\n Y = Y.flatten(\"F\")\n ax.bar3d(\n X,\n Y,\n np.zeros_like(X),\n 1,\n 1,\n hist.flatten(),\n zsort=\"average\",\n shade=True,\n **kwargs,\n )\n ax.set(xlabel=\"x\", ylabel=\"y\", zlabel=\"Frames\")\n\n return ax", "def _plot_dict_bar(d, xmin=None, label=None):\n xvals, yvals = _dict2lists(d)\n if xmin == None:\n xmin = min(xvals) - 1\n else:\n xmin = min(xmin, min(xvals) - 1)\n if label:\n pylab.bar(xvals, yvals, align='center', label=label)\n pylab.xlim([xmin, max(xvals)+1])\n else:\n pylab.bar(xvals, yvals, align='center')\n pylab.xlim([xmin, max(xvals)+1])", "def plotBarChart(resultConfirmed, resultDeath, resultVaccinated):\n fig, ax = plt.subplots(3)\n\n ax[0].plot(resultConfirmed['Date'], resultConfirmed['Confirmed Cases'])\n ax[0].title.set_text('Confirmed Cases')\n \n ax[1].plot(resultDeath['Date'], resultDeath['Death Cases'])\n ax[1].title.set_text('Death Cases')\n \n ax[2].plot(resultVaccinated['Date'], resultVaccinated['Vaccinated Person'])\n ax[2].title.set_text('Vaccinated Cases')\n fig.tight_layout()\n plt.show()", "def generate_barplot(predictions, labels):\n plot = figure(x_range=labels, plot_height=300, plot_width=400)\n plot.vbar(x=labels, top=predictions, width=0.8)\n # plot.xaxis.major_label_orientation = pi / 2.\n # plot.xaxis.axis_label_text_font_size = \"40pt\"\n # plot.yaxis.axis_label_text_font_size = \"40pt\"\n\n return components(plot)", "def plot_balance_class(classes):\n unique, counts = np.unique(classes, return_counts=True)\n plt.bar(unique, counts)\n plt.title('Class Frequency')\n plt.xlabel('Class')\n plt.ylabel('Frequency')\n plt.show()", "def make_bar_plot(x, y, title):\n return plotly.graph_objs.Figure(\n data=[plotly.graph_objs.Bar(x=list(x), y=list(y))],\n layout=plotly.graph_objs.Layout(title=title)\n )", "def BarSpecific():\n return dcc.Graph(id=\"PersChart\", className=\"bar\", figure=dict(\n data=[go.Bar(\n x=[1],\n y=[\"Persons\"],\n orientation='h',\n marker={\n 'color': '#ff4058',\n },\n )],\n layout=dict(\n title=\"<b>Most similar Persons</b>\",\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=100, r=20, t=50, b=30),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(tick0=0, dtick=1),\n yaxis=dict(ticks='outside')\n )\n ))", "def plot(self):\n x = np.arange(5)\n # labels = ['temp', 'humi', 'mais', 'o2', 'co2']\n plt.bar(x - 0.35/2, self.data, 0.35, label='actual')\n plt.bar(x + 0.35/2, self.desired_values, 0.35, label='desired')\n plt.ylim(-5, 80)\n plt.legend()\n\n plt.draw()\n plt.pause(0.000001)\n plt.clf()", "def simple_bar():\n\n # Make random discrete data\n discrete_a = np.zeros((8,2))\n discrete_b = np.zeros((8,2))\n discrete_c = np.zeros((8,2))\n discrete_a[:,0] = np.arange(8)\n discrete_b[:,0] = np.arange(8)\n discrete_c[:,0] = np.arange(8)\n discrete_a[:,1] = np.random.rand(8)*10\n discrete_b[:,1] = np.random.rand(8)*10\n discrete_c[:,1] = np.random.rand(8)*10\n\n # Make data sets, if using multiple bar_width must be the same\n dataset_a = 
DataSet(discrete_a,colour='pink',bar_width=0.8,plot='bar',label='A')\n dataset_b = DataSet(discrete_b,colour='violet',bar_width=0.8,plot='bar',label='B')\n dataset_c = DataSet(discrete_c,colour='darkviolet',bar_width=0.8,plot='bar',label='C')\n\n # Make plot object and add data sets\n plot = Plot()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n plot.add_dataset(dataset_c)\n plot.set_axes(xticks=(1,1),xlim=(-0.5,7.5),ylim=(0,12))\n plot.set_legend(legend=True,location='upper right')\n plot.set_text(legend=8)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_bar',fmt='png')\n plot.display()", "def bar(*args, **kwargs):\n ax, args, kwargs = maybe_get_ax(*args, **kwargs)\n color_cycle = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors\n almost_black = '#262626'\n kwargs.setdefault('color', color_cycle[0])\n kwargs.setdefault('edgecolor', 'white')\n middle = 0.4 if 'width' not in kwargs else kwargs['width']/2.0\n\n # Check if data contains stacks\n stacked = kwargs.pop('stacked',False)\n # Check if stack text should be included\n stack_text = kwargs.pop('stack_text',False)\n # Get legend if available\n legend = kwargs.pop('legend',False)\n\n left = args[0]\n height = np.array(args[1])\n\n # Label each individual bar, if xticklabels is provided\n xtickabels = kwargs.pop('xticklabels', None)\n # left+0.4 is the center of the bar\n xticks = np.array(left) + middle\n\n # Whether or not to annotate each bar with the height value\n annotate = kwargs.pop('annotate', False)\n\n show_ticks = kwargs.pop('show_ticks', False)\n\n # If no grid specified, don't draw one.\n grid = kwargs.pop('grid', None)\n\n # Check if stacked and plot data accordingly\n if stacked:\n num_stacks, num_data = height.shape\n bottom = np.zeros(num_data)\n for i in np.arange(num_stacks):\n lst = list(args)\n lst[1] = height[i]\n args = tuple(lst)\n kwargs['color'] = set2[i]\n kwargs['bottom'] = bottom\n rectangles = ax.bar(*args, **kwargs)\n bottom += height[i]\n else:\n rectangles = ax.bar(*args, **kwargs)\n\n # add legend\n if isinstance(legend, collections.Iterable):\n ax.legend(legend,loc='upper center',bbox_to_anchor=(0.5,1.11), ncol=5)\n\n # add whitespace padding on left\n xmin, xmax = ax.get_xlim()\n xmin -= 0.2\n if stacked:\n xmax = num_data\n ax.set_xlim(xmin, xmax)\n\n # If the user is only plotting one bar, make it an iterable\n if not isinstance(height, collections.Iterable):\n height = [height]\n\n\n # If there are negative counts, remove the bottom axes\n # and add a line at y=0\n if any(h < 0 for h in height.tolist()):\n axes_to_remove = ['top', 'right', 'bottom']\n ax.hlines(y=0, xmin=xmin, xmax=xmax,\n linewidths=0.75)\n else:\n axes_to_remove = ['top', 'right']\n\n # Remove excess axes\n remove_chartjunk(ax, axes_to_remove, grid=grid, show_ticks=show_ticks)\n\n if stacked:\n data = height\n height = height.sum(axis=0)\n\n # Add the xticklabels if they are there\n if xtickabels is not None:\n ax.set_xticks(xticks)\n ax.set_xticklabels(xtickabels)\n\n if annotate or isinstance(annotate, collections.Iterable):\n annotate_yrange_factor = 0.025\n ymin, ymax = ax.get_ylim()\n yrange = ymax - ymin\n\n # Reset ymax and ymin so there's enough room to see the annotation of\n # the top-most\n if ymax > 0:\n ymax += yrange * 0.1\n if ymin < 0:\n ymin -= yrange * 0.1\n ax.set_ylim(ymin, ymax)\n yrange = ymax - ymin\n\n offset_ = math.log(yrange) + math.log(annotate_yrange_factor+1)\n print offset_\n print yrange * annotate_yrange_factor\n print math.log(yrange) + 
math.log(annotate_yrange_factor)\n if isinstance(annotate, collections.Iterable):\n annotations = map(str, annotate)\n else:\n annotations = ['%.3f' % h if type(h) is np.float_ else str(h)\n for h in height]\n\n for x, h, annotation in zip(xticks, height, annotations):\n # Adjust the offset to account for negative bars\n offset = offset_ if h >= 0 else -1 * offset_\n verticalalignment = 'bottom' if h >= 0 else 'top'\n\n # Finally, add the text to the axes\n ax.annotate(annotation, (x, h + annotate_yrange_factor), \n verticalalignment=verticalalignment,\n horizontalalignment='center',\n color=almost_black)\n\n # Text for each block of stack\n # This was partially inspired by the following article by Tableau software\n # http://www.tableausoftware.com/about/blog/2014/1/new-whitepaper-survey-data-less-ugly-more-understandable-27812\n if stack_text:\n bottom = np.zeros(num_data)\n max_h = max(height)\n for i in np.arange(num_stacks):\n for x, d, b in zip(xticks, data[i], bottom):\n if (d*100.0/max_h) > 4.0:\n ax.text(x,b+d/2.0,d, ha='center', va='center', color=almost_black)\n bottom += data[i]\n return rectangles", "def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax", "def to_bar(self):\n factor = FactorData()\n return factor", "def plot_what_if(df):\n fig = go.Figure()\n for col in df.columns:\n fig.add_trace(go.Bar(x=df[col].index, y=df[col].values, name=col))\n fig.update_xaxes(title_text=\"Gains/Losses ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=False, zerolinewidth=1, zerolinecolor='lightgrey',\n showgrid=False, gridwidth=1, gridcolor='lightgrey')\n fig.update_yaxes(title_text=\"Gains/Losses ($ USD)\",\n showline=True, mirror=True, linewidth=1, linecolor='black',\n zeroline=True, zerolinewidth=2, zerolinecolor='grey',\n showgrid=True, gridwidth=1, gridcolor='lightgrey')\n fig.update_layout(legend=dict(orientation=\"h\", yanchor=\"bottom\", y=-0.25, xanchor=\"center\", x=0.5),\n font=dict(family='Times New Roman', size=15), plot_bgcolor='rgba(0,0,0,0)',\n margin_l=20, margin_r=20, margin_t=20, margin_b=20,\n xaxis=dict(tickmode='linear', tick0=1, dtick=1,))\n\n\n fig.write_image(join('..', 'docs', 'redistribution.png'), height=700, width=900, engine='kaleido')\n fig.write_html(join('..', 'docs', 'redistribution.html'))\n fig.show()", "def draw_bar_plot():\n # Copy and modify data for monthly bar plot\n \n df_bar = df.copy()\n\n # Draw bar plot\n leglab = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n labels = [2016, 2017, 2018, 2019]\n months = np.zeros([12, 4])\n\n for i in range(12):\n for j, year in enumerate(labels):\n t = df[df.index.year == year]\n months[i][j] = t[t.index.month == i].value.mean()\n\n x = np.arange(len(labels))\n width = 0.7\n fig, ax = plt.subplots()\n fig.set_figwidth(10)\n fig.set_figheight(8)\n for i, month in enumerate(months):\n ax.bar(x - (width * (12 - i) / 12), months[i], width / 12, label=leglab[i])\n\n ax.set_ylabel(\"Average Page Views\")\n ax.set_xlabel(\"Years\")\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend(title='Months')\n\n # Save 
image and return fig (don't change this part)\n fig.savefig('bar_plot.png')\n return fig", "def plot_bar(source_files, column_ids, column_names, normalize, sort, plot_difference, freq_bound, title=None,\n dtype=int):\n\n def _filter_data(raw_data, numerical):\n \"\"\" Filters plot-able data. \"\"\"\n # Retain numeric information\n legal_count_inventory = digits + '.'\n # Retain POS tags, also\n legal_entry_inventory = ascii_uppercase + '$'\n filtered_data = list()\n for data_point in raw_data:\n skip = False\n for symbol in list(str(data_point)):\n if symbol not in legal_count_inventory and symbol not in legal_entry_inventory:\n skip = True\n if not skip:\n if numerical:\n filtered_data.append(dtype(data_point))\n else:\n filtered_data.append(data_point)\n # Optionally normalize count values, resulting in a proportion plot\n if numerical and normalize:\n filtered_data = filtered_data / np.sum(filtered_data)\n return np.array(filtered_data)\n\n # Set plot parameters\n sns.set_style('whitegrid')\n sns.set_context('paper')\n\n # Compile data to be plotted within a new dataframe\n # Not necessary, but convenient when plotting with seaborn\n source_dict = dict()\n # Read in data and sort alphanumeric features (e.g. POS tags) alphabetically\n df_features = pd.read_table(source_files[0], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_features = df_features.sort_values('Tag', ascending=True)\n df_reference = pd.read_table(source_files[1], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_reference = df_reference.sort_values('Tag', ascending=True)\n # Isolate columns to be plotted\n entries = _filter_data(df_features.iloc[:, column_ids[0]].values, False)\n counts = _filter_data(df_features.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus A\n reference_counts = _filter_data(df_reference.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus B\n # Construct dataframe to be visualized\n source_dict[column_names[0]] = entries\n source_dict['reference_counts'] = reference_counts\n # Generate frequency mask to exclude low-frequency features from the plot\n # Optional; results in a clearer, better readable visualization\n frequency_mask = np.array(\n [int(counts[i] >= freq_bound or reference_counts[i] >= freq_bound) for i in range(counts.shape[0])])\n source_dict['frequency_mask'] = frequency_mask\n # Calculate per-feature count differences (i.e. target counts vs. 
reference counts), if specified\n if plot_difference:\n diffs = counts - reference_counts\n source_dict[column_names[1]] = diffs\n else:\n source_dict[column_names[1]] = counts\n features = pd.DataFrame.from_dict(source_dict)\n # Sort by count value and apply frequency mask\n if sort:\n features = features.sort_values(column_names[0], ascending=True)\n if freq_bound > 0:\n features = features.drop(features[features.frequency_mask == 0].index)\n\n # Make plot\n fig, ax = plt.subplots()\n fig.set_size_inches(8, 6)\n if plot_difference:\n colors = ['coral' if feature >= 0 else 'skyblue' for feature in features[column_names[1]]]\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette=colors)\n else:\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette='Set2')\n sns.despine()\n if title is not None:\n plt.title(title)\n plt.show()", "def bar_plot(ax, data, colors=None, total_width=0.8, single_width=1, legend=True):\n\n # Check if colors where provided, otherwhise use the default color cycle\n if colors is None:\n colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n # Number of bars per group\n n_bars = len(data)\n\n # The width of a single bar\n bar_width = total_width / n_bars\n\n # List containing handles for the drawn bars, used for the legend\n bars = []\n\n # Iterate over all data\n for i, (name, values) in enumerate(data.items()):\n # The offset in x direction of that bar\n x_offset = (i - n_bars / 2) * bar_width + bar_width / 2\n\n # Draw a bar for every value of that type\n for x, y in enumerate(values):\n bar = ax.bar(x + x_offset, y, width=bar_width * single_width, color=colors[i % len(colors)])\n\n # Add a handle to the last drawn bar, which we'll need for the legend\n bars.append(bar[0])\n\n # Draw legend if we need\n if legend:\n ax.legend(bars, data.keys())", "def plot_featurewise_barplot(\n utr5_counts, cds_counts, utr3_counts, ax=None, saveto=None, **kwargs\n):\n fig = None\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n barlist = ax.bar([0, 1, 2], [utr5_counts, cds_counts, utr3_counts])\n barlist[0].set_color(\"#1b9e77\")\n barlist[1].set_color(\"#d95f02\")\n barlist[2].set_color(\"#7570b3\")\n ax.set_xticks([0, 1, 2])\n ax.set_xticklabels([\"5'UTR\", \"CDS\", \"3'UTR\"])\n max_counts = np.max(np.hstack([utr5_counts, cds_counts, utr3_counts]))\n setup_axis(\n ax=ax, axis=\"y\", majorticks=max_counts // 10, minorticks=max_counts // 20\n )\n ax.set_ylabel(\"# RPFs\")\n # sns.despine(trim=True, offset=10)\n if saveto:\n fig.tight_layout()\n fig.savefig(saveto, dpi=DPI)\n return ax, fig", "def bar_grapgh(dictionary, variable):\r\n plt.clf() # Deletes the previous plot \r\n plt.hist(dictionary[variable])\r\n plt.title('Histogram of ' + variable)\r\n plt.xlabel(variable)\r\n plt.ylabel('Frequency')\r\n plt.savefig(variable)", "def visualize_type():\n\t\n\t#grab our parsed data\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in parsed data, and count how many incidents happen by category\n\tcounter = Counter(item[\"Category\"] for item in data_file)\n\t\n\t#set the labels which are based on the keys of our counter\n\t#since order doesn't matter, we can just use counter.keys()\n\tlabels = tuple(counter.keys())\n\t\n\t#set exactly where the labels should hit the x-axis\n\txlocations = np.arange(len(labels)) + 0.5\n\t\n\t#width of each bar that will be plotted\n\twidth = 0.5\n\t\n\t#assign data to a bar 
plot\n\tplt.bar(xlocations, counter.values(), width=width)\n\t\n\t#assign labels and tick location to x-axis\n\tplt.xticks(xlocations + width /2, labels, rotation=90)\n\t\n\t#give more room to the x-axis so the labels aren't cut off\n\tplt.subplots_adjust(bottom=0.4)\n\t\n\t#make the overall graph/figure larger\n\tplt.rcParams['figure.figsize'] = 12, 8\n\t\n\t#save the graph\n\tplt.savefig(\"type.png\")\n\t\n\t#close the plot figure\n\tplt.clf()", "def to_bar(self):\n group = GroupData()\n return group", "def plot_bar_chart_quantum_vs_classical(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n mapping_dict: Dict[str, str],\n categories_to_exclude: List[str] = [],\n categories_keep_only: List[str] = None,\n out_file_name: str = None,\n out_folder_path: str = None,\n horizontal: bool = False,\n map_value_since_beginning: bool = False,\n figsize: Tuple[int, int] = (10, 5),\n legend_placement: str = 'upper center'\n ):\n\n fig, ax = plt.subplots(figsize=figsize)\n\n df = expand_columns(df_bugs, column_to_inspect)\n df = df[~(df[column_to_inspect].isin(categories_to_exclude))]\n\n if categories_keep_only is not None:\n df = df[df[column_to_inspect].isin(categories_keep_only)]\n\n if map_value_since_beginning:\n df[column_to_inspect] = df[column_to_inspect].map(mapping_dict)\n\n categories_q_bugs = list(df[\n df['type'] == 'Quantum'].groupby(\n column_to_inspect).count().sort_values(\n by='type', ascending=False).index)\n\n for component in df[column_to_inspect].unique():\n if component not in categories_q_bugs:\n categories_q_bugs.append(component)\n\n args = {\n \"hue\": \"type\",\n \"data\": df,\n \"palette\": PALETTE,\n \"ax\": ax,\n \"order\": categories_q_bugs\n }\n\n if horizontal:\n sns.countplot(y=column_to_inspect, **args)\n ax.grid(axis='x')\n else:\n sns.countplot(x=column_to_inspect, **args)\n ax.grid(axis='y')\n\n if not map_value_since_beginning:\n # map the value at the latest stage, thus in the labels\n obj_labels = ax.get_xticklabels()\n for i, l in enumerate(obj_labels):\n obj_labels[i] = mapping_dict[l.get_text()]\n ax.set_xticklabels(obj_labels, rotation=60, ha='right')\n\n ax.set_xlabel(capitalize(column_to_inspect), fontsize=15)\n ax.set_ylabel(\"Count\", fontsize=15)\n plt.legend(title=\"Type of Bug\", loc=legend_placement)\n plt.tight_layout()\n\n if out_file_name is not None and out_folder_path is not None:\n fig.savefig(os.path.join(out_folder_path, out_file_name), format=\"pdf\")", "def plot_bars(self, ax, bottom=None, log=False):\n if bottom is None and log:\n bottom = np.ones_like(self.values) * min(self.values[self.values > 0 ]) * .1\n return ax.bar(self.lefts, self.values, self.widths, color=self.color, label=self.label,edgecolor=self.color, bottom=bottom, log=log, **self.options)[0]", "def _plot_group_bars(ax, xss, field, side):\n\n #translate side to index\n if(side == 'left'):\n side = 0\n else:\n side = 1\n #plot the bars\n x = range(len(xss))\n values = [xs.ROW_edge_fields[field].values[side] for xs in xss]\n ax.bar(x, values, color=[_colormap[i%len(_colormap)] for i in range(len(xss))],\n bottom=0.0, align='center', alpha=0.8, width=0.6)\n ax.set_xticks(x)\n ax.set_xticklabels([textwrap.fill(xs.sheet, 15) for xs in xss],\n rotation='vertical', fontsize=11)", "def plot_bar_important_features(important_features, title, xlabel, ylabel, fname):\r\n plt.figure(figsize=(20, 21))\r\n plt.barh(important_features.index.astype(str).tolist(), important_features.values.tolist())\r\n plt.title(title)\r\n plt.xlabel(xlabel)\r\n plt.ylabel(ylabel)\r\n 
plt.savefig(fname, bbox_inches='tight')\r\n plt.close()", "def plot_data(self):", "def stopword_bar(df, stop_words, ax):\n df_test = df.copy()\n df_test['prop'] = df.title.apply(stopword_proportion)\n sns.barplot(data=df_test, x='target', y='prop', ax=ax, ci=False)\n ax.set_title(\"Ratio of Stopwords Between Classes\", size=20)\n ax.set_ylim([1,2])\n ax.set_ylabel(\"Ratio\", size=20)\n ax.set_xlabel(\"Article Class\", size=20)\n plt.xticks(ticks=range(2),labels=['Normal','Clickbait'], size=20)\n return ax", "def plot_cat(df, cat_columns, hue = \"default_payment_next_month\"):\n fig = plt.figure(figsize = (20,(len(cat_columns)/2+1)*8))\n loc = 1\n for col in cat_columns:\n ax = fig.add_subplot(len(cat_columns)/2+1, 2, loc)\n df_plot = df[[col, hue, \"id\"]].groupby([col, hue]).count()\n df_plot.reset_index(inplace = True)\n sns.barplot(x=col, y= \"id\", hue = hue, data=df_plot, palette = \"GnBu_d\", ax = ax);\n plt.legend(title = \"default payment (1=yes, 0=no)\")\n plt.ylim([0.0001,15000])\n plt.ylabel(\"clients\");\n loc += 1", "def plot_bar(self, nsingular=None, nbars=20):\n if nsingular is not None:\n self.get_identifiability_dataframe(nsingular)\n\n plot_obj = plots.IdentBar(self.ident_df, nsingular=nsingular, nbars=nbars)\n plot_obj.generate()\n plot_obj.draw()\n \n return plot_obj.fig, plot_obj.ax", "def graph():\n fp = mpl.font_manager.FontProperties(family='JasmineUPC',size=24)\n x = np.arange(0,10)\n y = [386557057065, 368368395622, 242451971944, 225960095934, 161573560379, 107461232731, 89784502211, 73749349545, 54525219632, 52864743212]\n name = ['เชื้อเพลิงที่ได้จากแร่', 'เครื่องจักรและส่วนประกอบ', 'ยานยนต์และส่วนประกอบ', 'เครื่องอุปกรณ์ไฟฟ้าและส่วนประกอบ', 'เหล็กและเหล็กกล้า', 'พลาสติกและของทำด้วยพลาสติก', 'ของทำด้วยเหล็กหรือเหล็กกล้า', 'ทองแดงละของทำด้วยทองแดง', 'เคมีภัณฑ์เบ็ดเตล็ด', 'อุปกรณ์ที่ใช้ทางทัศนศาสตร์']\n ax = plt.gca(xticks=x)\n ax.set_xticklabels(name,rotation=1000,fontproperties=fp)\n plt.bar(x,y,color='g')\n plt.show()" ]
[ "0.7253609", "0.72341543", "0.7051117", "0.7032588", "0.702026", "0.6910697", "0.6906154", "0.68671536", "0.67882943", "0.6782081", "0.6735545", "0.6699668", "0.6685477", "0.66498506", "0.663799", "0.6616477", "0.6604195", "0.6479067", "0.6469803", "0.64694345", "0.64263964", "0.6424513", "0.6419261", "0.640694", "0.64018095", "0.63064665", "0.63012624", "0.6273125", "0.6266547", "0.6259449", "0.6240214", "0.6228225", "0.6225711", "0.6224449", "0.62193334", "0.61900115", "0.618825", "0.6187623", "0.61718714", "0.61673653", "0.615912", "0.61552125", "0.61546445", "0.61543536", "0.61523354", "0.61441165", "0.61416775", "0.61191326", "0.6104422", "0.61014444", "0.60894746", "0.6074595", "0.6069581", "0.6053907", "0.6037133", "0.60273874", "0.6025053", "0.6024364", "0.6023842", "0.6023384", "0.60215837", "0.6020362", "0.60189235", "0.6016994", "0.6011624", "0.5997864", "0.59918255", "0.5982423", "0.5979257", "0.59706324", "0.59666777", "0.596615", "0.59298617", "0.5924291", "0.591691", "0.5916065", "0.59147304", "0.59119755", "0.5911653", "0.5911455", "0.5908337", "0.59053963", "0.58876586", "0.5880613", "0.58777", "0.58659816", "0.58650124", "0.58643466", "0.5860246", "0.58598906", "0.585575", "0.5855504", "0.58311594", "0.5823492", "0.58207417", "0.58191276", "0.5806449", "0.5797168", "0.57748044", "0.5772498" ]
0.60265684
56
Return the project name when printed.
def __str__(self): return self.project_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_name(self):\n pass", "def getProjectName():", "def full_name(self):\n if not self.project_id:\n raise ValueError('Missing project ID.')\n return 'projects/%s' % (self.project_id)", "def get_project_name(self):\n return self.line_edit.text()", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def get_project_display_name(self, package):\n with self._conn.begin():\n return self._conn.execute(\n \"VALUES (get_project_display_name(%s))\",\n (package, )\n ).scalar()", "def getProjectName(self, projectId: int) -> str:\n query = f\"SELECT name FROM projects WHERE id = {projectId}\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result[0][0]", "def _get_project_name(self, context, project_id):\n return project_id", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def get_project_name(self, project_id):\n return self.project_names.get(project_id)", "def log_project(self) -> str:\n return pulumi.get(self, \"log_project\")", "def __str__(self):\n return_string = \"Project: {}-{}\".\\\n format(self.public_information[\"project_id\"],\n self.public_information[\"title\"])\n\n return return_string", "def _project_name(self):\n name = getattr(self._req.req, 'project_name', '')\n if name:\n return name\n raise ValueError('Requirement has no project_name.')", "def fullname(self):\n return \"{project}/{version}\".format(\n project=self.project.name, version=self.name\n )", "def get_project_name(self):\n remote = self.get_gitlab_remote()\n return self.get_project_name_from_url(remote.url)", "def name(self):\r\n return 
self.setuptools_requirement.project_name", "def fullname(cls): # pylint: disable=no-self-argument\n return func.concat(\n (select([Project.name]).where(Project.id == cls.project_id).as_scalar()),\n \" \",\n cls.name,\n )", "def get_current_project(self):\n\n try:\n command = self._oc_command([\"project\", \"-q\"])\n output = run_cmd(command, return_output=True)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Failed to obtain current project name : %s\" % ex)\n\n try:\n return output.rstrip() # remove '\\n'\n except IndexError:\n raise ConuException(\"Failed to obtain project name\")", "def get_project_name(working_dir):\n return path.path(working_dir).name", "def log_project(self) -> Optional[str]:\n return pulumi.get(self, \"log_project\")", "def get_project_name_from_id(project_id: int) -> str:\n session = konfuzio_session()\n url = get_project_url(project_id)\n r = session.get(url=url)\n return r.json()['name']", "def show():\n info(str(Project))", "def getTopicName(nd_proj):\n # does not line &\n return '-'.join(nd_proj.generateProjectInfo())", "def project_name() -> str:\n fake = Faker()\n raw_name: str = (\n fake.name_female().lower().replace(\" \", \"_\").replace(\"-\", \"_\").replace(\".\", \"_\")\n )\n return re.sub(\"_+\", \"_\", raw_name).strip(\"_\")", "def project(self) -> str:\n return self._db_data.project", "def displayname(self):\n if self.path.is_dir():\n if (is_uuid(self.path.parts[-1])):\n self.is_uuid_folder = True\n return self.path.name + '/'\n elif is_proj(self.path.parts[-1]):\n return f'{bcolors.BOLD}' + self.path.name + f'{bcolors.ENDC}'\n return self.path.name", "def project(self) -> str:\n return self.proto.project", "def __str__(self):\r\n proj_string = \" Project Name: \" + self.__name\r\n proj_string += \"\\n Cover Photo: \" + self.__cover_photo\r\n proj_string += \"\\n Links: \" + self.__links\r\n proj_string += \" Note: \" + self.__note\r\n proj_string += \" Photos: \" + list_str(self.__photos)\r\n\r\n return proj_string", "def project_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project_id\")", "def get_project_name(projects, project_id):\n for project in projects:\n if project['id'] == project_id:\n return project['name']", "def __str__(self):\n return \"{}: {}\".format(self.project, self.id)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def project(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project\")", "def get_project_name(name: str) -> str:\n if is_shortcut_name(name):\n return name.split(config.name_separator)[1]\n raise CHCShortCutNameError(name)", "def get_project_name(build_rules_list, working_directory,\n verbose=False, project_name=None):\n\n if not project_name:\n # Check build_rules.py\n project_name = getattr_build_rules_list(\n 
build_rules_list, \"PROJECT_NAME\", None)\n if not project_name:\n project_name = os.path.basename(working_directory)\n\n # Print if needed.\n if verbose:\n print(\"Project name is {}\".format(project_name))\n return project_name", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name\")", "def full_name(self) -> str:\n inherited_from = (\n [self.inherited_from]\n if isinstance(self.inherited_from, str)\n else self.inherited_from\n )\n\n return \".\".join([*inherited_from, self.project_name])", "def base_name(self):\n return self._project.path", "def get_project():\n\n title = request.args.get('title')\n if not title:\n return \"Please enter a title!\"\n\n project = hackbright.get_project_by_title(title)\n\n grades = hackbright.get_grades_by_title(title)\n\n if not project:\n return \"There is no project with title \\\"{}\\\".\".format(title)\n\n title, description, max_grade = project\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n grades=grades)", "def displaySummary(self):\r\n print('Project Name:' + self.project['name'])\r\n print('Project chip:' + self.project['chip'])\r\n print('Project includes: ' + ' '.join(self.project['incs']))\r\n print('Project defines: ' + ' '.join(self.project['defs']))\r\n print('Project srcs: ' + ' '.join(self.project['srcs']))", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"project\")", "def project_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"project_id\")", "def project_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"project_id\")", "def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)", "def do_project_show(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n _, project = cs.projects.get(id)\n utils.print_dict(project)", "def display_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"display_name\")" ]
[ "0.8392448", "0.82995224", "0.7878728", "0.78781843", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.7737", "0.76813203", "0.76813203", "0.76813203", "0.76813203", "0.76813203", "0.76813203", "0.76414037", "0.7613182", "0.755784", "0.75346524", "0.75346524", "0.75346524", "0.75346524", "0.75346524", "0.75346524", "0.7491989", "0.7464459", "0.74489504", "0.7402116", "0.73303837", "0.73093045", "0.7301804", "0.7220941", "0.7211046", "0.7171814", "0.7135486", "0.7084348", "0.70739794", "0.70434344", "0.7018383", "0.6946094", "0.6939117", "0.69063085", "0.6849107", "0.6827202", "0.68258435", "0.682146", "0.6802679", "0.6769938", "0.6739797", "0.67281663", "0.66794527", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6675128", "0.6645449", "0.6625041", "0.6608102", "0.65734327", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65722334", "0.65361696", "0.65361696", "0.6506469", "0.649929", "0.64889365", "0.64889365" ]
0.77389073
5
Return the skill name.
def __str__(self): return self.skill
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def skill(self):\n return self._get(\"skill\")", "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"", "def display_skill(self):\n return ', '.join([skill.name for skill in self.skill.all()[:3]])", "def getSkill(self, skillName):\r\n if self.__contains__(skillName):\r\n return self.skills[skillName]\r\n return None", "def get_name(self):\n return '-'.join(self._name_parts +\n [self.role.name, self.scenario.name])", "def get_name() -> str:", "def get_name() -> str:\n pass", "def alexa_skill_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"alexa_skill_id\")", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n pass", "def getName(self):\n return _libsbml.Objective_getName(self)", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def 
name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.77862394", "0.7630785", "0.7034226", "0.6951422", "0.67984796", "0.6626635", "0.66073817", "0.6603298", "0.6590914", "0.6590914", "0.6590914", "0.6585181", "0.6584061", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996", "0.6573996" ]
0.702125
3
Returns the account for the given client. If it does not exist a new one is created and returned
def get_account(self, client: int): try: return self.accounts[client] except KeyError: return self._create_account(client)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def client(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/users/clients/{params['id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n client = result[\"response\"][\"result\"][\"client\"]\n client_obj = FreshbooksClient(\n accounting_systemid=client['accounting_systemid'], \n first_name=client['fname'],\n last_name=client['lname'],\n email=client['email'],\n vat_name=client['vat_name'],\n vat_number=client['vat_number'],\n home_phone=client['home_phone'],\n organization=client['organization'],\n username=client['username']\n )\n return client_obj.__dict__", "def get_account_for_user(cls, user):\n email = user.email()\n assert email\n key = '<%s>' % email\n # Since usually the account already exists, first try getting it\n # without the transaction implied by get_or_insert().\n account = cls.get_by_key_name(key)\n if account is not None:\n return account\n nickname = cls.create_nickname_for_user(user)\n return cls.get_or_insert(key, user=user, email=email, nickname=nickname,\n fresh=True)", "def account(self, account_code):\r\n return acc.Account(self, account_code)", "def get_client_by_id(self, client_id=None):\n # search client_id in list and return the client object\n for client in self.client_list:\n if client_id == client.client_id:\n return client.copy()\n\n # return empty client otherwise\n return Client()", "def get_account():\n\n bus = session_bus()\n\n goa_manager = bus.get_object(GOA_NAME, GOA_PATH)\n\n goa_objects = goa_manager.GetManagedObjects(dbus_interface=OBJECT_MANAGER)\n\n accounts = [\n obj for obj in goa_objects\n if obj != GOA_MANAGER_PATH\n ]\n\n if len(accounts) > 1:\n sys.exit(\"More than one account found.\")\n\n (account_path,) = accounts\n\n return bus.get_object(GOA_NAME, account_path)", "def get_account(self, account):\n \n pass", "def account(self, account_id):\r\n return resources.Account(self, account_id)", "def _GetAccountFromUser(self):\n name = self._GetAccountNameFromUser()\n number = self._GetAccountNumberFromUser()\n # Validate that the number is a number (assumes no alphabet characters in\n # the account number).\n if re.match(\"^[0-9]*$\", number) is None:\n raise ValueError(\"Account number is invalid: %r\" % number)\n return accounts_lib.Account(name, int(number))", "def getaccount(self, vergeaddress):\n return self.proxy.getaccount(vergeaddress)", "def get_account(self):\n return self._account", "def get_account(self):\n return self._account", "def client(self, id):\n return self.query(Client).filter(Client.id == id).one()", "def get_account(self, *args):\n\n account_data = api.get_account(\n *args,\n api_key=self.__creds.api_key_v2)\n\n return en.Account(creds=self.__creds, **account_data)", "def get_account(self, name):\n return self._accounts[name]", "def account(self, account_id: str):\n return get_from_list(self.accounts, \"id\", account_id)", "def get_cached_account(username, registry):\n cache_key = get_account_cache_key(username, registry)\n cache = registry.cache\n cached_account = cache.get(cache_key)\n return cached_account", "def account(cls, key, code):\n\n if key and code:\n utable = 
current.auth.settings.table_user\n query = (utable.registration_key == cls.keyhash(key, code))\n account = current.db(query).select(utable.ALL, limitby=(0, 1)).first()\n else:\n account = None\n\n return account", "def account_id():\n return client.get_caller_identity()['Account']", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def get_account():\n\n # get user\n user = g.user\n\n # response\n return jsonify({'user_account': UserAccountAdminSchema().dump(user)}), 200", "def __get_account(self, address):\n\t\tfor acct in self.wallet:\n\t\t\tif acct[\"address\"] == address:\n\t\t\t\treturn acct\n\t\traise ValueError(\"The given address does not exist in the bunkr-wallet\")", "def get_account_by_name(self, account_name):\n accounts = self.service_old.management().accounts().list().execute()\n\n account = None\n if accounts.get('items'):\n account = next(acnt for acnt in accounts.get('items') if acnt[\"name\"] == account_name)\n\n if account is None:\n log_msg = \"The account named \" + account_name + \" does not exist!\"\n print(log_msg)\n\n return account", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def get_client_id():\n\n return str(get_account().Get(GOA_ACCOUNT_OAUTH2, 'ClientId',\n dbus_interface=PROPERTIES))", "def get_or_create(account, account_name):\n if account.account == account_name:\n return account\n return realization.get_or_create(account, account_name)", "def account(self, account_id):\r\n return Account(self, account_id)", "def account(self):\n return Account(self)", "def get_client(self, ip_address):\n\n self.cur.execute(\n 'select * from authenticated_clients where ip_address=%s',\n (ip_address, )\n )\n return self.cur.fetchone()", "def account(self, sid):\r\n return accounts.Account(self, sid)", "def account(self):\r\n return Account(self)", "def get_dependent_accounts(client):\n\n PAGE_SIZE = 500\n managed_customer_service = client.GetService(\n 'ManagedCustomerService', version=settings.API_VERSION)\n\n offset = 0\n selector = {\n 'fields': ['CustomerId', 'Name'],\n 'predicates': {\n 'field': 'CanManageClients',\n 'operator': 'EQUALS',\n 'values': 'False'\n },\n 'paging': {\n 'startIndex': str(offset),\n 'numberResults': str(PAGE_SIZE)\n }\n 
}\n more_pages = True\n accounts = {}\n while more_pages:\n page = managed_customer_service.get(selector)\n\n if 'entries' in page and page['entries']:\n for account in page['entries']:\n accounts[account['customerId']] = str(account['name']) \\\n if hasattr(account, 'name') else 'None'\n\n offset += PAGE_SIZE\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n return accounts", "def get_user_reference(client):\n role = None\n try:\n role = client.hr.get_user_role()['userrole'][0]\n except Exception, exc:\n logging.error('Failed to get role: %r' % (exc,))\n return role\n return role['user__reference']", "async def get_current_account(pub_key: str = Depends(get_current_pub_key)):\n account = await Account.query.where(Account.pub_key == pub_key).gino.first()\n if account is None:\n raise HTTPException(status_code=403, detail=\"Account doesn't exist\")\n return account", "def logged_in_client(client):\n user = UserFactory()\n client.force_login(user)\n return client, user", "def get(self, account_id):\n self.client.get_account(account_id)", "def auth_by_pbid(self):\n self.console.debug(\"Auth by FSA: %r\", self.pbid)\n clients_matching_pbid = self.console.storage.getClientsMatching(dict(pbid=self.pbid))\n if len(clients_matching_pbid) > 1:\n self.console.warning(\"Found %s client having FSA '%s'\", len(clients_matching_pbid), self.pbid)\n return self.auth_by_pbid_and_guid()\n elif len(clients_matching_pbid) == 1:\n self.id = clients_matching_pbid[0].id\n # we may have a second client entry in database with current guid.\n # we want to update our current client guid only if it is not the case.\n try:\n client_by_guid = self.console.storage.getClient(Iourt42Client(guid=self.guid))\n except KeyError:\n pass\n else:\n if client_by_guid.id != self.id:\n # so storage.getClient is able to overwrite the value which will make\n # it remain unchanged in database when .save() will be called later on\n self._guid = None\n return self.console.storage.getClient(self)\n else:\n self.console.debug('Frozen Sand account [%s] unknown in database', self.pbid)\n return False", "def get_client_by_id(self, client_id):\r\n cursor = self.conn.cursor()\r\n cursor.execute(\"\"\"SELECT * FROM CLIENT WHERE id={}\"\"\".format(client_id))\r\n return cursor.fetchall()", "def get_client_by_nick(self, nick):\n for client in self.clients.values():\n if client.ident.nick == nick:\n return client\n return None", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def getCustomerAccount(self):\n return self._CustomerAccount", "def getCustomerAccount(self):\n return self._CustomerAccount", "def find_account_by_name(cls, application_name):\n \n for credential in cls.credentials_list:\n if credential.application_name == application_name:\n return credential", "def get_user_details(client):\n\n try:\n return client.user(user_id='me').get(fields=['login'])\n # print(f\"The email of the user is: {me['login']}\")\n\n except Exception as e:\n print(f\"Error has occurred: {e}\")\n return None", "def get_token(client):\n # Begin by looking in token cache, first arg is for scopes,\n # because token is for app rather 
than user, second arg is None.\n result = client.acquire_token_silent(\n [\"https://graph.microsoft.com/.default\"], account=None\n )\n\n if not result:\n logger.info(\"No suitable token exists in cache. Get new one from Azure AD\")\n result = client.acquire_token_for_client(\n scopes=[\"https://graph.microsoft.com/.default\"]\n )\n\n # If we can't get access token, see what went wrong, otherwise return it.\n if \"access_token\" not in result:\n logger.exception(f'{result[\"error_description\"]} - {result[\"correlation_id\"]}')\n else:\n return result[\"access_token\"]", "def query_client(self, client_id):\n try:\n return self.client_model.objects.get(client_id=client_id)\n except self.client_model.DoesNotExist:\n return None", "def get_by_key_name(cls, key, **kwds):\n if not kwds and cls.current_user_account is not None:\n if key == cls.current_user_account.key().name():\n return cls.current_user_account\n return super(Account, cls).get_by_key_name(key, **kwds)", "def retrieve(cls, account):\n requested_acct = None\n try:\n requested_acct = BankAccount.__acct_store[account]\n except KeyError:\n return False\n finally:\n return requested_acct", "def get_account(self, accountid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'accountid': accountid}\n url = SECURE_API_URL + \"raas/v1/account\"\n return self._lr_object._get_json(url, payload)", "def get_mcc_accounts(client):\n\n PAGE_SIZE = 500\n managed_customer_service = client.GetService(\n 'ManagedCustomerService', version=settings.API_VERSION)\n\n offset = 0\n selector = {\n 'fields': ['CustomerId', 'Name'],\n 'predicates': {\n 'field': 'CanManageClients',\n 'operator': 'EQUALS',\n 'values': 'True'\n },\n 'paging': {\n 'startIndex': str(offset),\n 'numberResults': str(PAGE_SIZE)\n }\n }\n more_pages = True\n mcc_accounts = {}\n while more_pages:\n page = managed_customer_service.get(selector)\n\n if 'entries' in page and page['entries']:\n for account in page['entries']:\n mcc_accounts[account['customerId']] = unicode(account['name']) \\\n if hasattr(account, 'name') else 'None'\n\n offset += PAGE_SIZE\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n return mcc_accounts", "def get_client_instance(cls, session, client_config, create=False):\n client = None\n if cls.SESSION_ID_KEY in session:\n client = session[cls.SESSION_ID_KEY]\n log.debug(\"Found OAuth client in session.\")\n if client is None and create:\n client = cls(client_config)\n session[cls.SESSION_ID_KEY] = client\n session.save()\n log.debug(\"No OAuth client in session - created new one.\")\n return client", "def _get_user_client(self):\n return api.OAuthClient(settings.CLIENT_ID, settings.CLIENT_SECRET, settings.USER, settings.PASSWORD)", "def GetAccount(host):\n return FetchUrlJson(host, 'accounts/self')", "async def get_user_account(self):\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", \"/api/v3/account\", params, auth=True)\n return success, error", "def get_account_for_tenant(test_auth, tenant_id):\n return '%s%s' % (test_auth.reseller_prefixes[0], tenant_id)", "def get_account(self, account_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = 'status'\r\n\r\n return self.account.getObject(id=account_id, **kwargs)", "def get_client(self, mac_address: str) -> Union[Any, None]:\n if mac_address in self._clients:\n return self._clients[mac_address]", "def 
bank_account():\n return BankAccount()", "def __fetch_user_account(self, guid):\n\n try:\n user_account = UserAccount.objects.get(guid=guid)\n except Exception as e:\n logger.exception(e)\n else:\n return user_account", "def get_stencila_account(cls) -> \"Account\":\n if not hasattr(cls, \"_stencila_account\"):\n cls._stencila_account = Account.objects.get(name=\"stencila\")\n return cls._stencila_account", "def get_wim_account_by(self, wim=None, tenant=None, uuid=None, **kwargs):\n kwargs.setdefault('error_if_multiple', True)\n return self.get_wim_accounts_by(wim, tenant, uuid, **kwargs)[0]", "def create_client(name):\n client = Client(name=name)\n print(client.client_secret)\n db.session.add(client)\n db.session.commit()\n return client", "def find_credential(account):\n return Credentials.find_by_username(account)", "def _account(self) -> Account:\n if isinstance(self._node_cached_account, Account):\n return self._node_cached_account\n account = Account.retrieve(\n session=self.entity.session,\n entity=self.entity,\n account_id=self.account_id\n )\n self._node_cached_account = account\n return account", "def get_clientid(self):\n\n url = f'https://{self.__api}/v1/objects/client'\n body = {\"filter\": {}}\n with requests.post(url, json=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status_code not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status_code, response.content)\n return response.json().get('body')[0].get('id')", "def get_client_data(client_name):\n log.debug('starting get_client_data')\n clients = wf.cached_data('clients', None, max_age=0)\n\n # Loop through clients and return client with a match\n for client in clients:\n if client['name'] == client_name:\n log.debug('get_client_id finished, client_data: ' + str(client))\n return client", "def get_by_id(self, id):\n accts = [acct for acct in self.accounts if UUID(acct.uuid) == UUID(id)]\n assert len(accts) <= 1\n if len(accts) == 0:\n raise KeyError('account with id {} unknown'.format(id))\n elif len(accts) > 1:\n log.warning('multiple accounts with same UUID found', uuid=id)\n return accts[0]", "def get_object(self):\n account = Account.get_account_with_admins(account.id)\n\n return account[0] if account else None", "def lookup_client(self, ip_addr: str):\n try:\n conn_obj = self.client_list[ip_addr]\n except KeyError:\n raise Networking.Host.ClientNotFoundException\n\n if conn_obj is not None:\n return conn_obj\n else:\n raise Networking.Host.ClientNotFoundException", "def get_wim_account(self, uuid_or_name, **kwargs):\n kwargs.setdefault('postprocess', _postprocess_wim_account)\n kwargs.setdefault('SELECT', _WIM_ACCOUNT_SELECT)\n return self.get_by_name_or_uuid('wim_accounts', uuid_or_name, **kwargs)", "def get_account(self, account_number):\n\n if not isinstance(account_number, str):\n raise ValueError('Invalid type <{}> for account number'.format(\n type(account_number)))\n\n try:\n if self.di is not None:\n result = self.di.get(account_number)\n else:\n result = self.accounts.get(account_number, None)\n\n except DBConnectionError:\n result = \"Connection error occurred. 
Try Again.\"\n return result", "def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account", "def get_account_for_nickname(cls, nickname):\n assert nickname\n assert '@' not in nickname\n return cls.all().filter('lower_nickname =', nickname.lower()).get()", "def find_by_account_name(cls, account_name):\n for account in cls.credentials_list:\n if account.account_name == account_name:\n return account", "async def get_user_account(self):\n uri = \"/fapi/v1/account\"\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error", "def get_account_for_email(cls, email):\n assert email\n key = '<%s>' % email\n return cls.get_by_key_name(key)", "def account(request: Request) -> Dict:\n # Get account\n account_id: int = request.matchdict.get(\"account_id\")\n account_obj: Optional[Account] = get_account_by_id(\n session=request.dbsession,\n account_id=account_id,\n )\n # TODO: Check access\n\n\n return {\n \"account\": account_obj,\n }", "def get_client(self):\n return self.client", "def get_account_by_id(self, id_):\n return next((account for account in self.accounts\n if account.id == id_), None)", "def ClientCreateAccount(account_name,\n chap,\n strict,\n client_ips,\n client_user,\n client_pass,\n mvip,\n username,\n password):\n log = GetLogger()\n\n log.info(\"Searching for accounts\")\n cluster = SFCluster(mvip, username, password)\n\n try:\n svip = cluster.GetClusterInfo()[\"svip\"]\n except SolidFireError as e:\n log.error(\"Failed to get cluster info: {}\".format(e))\n return False\n\n # Get a list of accounts from the cluster\n try:\n allaccounts = SFCluster(mvip, username, password).ListAccounts()\n except SolidFireError as e:\n log.error(\"Failed to list accounts: {}\".format(e))\n return False\n\n # Run all of the client operations in parallel\n allgood = True\n results = []\n pool = threadutil.GlobalPool()\n for client_ip in client_ips:\n results.append(pool.Post(_ClientThread, mvip, username, password, client_ip, client_user, client_pass, account_name, svip, allaccounts, chap, strict))\n\n for idx, client_ip in enumerate(client_ips):\n try:\n results[idx].Get()\n except SolidFireError as e:\n log.error(\" {}: Error creating account: {}\".format(client_ip, e))\n allgood = False\n continue\n\n if allgood:\n log.passed(\"Successfully created accounts for all clients\")\n return True\n else:\n log.error(\"Could not create accounts for all clients\")\n return False", "def find_credential(account):\n return Credentials.find_credential(account)", "def get_account():\n account_id = request.json['id']\n account = [account for account in accounts if account['id'] == account_id]\n if len(account) == 0:\n abort(404, 'Account not found')\n\n return json.dumps(account[0], ensure_ascii=False), 200, {'Content-Type': 'text/css; charset=utf-8'}", "def auth_by_pbid_and_guid(self):\n self.console.debug(\"Auth by both guid and FSA: %r, %r\", self.guid, self.pbid)\n clients_matching_pbid = self.console.storage.getClientsMatching({'pbid': self.pbid, 'guid': self.guid})\n if len(clients_matching_pbid):\n self.id = clients_matching_pbid[0].id\n return self.console.storage.getClient(self)\n else:\n self.console.debug(\"Frozen Sand account [%s] with guid '%s' unknown in 
database\", self.pbid, self.guid)\n return False", "def _get_billing_account_id():\n org_client = boto3.client(\"organizations\")\n response = org_client.describe_organization()\n return response[\"Organization\"][\"MasterAccountId\"]", "def service_account(self) -> str:\n return pulumi.get(self, \"service_account\")", "def get_user(self, user_id):\n try:\n return Account.objects.get(pk=user_id)\n except Account.DoesNotExist:\n return None", "def get(self, id: int) -> Client:\n\n return self.__clients[id]", "def get_client_key(self, client, channel):\n result = None\n if channel in self.clients:\n for service in self.clients[channel]:\n if service[0] != client:\n continue\n result = service[1]\n return result", "def account(request):\r\n # if auth fails, it'll raise an HTTPForbidden exception\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }", "def get_by_address(self, address):\n assert len(address) == 20\n accounts = [account for account in self.accounts if account.address == address]\n if len(accounts) == 0:\n raise KeyError('account with address {} not found'.format(encode_hex(address)))\n elif len(accounts) > 1:\n log.warning('multiple accounts with same address found', address=encode_hex(address))\n return accounts[0]", "def get_account(self, account_id=None, account_name=None, search=False):\n if not (account_id or account_name):\n aliases = self.get_account_aliases()\n if aliases:\n account_name = aliases[0]\n else:\n raise ValueError('get_account(). Account id, name, or alias not found')\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n if accounts:\n if len(accounts) > 1:\n raise ValueError('get_account matched more than a single account with the '\n 'provided criteria: account_id=\"{0}\", account_name=\"{1}\". 
'\n 'Matched:{2}'\n .format(account_id, account_name,\n \", \".join(str(x) for x in accounts)))\n else:\n return accounts[0]\n return None", "async def get_client(\n self,\n request: Request,\n client_id: str,\n client_secret: Optional[str] = None,\n ) -> Optional[OAuth2Client]:\n\n client_record = await self._db.query_one(\n Client.select(*OAuth2Client._fields, filters=\".id = <uuid>$id\"),\n id=client_id,\n )\n client_record = Client.from_obj(client_record)\n\n if client_record is not None:\n return OAuth2Client(\n client_id=client_record.client_id,\n client_secret=client_record.client_secret,\n grant_types=client_record.grant_types,\n response_types=client_record.response_types,\n redirect_uris=client_record.redirect_uris,\n scope=client_record.scope,\n )", "def service_account(self) -> Optional[str]:\n return pulumi.get(self, \"service_account\")", "def get_settings_from_client(client):\r\n settings = {\r\n 'username': '',\r\n 'api_key': '',\r\n 'timeout': client.timeout or '',\r\n 'endpoint_url': client.endpoint_url,\r\n }\r\n try:\r\n settings['username'] = client.auth.username\r\n settings['api_key'] = client.auth.api_key\r\n except AttributeError:\r\n pass\r\n\r\n return settings", "def get_client_data(self, client_id):\n query = \"\"\"SELECT id,\n secret\n FROM clients\n WHERE active = 1\n AND id = %s\"\"\"\n self._execute(query, (client_id,))\n return self._dictfetchone()", "def customer_get_one(user_id):\n return customer_get(user_id)", "def account_id(self):\n return self.get('/accounts')[0]['Id']", "def get_account_by_name(self, name):\n return next((account for account in self.accounts\n if account.name.lower() == name.lower()), None)" ]
[ "0.66133344", "0.6345957", "0.6296915", "0.62183553", "0.6195077", "0.61447585", "0.6143067", "0.61124474", "0.6101005", "0.6082482", "0.6080354", "0.60321856", "0.60321856", "0.60195756", "0.5999535", "0.59913605", "0.5953246", "0.5937958", "0.5926711", "0.59228164", "0.58934146", "0.5786306", "0.575765", "0.57551455", "0.573389", "0.56322896", "0.5619452", "0.55649114", "0.55547", "0.55461085", "0.5523594", "0.5516546", "0.5511032", "0.55092376", "0.550592", "0.5489547", "0.5481052", "0.5458719", "0.5438777", "0.5429854", "0.54263484", "0.54209274", "0.54155385", "0.54155385", "0.5415127", "0.5402789", "0.5391464", "0.53897667", "0.53745735", "0.5373628", "0.53604907", "0.5347643", "0.5340946", "0.534042", "0.53269655", "0.53265125", "0.53243315", "0.5319333", "0.53187835", "0.53056556", "0.5294829", "0.5289865", "0.52851474", "0.5283333", "0.5282494", "0.52701205", "0.5267372", "0.52561164", "0.52554905", "0.52525395", "0.52521753", "0.5239201", "0.52382994", "0.5225702", "0.5223156", "0.52143705", "0.5213595", "0.521035", "0.52094674", "0.5194261", "0.5192971", "0.5191183", "0.51853245", "0.5181834", "0.5180524", "0.51775134", "0.51774114", "0.51767874", "0.5171143", "0.5169232", "0.51661587", "0.51647186", "0.51628786", "0.5158746", "0.51424193", "0.5140492", "0.5132355", "0.51233375", "0.51098603", "0.50999725" ]
0.9063625
0
Write a volume to a file path.
def write(img, path):
    create_directories_for_file_name(path)
    writer = sitk.ImageFileWriter()
    writer.Execute(img, path, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n client.write(path, **kwargs)", "def write_to_path(self, path):\n assert not path.exists()\n fout = path.open(\"wb\")\n fout.write(self.to_string())\n assert not fout.close()\n path.setdata()", "def writable(path):", "def setVolume(value):\n client = connect()\n setVolumeInternal(client, value)\n disconnect(client)", "def update_volume(VolumeId=None, Name=None, MountPoint=None):\n pass", "def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)", "def add_writable_file_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if host_outdir_tgt:\n # shortcut, just copy to the output directory\n # which is already going to be mounted\n log.debug('shutil.copy({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copy(volume.resolved, host_outdir_tgt)\n else:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n file_copy = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copy({}, {})'.format(volume.resolved, file_copy))\n shutil.copy(volume.resolved, file_copy)\n self._add_volume_binding(file_copy, volume.target, writable=True)\n ensure_writable(host_outdir_tgt or file_copy)", "def save(self, filepath: str | Path) -> None:\n extension = Path(filepath).suffix\n if extension.lower() in VIDEO_TYPES:\n video_writer = imageio.get_writer(filepath, macro_block_size=None)\n for slice in self.volume:\n slice = slice.astype(\"uint8\")\n video_writer.append_data(slice)\n video_writer.close()\n elif extension.lower() in IMAGE_TYPES:\n base = Path(filepath).stem\n print(\n \"Saving OCT as sequential slices {}_[1..{}]{}\".format(\n base, len(self.volume), extension\n )\n )\n full_base = Path(filepath).with_suffix(\"\")\n self.volume = np.array(self.volume).astype(\"float64\")\n self.volume *= 255.0 / self.volume.max()\n for index, slice in enumerate(self.volume):\n filename = \"{}_{}{}\".format(full_base, index, extension)\n cv2.imwrite(filename, slice)\n elif extension.lower() == \".npy\":\n np.save(filepath, self.volume)\n else:\n raise NotImplementedError(\n \"Saving with file extension {} not supported\".format(extension)\n )", "def write_inventory_file(inventory_item):\n try:\n with open('inventory', 'w') as file:\n file.write(inventory_item)\n except OSError:\n pass", "def write_file(path, data):\n # opens file\n try:\n os.makedirs(os.path.dirname(path), exist_ok=True)\n f = open(str(path), \"w\")\n f.write(data)\n f.close()\n except Exception as e:\n print(\"Error writing file: \", e)\n sys.exit(1)", "def write_binary(self, path):\n return", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def volume(self, value):\n self._volume = value\n self._sendCommand('%03dVL' % value)", "async def async_set_volume(self, volume):\n self._volume = volume", "def store_volume(volume_name):\n class store(argparse.Action):\n def __call__(self, parser, namespace, values, option_strings = None):\n # Add the new volume to the list of volumes\n volumes = getattr(namespace, \"volumes\", [])\n new_volume = NamedVolume(volume_name, Path(values)) if values else None\n 
setattr(namespace, \"volumes\", [*volumes, new_volume])\n\n # Allow the new volume to be found by name on the opts object\n setattr(namespace, volume_name.replace('/', '_'), new_volume)\n\n return store", "def add_file_or_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt # type: Optional[Text]\n ):\n if not volume.resolved.startswith(\"_:\"):\n self._add_volume_binding(volume.resolved, volume.target) # this one defaults to read_only", "def write(self, path, key):\n raise NotImplementedError", "def setVolume(self, *args):\n return _libsbml.Compartment_setVolume(self, *args)", "def nv_write(self, path: Union[bytes, str], data: Union[bytes, str]) -> None:\n path = _to_bytes_or_null(path)\n data = _to_bytes_or_null(data)\n ret = lib.Fapi_NvWrite(self._ctx, path, data, len(data))\n _chkrc(ret)", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def write(self, filename, data):\n owner_rw = 0600\n fd = os.open(filename, os.O_WRONLY | os.O_CREAT, owner_rw)\n # In case file existed already with wrong permissions, fix them.\n os.chmod(filename, owner_rw)\n os.write(fd, data)\n os.close(fd)", "def volume(self, volume):\n\n self._volume = volume", "def volume(self, volume):\n\n self._volume = volume", "def volume(self, volume):\n\n self._volume = volume", "def write(cls, path, text):\n with cls.open(path, 'wt') as fd:\n return fd.write(text)", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def set_volume(self, volume):\n self.get(COMMAND_UIC, 'SetVolume', [('volume', int(volume))])", "async def write_file(self, directory: str, name: str, file: bytes):\n pass", "def _save(self, name, content):\n full_path = self.path(name)\n with caches['default'].lock('{}_{}'.format(full_path, 'reader')):\n with caches['default'].lock('{}_{}'.format(full_path, 'writer')):\n if cache.islocked(full_path) is False:\n with cache.lock(full_path):\n cache.set(full_path, 'storage')\n try:\n directory = os.path.dirname(full_path)\n\n # Create any intermediate directories that do not exist.\n if self.__volume.exists(directory) is False:\n try:\n if self.directory_permissions_mode is not None:\n # os.makedirs applies the global umask, so we reset it,\n # for consistency with file_permissions_mode behavior.\n self.volume.makedirs(directory, self.directory_permissions_mode)\n else:\n self.volume.makedirs(directory)\n except FileNotFoundError:\n # There's a race between os.path.exists() and os.makedirs().\n # If os.makedirs() fails with FileNotFoundError, the directory\n # was created concurrently.\n pass\n if not os.path.isdir(directory):\n raise IOError(\"%s exists and is not a directory.\" % directory)\n\n # There's a potential race condition between get_available_name and\n # saving the file; it's possible that two threads might return the\n # same name, at which point all sorts of fun happens. 
So we need to\n # try to create the file, but if it already exists we have to go back\n # to get_available_name() and try again.\n\n while True:\n try:\n # This file has a file path that we can move.\n if hasattr(content, 'temporary_file_path'):\n file_move_safe(content.temporary_file_path(), full_path)\n\n # This is a normal uploadedfile that we can stream.\n else:\n # The current umask value is masked out by os.open!\n fd = self.__volume.open(full_path, self.OS_OPEN_FLAGS, 0o666)\n _file = None\n try:\n for chunk in content.chunks():\n if _file is None:\n _file = fd.dup()\n _file.write(chunk)\n finally:\n if _file is not None:\n _file.close()\n fd.close()\n except FileExistsError:\n # A new name is needed if the file exists.\n name = self.get_available_name(name)\n full_path = self.path(name)\n else:\n # OK, the file save worked. Break out of the loop.\n break\n\n if self.file_permissions_mode is not None:\n self.__volume.chmod(full_path, self.file_permissions_mode)\n finally:\n cache.delete(full_path)\n # Store filenames with forward slashes, even on Windows.\n return (True, name.replace('\\\\', '/'))\n return (False, cache.get(full_path))", "def write(self, filename, data):\n\t\t# create the path if it doesn't exists\n\t\tdir = os.path.dirname(filename)\n\t\tif not os.path.isdir(dir):\n\t\t\tos.mkdir(dir)\n\t\t\n\t\t# write data\n\t\tfile = codecs.open(filename, 'w', 'utf8')\n\t\tfile.write(data)\n\t\tfile.close()", "def write_file(path, data):\n with open_local_or_gcs(path, 'w') as h_dest:\n h_dest.write(data) # pylint: disable=no-member", "def create_volume(self, volume):\n LOG.debug('SPDK create volume')\n\n return self._create_volume(volume)", "def spew(path, data):\n with open(path, 'w+') as f:\n f.write(data)", "def _write_to_file(self, string):\n with open(self.p.base_dir + '/' + self.p.filename, 'w') as f:\n f.write(string)", "def set_volume(self, volume):\n self._volume = volume\n self._update_volume()", "def attach_volume(self, instance_name, device_path, mountpoint):\n return True", "def do_create_volume(self, arg):\n args = self.parse_arguments(arg)\n if len(args) == 0:\n self.perror(\"No name given.\")\n return\n if len(args) == 1:\n self.perror(\"No path given.\")\n return\n if not os.path.isabs(args[1]):\n print(\"Path must be absolute: \" + args[1])\n return\n self.do_coroutine(self._localStorageRoutines.create_volume_routine(args[0], args[1]))", "def write_to_file(self, filename: str) -> None:", "def on_volume(self, _instance, volume):\n self._set_volume(volume)", "def attach_volume(self, host_path: str, container_path: str, mode: str = None):\n self.volumes[host_path] = {\n \"bind\": container_path,\n \"mode\": mode or \"Z\"\n }", "def write(self, path, overwrite=False):\r\n\r\n self.data.write(path, format='ascii.fixed_width', delimiter='|', overwrite=overwrite)", "def create_volume_string(host_dir, container_dir, read_only = True):\n access = \"ro\" if read_only else \"rw\"\n return \":\".join([os.path.abspath(host_dir), container_dir, access])", "def write_to_disk(self):\n\n\t\t# print \"--------------------------------------------------------WRITING PIECE %r TO DISK\" %self.index\n\t\ttry:\n\t\t\tos.makedirs(PATH)\n\t\texcept:\n\t\t\tpass\n\t\tself.piece_file_name = os.path.join(PATH, self.torrent.name+'.'+'00'+str(self.index))\n\t\t# print \"Saving piece to file name: \", self.piece_file_name\n\t\tpiece_file = open(self.piece_file_name, 'w')\n\t\tpiece_file.write(self.data)\n\t\tpiece_file.close()", "def write(self, path, content):\n this_file = open(path, 'w')\n 
this_file.write(content)\n this_file.close()", "def test_volumes_simple_volume(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /cpath: /hpath\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert len(config.volumes) == 1\n\n v = config.volumes[\"/cpath\"]\n assert v.container_path == \"/cpath\"\n assert v.host_path == \"/hpath\"", "def write(path, data):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n from sambatools.smb.smb_structs import OperationFailure\r\n try:\r\n samba.store_file(os.path.basename(path), data, os.path.dirname(path))\r\n except OperationFailure:\r\n logger.info(\"deportesalacarta.core.filetools write: Error al guardar el archivo: {0}\".format(path))\r\n return False\r\n else:\r\n return True\r\n\r\n else:\r\n try:\r\n f = open(path, \"wb\")\r\n f.write(data)\r\n f.close()\r\n\r\n # except EnvironmentError:\r\n except Exception, ex:\r\n logger.info(\"filetools.write: Error al guardar el archivo: \")\r\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\r\n message = template.format(type(ex).__name__, ex.args)\r\n logger.info(message)\r\n # logger.info(\"deportesalacarta.core.filetools write: Error al guardar el archivo: {0}\".format(path))\r\n return False\r\n else:\r\n return True", "def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)", "def write_img_to_fs(name, data):\n with open(name, \"wb\") as fout:\n fout.write(data)", "def add_volume_info(self, vi):\n vol_num = vi.volume_number\n self.volume_info_dict[vol_num] = vi\n if self.fh:\n self.fh.write(vi.to_string() + \"\\n\")", "def write(message):\n\n with open(str(path), 'a') as fp:\n fp.write(message)", "def write(path):\n return mac_slideshow.preferences.write(KEY, path)", "def Set(*args):\n return _XCAFDoc.XCAFDoc_Volume_Set(*args)", "def update_volume( opencloud_volume ):\n\n client = connect_syndicate()\n\n vol_name = opencloud_volume.name\n vol_description = opencloud_volume.description\n vol_private = opencloud_volume.private\n vol_archive = opencloud_volume.archive\n vol_default_gateway_caps = opencloud_caps_to_syndicate_caps( opencloud_volume.cap_read_data, opencloud_volume.cap_write_data, opencloud_volume.cap_host_data )\n\n try:\n rc = client.update_volume( vol_name,\n description=vol_description,\n private=vol_private,\n archive=vol_archive,\n default_gateway_caps=vol_default_gateway_caps )\n\n if not rc:\n raise Exception(\"update_volume(%s) failed!\" % vol_name )\n\n except Exception, e:\n # transort or method error \n logger.exception(e)\n return False\n\n else:\n return True", "def fs_write(obj, file_path):\n try:\n with open(str(file_path), 'w') as f:\n f.write(obj)\n return obj\n except TypeError as e:\n raise e", "def put_file(container, filepath, content):\n return put_files(container, [(filepath, content)])", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):", "def write(self, content, mode='wb'):\r\n self.localpath.write(content, mode)", "def touch(path):\n with open(path, 'wt') as f:\n pass", "def copy_file_out(self, path, callback=None):\n try:\n self.copy_volume(path, self.device, callback=callback)\n except IOError, e:\n logger.exception(\"copy_file_out failed with '%s'\" % e)\n raise ISCSICopyFailed()", "def journal_write(session, k, v):\n entry = 
models.VppEtcdJournal(k=k, v=v)\n session.add(entry)\n session.flush()", "def write(self, filepath, data, encoding=None):\n if isinstance(filepath, basestring):\n filepath = filepath.split('/')\n if len(filepath) > 1:\n dirpath = self._join(filepath[:-1])\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n thepath = self._join(filepath)\n encoding = encoding or self.encoding\n if encoding is not None:\n data = data.encode(encoding)\n with open(thepath, 'wb') as f:\n f.write(data)\n return thepath", "def write(s, path, encoding=\"utf-8\"):\n with open(path, \"wb\") as f:\n f.write(s.encode(encoding))", "def writefile(path: Union[str, Path], txt: str) -> None:\n with open(path, 'w') as outfile:\n outfile.write(txt)", "def write(self, cw, message):\n if cw in self.location:\n fn = self.location[str(cw)]\n try:\n swf = open(fn, \"w\")\n except Exception:\n logmsg.update(\"Error writing to file \" + fn + \"!\", 'E')\n else:\n swf.write(str(message))\n swf.close()\n else:\n logmsg.update(\"Wrong target [\" + str(cw) + \"] for saving file!\", 'E')", "def write(self, block_no, value):\n with open(self.file_path, 'r+') as f:\n f.seek(block_no * config.block_size)\n f.write(value)", "def fwrite(filename, text):\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n with open(filename, 'w') as f:\n f.write(text)", "def writetofile(self,direction,value):\r\n output = str(\"{},{} \\n\".format(direction,value))\r\n self.new_file.write(output)", "def add_volume(self, volume: 'Volume'):\n self.volumes.append(volume)", "def path_to_volume(path):\n gFile = gio.File(path)\n try:\n mount = gFile.find_enclosing_mount()\n except gio.Error:\n return None\n else:\n if mount != None:\n volume = mount.get_volume()\n return volume\n return None", "def write_to_file(filepath, data):\n\n with open(filepath, 'w') as f:\n f.write(str(data))", "def write_remote_file(sid, path, data):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.write_file(path, data)", "async def async_set_volume_level(self, volume: float) -> None:\n await self._client.set_volume(round(volume * 100))\n self.async_write_ha_state()", "def write(filePath, data, mkdir=False):\r\n __dir, __name = os.path.split(filePath)\r\n\r\n if not os.path.isdir(__dir):\r\n if mkdir:\r\n preparedir(__dir)\r\n else:\r\n raise ValueError(\"Target dir doesn't exist: %s\" % __dir)\r\n\r\n with open(filePath, \"w\") as outf:\r\n try:\r\n outf.write(str(data))\r\n return filePath\r\n except Exception as e:\r\n print \"Failed to write points to file:\\n%s\" % e\r\n return False", "def write(self, filename, data):\n raise NotImplementedError", "def write_file(data, file_path):\n try:\n with open(file_path, \"w\") as file_obj:\n file_obj.write(data)\n\n except OSError:\n writer(f\"\\nwarning: Unable to write backup file {file_path}\\n\", FORMAT[\"WARNING\"])", "def write(self, string):\n self.__file.write(string)", "def save_video_to_volume(\n filename_in, \n filename_out,\n color_diff=20,\n crop_threshold=0,\n interpolator=Interpolator(),\n ):\n try:\n video = read_video_into_numpy(filename_in)\n volume = video_to_volume(video, \n color_diff=color_diff, \n crop_threshold=crop_threshold, \n interpolator=interpolator)\n np.save(filename_out, volume)\n return True\n except:\n print(\"Error:\", filename_in, filename_out)\n return False", "async def volume(self, ctx, volume: int):\n\n if ctx.voice_client is None:\n return await ctx.send(\"Not connected to a voice channel.\")\n\n 
ctx.voice_client.source.volume = volume / 100\n await ctx.send(\"Changed volume to {}%\".format(volume),delete_after=15)", "def write(cls, file, data):\n file.write(data)", "def write_to_binary_file(self, loc: str, data: bytes):\n try:\n os.mkdir(\"../\" + self.uri)\n except FileExistsError:\n pass\n\n f = open(\"../\" + self.uri + loc, \"wb\")\n f.write(data)\n print(\"[WRITE] written to binary file loc\")\n f.close()", "def touch(path):\n open(path, 'wb').close()", "def volume(name, map, ramp=\"rainbow2\"):\r\n return f'\\ncmd.volume(name=\"{name}\", map=\"{map}\", ramp=\"{ramp}\")\\n'", "def write_file(self, path, data):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/nodes/{self.node_id}\"\n f\"/files/{path}\"\n )\n\n self.connector.http_call(\"post\", _url, data=data)", "def w(self, value):\n self.oFile.write(value)", "def write(\n path: Union[Path, str],\n image: np.ndarray) -> None:\n raise NotImplementedError()", "def write_file(path: str, content: Union[str, bytes], mode: str = 'w') -> None:\n from peltak.core import context, log\n\n if context.get('pretend', False):\n log.info(\"Would overwrite <34>{path}<32> with:\\n<90>{content}\",\n path=path,\n content=content)\n else:\n with open(path, mode) as fp:\n fp.write(content)", "def FileWrite(offset, buf):\r\n return _hiew.HiewGate_FileWrite(offset, buf)", "def _write_value(value, path):\n base_command = \"echo '{0}' > {1}\"\n # There is no common method for redirecting stderr to a null sink, so the\n # command string is platform-dependent\n if platform == 'win32':\n command = \"{0} > NUL\".format(base_command)\n else:\n command = \"exec 2> /dev/null; {0}\".format(base_command)\n os.system(command.format(value, path))", "def add_writable_directory_volume(self,\n runtime, # type: List[Text]\n volume, # type: MapperEnt\n host_outdir_tgt, # type: Optional[Text]\n tmpdir_prefix # type: Text\n ):\n if volume.resolved.startswith(\"_:\"):\n # Synthetic directory that needs creating first\n if not host_outdir_tgt:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n new_dir = os.path.join(\n tempfile.mkdtemp(dir=self.tmpdir),\n os.path.basename(volume.target))\n self._add_volume_binding(new_dir, volume.target, writable=True)\n elif not os.path.exists(host_outdir_tgt):\n log.debug('os.makedirs({}, 0o0755)'.format(host_outdir_tgt))\n os.makedirs(host_outdir_tgt, 0o0755)\n else:\n if self.inplace_update:\n self._add_volume_binding(volume.resolved, volume.target, writable=True)\n else:\n if not host_outdir_tgt:\n log.debug('tempfile.mkdtemp(dir={})'.format(self.tmpdir))\n tmpdir = tempfile.mkdtemp(dir=self.tmpdir)\n new_dir = os.path.join(\n tmpdir, os.path.basename(volume.resolved))\n log.debug('shutil.copytree({}, {})'.format(volume.resolved, new_dir))\n shutil.copytree(volume.resolved, new_dir)\n self._add_volume_binding(new_dir, volume.target, writable=True)\n else:\n log.debug('shutil.copytree({}, {})'.format(volume.resolved, host_outdir_tgt))\n shutil.copytree(volume.resolved, host_outdir_tgt)\n ensure_writable(host_outdir_tgt or new_dir)", "def send_channel_volume(self, value=127, ch=None):\n self.send_control_change(CHANNEL_VOLUME, value, ch=ch)", "def set_volume_level(self, volume):\n self._volume = volume", "def set_volume_level(self, volume):\n self._volume = volume", "def savedata(data, path):\n if path.endswith(\".tiff\") or path.endswith('.tif'):\n try:\n from vigra.impex import writeVolume\n except ImportError:\n raise ImportError(\"Vigra is needed to read/write TIFF volumes, but could not be 
imported.\")\n\n writeVolume(data, path, '', dtype='UINT8')\n\n elif path.endswith(\".h5\"):\n try:\n from vigra.impex import writeHDF5\n vigra_available = True\n except ImportError:\n vigra_available = False\n import h5py\n\n if vigra_available:\n writeHDF5(data, path, \"/data\")\n else:\n with h5py.File(path, mode='w') as hf:\n hf.create_dataset(name='data', data=data)\n\n else:\n raise NotImplementedError(\"Can't save: unsupported format. Supported formats are .tiff and .h5\")", "def set_volume(self):\n import fcntl\n import struct\n try:\n knob = struct.pack(\"III\", 0, 0, self.volume) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, volume_level\n fcntl.ioctl(self.mixer_fd, 3, knob)\n except:\n pass", "def filewrite(self, filename):\n io.write(self, filename)", "def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())", "def write(self, path):\n with open(path, \"w\") as fh_:\n fh_.write(self.config())", "def write(self, arg, **kwargs):\r\n if hasattr(arg, 'seek'):\r\n self._tofile(arg, **kwargs)\r\n else:\r\n with open(arg, 'wb') as fid:\r\n self._tofile(fid, **kwargs)", "def write_to_local(path, data_rec):\n path, filename = os.path.split(path)\n with open(filename, 'wb') as f:\n f.write(data_rec)\n f.close()" ]
[ "0.6251699", "0.60698074", "0.60018075", "0.5975603", "0.5835609", "0.5832426", "0.5821404", "0.5796797", "0.5752071", "0.5750823", "0.57148486", "0.5701186", "0.5690606", "0.56724447", "0.565024", "0.56472427", "0.5638502", "0.56315726", "0.56258756", "0.5602037", "0.5573271", "0.5560137", "0.5560137", "0.5560137", "0.55437773", "0.55258846", "0.55258846", "0.55229485", "0.5505212", "0.54960054", "0.5494361", "0.549311", "0.54913056", "0.54909086", "0.54846764", "0.5462422", "0.54529774", "0.5452756", "0.54433125", "0.5434171", "0.54327095", "0.54264665", "0.5422272", "0.54011554", "0.53961647", "0.5395942", "0.5394504", "0.53702563", "0.5368564", "0.5366436", "0.5366093", "0.536576", "0.5358981", "0.53566825", "0.5353158", "0.53517795", "0.53340876", "0.53106564", "0.52969366", "0.5292138", "0.5278205", "0.5274248", "0.52556074", "0.52530044", "0.5248171", "0.52385545", "0.52234846", "0.5223051", "0.5220737", "0.5210948", "0.5208342", "0.52075857", "0.5199806", "0.5198602", "0.51896757", "0.5186363", "0.51824176", "0.51721704", "0.51695484", "0.5164829", "0.51643246", "0.51640207", "0.5162911", "0.5155831", "0.51528525", "0.51487803", "0.51396585", "0.51382613", "0.5138009", "0.51369387", "0.5132596", "0.5130728", "0.5130728", "0.5126496", "0.51262647", "0.51092046", "0.5105601", "0.5105601", "0.5100507", "0.50987977" ]
0.5269131
62
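Editorial aside on the row above ("Write a volume to a file path."): a minimal usage sketch of the retrieved `write` helper, for illustration only. `create_directories_for_file_name` is not defined in the row, so a stand-in is assumed here, and the sketch calls `sitk.WriteImage(img, path, True)` instead of the row's three-argument `ImageFileWriter.Execute`, which is version-dependent in SimpleITK. The output path and array contents are hypothetical example values.

# Minimal sketch, not the dataset's own code: exercises the retrieved `write`
# helper for the query "Write a volume to a file path.".
import os

import numpy as np
import SimpleITK as sitk


def create_directories_for_file_name(path):
    # Stand-in for the helper assumed (but not defined) in the retrieved snippet.
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)


def write(img, path):
    # Same effect as the row's snippet; sitk.WriteImage(image, fileName, useCompression)
    # avoids the version-dependent three-argument ImageFileWriter.Execute call.
    create_directories_for_file_name(path)
    sitk.WriteImage(img, path, True)


if __name__ == "__main__":
    volume = sitk.GetImageFromArray(np.zeros((8, 16, 16), dtype=np.uint8))
    write(volume, "out/volume.nii.gz")  # hypothetical output path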
r""" Calculates precipitable water (cm) from ambient air temperature (C) and relatively humidity (%) using an empirical model. The accuracy of this method is approximately 20% for moderate PW (13 cm) and less accurate otherwise.
def gueymard94_pw(temp_air, relative_humidity):
    T = temp_air + 273.15  # Convert to Kelvin  # noqa: N806
    RH = relative_humidity  # noqa: N806

    theta = T / 273.15  # Eq. 1 from Keogh and Blakers

    pw = (
        0.1 * (0.4976 + 1.5265*theta +
               np.exp(13.6897*theta - 14.9188*(theta)**3)) *
        (216.7*RH/(100*T)*np.exp(22.330 - 49.140*(100/T) -
                                 10.922*(100/T)**2 - 0.39015*T/100)))

    pw = np.maximum(pw, 0.1)

    return pw
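Editorial aside: a short worked call of the formula above, for illustration only. The inputs are arbitrary example values, NumPy is assumed, and the `gueymard94_pw` definition from the document field (which appears to come from pvlib's atmosphere module, i.e. `pvlib.atmosphere.gueymard94_pw`) is assumed to be in scope.

# Worked call of gueymard94_pw as defined in the document field above; example
# inputs only, not values taken from the dataset.
import numpy as np

temp_air = np.array([20.0, 30.0])           # ambient air temperature, C
relative_humidity = np.array([80.0, 50.0])  # relative humidity, %

pw = gueymard94_pw(temp_air, relative_humidity)
print(pw)  # roughly [3.0, 3.3] cm, i.e. within the "moderate PW" range noted in the query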
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_compensated_temperature() -> float:\n comp_factor = 2.25\n cpu_temp = get_cpu_temperature()\n raw_temp = bme280.get_temperature()\n comp_temp = raw_temp - ((cpu_temp - raw_temp) / comp_factor)\n # print(\"\"\"\n # Compensated_Temperature: {:05.2f} *C\n # Pressure: {:05.2f} hPa\n # Relative humidity: {:05.2f} %\n # \"\"\".format(temperature, pressure, humidity))\n return comp_temp", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def get_cape(temp,pres,dewpt,hght,startp,startt,startdp,totalcape=False): \n\n # Check units\n # Init temp is startt in C, Init dew point is stwrtdp,\n # pressure levels are in hPa \n temp = temp - 273.15 # convert temperature to celsius\n dewpt = dewpt - 273.15 # convert dewpoint to celsius\n pres = pres/100 # convert pressure to hPa\n \n \n inds = np.where( (pres < startp) ) \n tmp = pres[inds]\n del pres\n #pres = tmp[::-1]\n pres = tmp[:]\n del tmp \n startp = startp/100\n \n tmp = temp[inds]\n del temp\n #temp = tmp[::-1]\n temp = tmp[:]\n del tmp \n\n tmp = dewpt[inds]\n del dewpt\n #dewpt = tmp[::-1]\n dewpt = tmp[:]\n del tmp \n\n tmp = hght[inds]\n del hght\n #hght = tmp[::-1]\n hght = tmp[:]\n del tmp \n\n \n # Get Sub-LCL traces \n presdry,tempdry,tempiso=dry_ascent(startp,startt-degCtoK,startdp-degCtoK) \n \n\n # make lcl variables explicit\n P_lcl=presdry[-1]\n T_lcl=tempdry[-1]\n\n # Now lift a wet parcel from the intersection point\n # preswet=linspace(P_lcl,100,101)\n preswet,tempwet=moist_ascent(P_lcl,T_lcl)\n\n # tparcel is the concatenation of tempdry and \n # tempwet, and so on.\n \n tparcel=np.concatenate((tempdry,tempwet[1:]))\n pparcel=np.concatenate((presdry,preswet[1:]))\n\n # Interpolating the environmental profile onto the \n # parcel pressure coordinate\n # tempenv=interp(preswet,pres[::-1],temp[::-1])\n ## NEW, for total column:\n tempenv=interp(pparcel,pres[::-1],temp[::-1])\n\n\n # now solve for the equlibrium levels above LCL\n # (all of them, including unstable ones)\n # eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])\n # NEW, for total column:\n # On second thought, we don't really want/need\n # any equilibrium levels below LCL\n # eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])\n # This is equivalent to the old statement :\n eqlev,stab=solve_eq(pparcel[pparcel<=P_lcl][::-1],\\\n (tparcel-tempenv)[pparcel<=P_lcl][::-1])\n\n aa = tparcel-tempenv\n\n # Sorting index by decreasing pressure\n I=np.argsort(eqlev)[::-1]\n eqlev=eqlev[I]; stab=stab[I]\n\n # temperatures at the equilibrium level\n # tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])\n ## NEW, for total column:\n tempeq=interp(eqlev,pparcel[::-1],tparcel[::-1])\n\n # This helps with debugging\n # for ii,eq in enumerate(eqlev):\n # print \"%5.2f %5.2f %2d\"%(eq,tempeq[ii],stab[ii])\n\n # need environmental temperature at LCL\n tenv_lcl=interp(P_lcl,pparcel[::-1],tempenv[::-1])\n\n isstab=np.where(stab==1.,True,False)\n unstab=np.where(stab==1.,False,True) \n\n if eqlev.shape[0]==0:\n # no unstable layers in entire profile\n # because the parcel never crosses the tenv\n P_lfc=float('NaN')\n P_el=float('NaN')\n elif T_lcl>tenv_lcl:\n # check LCL to see if this is unstable\n P_lfc=P_lcl\n if totalcape:\n 
P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n elif eqlev.shape[0]>1:\n # Parcel is stable at LCL so LFC is the \n # first unstable equilibrium level and \n # \"EQ\" level is the first stable equilibrium \n # level\n P_lfc=eqlev[unstab][0]\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n else:\n # catch a problem... if there is only\n # one eqlev and it's stable (this is \n # unphysical), then it could be a vertical\n # resolution thing. This is a kind of \n # \"null\" option\n try:\n\t P_el=eqlev[isstab][0]\n P_lfc=eqlev[isstab][0]\n except:\n\t P_el=eqlev[unstab][0]\n P_lfc=eqlev[unstab][0]\t\n\t\n if np.isnan(P_lfc):\n return P_lcl,P_lfc,P_el,0,0\n\n # need to handle case where dwpt is not available \n # above a certain level for any reason. Most simplest \n # thing to do is set it to a reasonably low value; \n # this should be a conservative approach!\n \n #dwpt=dewpt.copy().soften_mask()\n [inds] = np.where(np.isnan(dewpt))\n dwpt = dewpt\n dwpt[inds] = dwpt.min()\n \n # raise ValueError\n #if dwpt[(pres>=P_el).data*(pres<P_lfc).data].mask.any():\n # print \"WARNING: substituting dwpt.min() for masked values of DWPT in this sounding\"\n #dwpt[dwpt.mask]=dwpt.min()\n # dwptenv=interp(preswet,pres[::-1],dwpt[::-1])\n # NEW:\n\n dwptenv=interp(pparcel,pres[::-1],dwpt[::-1])\n\n\n \n #if hght[(pres>=P_el).data].mask.any():\n # raise NotImplementedError, \"TODO: Implement standard atmosphere to substitute missing heights\"\n # hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])\n # NEW:\n hghtenv=interp(pparcel,pres[::-1],hght[::-1])\n \n\n # Areas of POSITIVE Bouyancy\n # cond1=(tempwet>=tempenv)*(preswet<=P_lfc)*(preswet>P_el)\n # NEW:\n cond1=(tparcel>=tempenv)*(pparcel<=P_lfc)*(pparcel>P_el)\n # Areas of NEGATIVE Bouyancy\n # cond2=(tempwet<tempenv)*(preswet<=P_lcl)*(preswet>P_el)\n # NEW:\n if totalcape:\n cond2=(tparcel<tempenv)*(pparcel>P_el)\n else:\n cond2=(tparcel<tempenv)*(pparcel>P_lfc)\n # Do CAPE calculation\n # 1. Virtual temperature of parcel... 
remember it's saturated above LCL.\n # e_parcel=SatVap(tempwet)\n # Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)\n # e_env=SatVap(dwptenv)\n # Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)\n # NEW:\n e_parcel=SatVap(tparcel)\n Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)\n e_env=SatVap(dwptenv)\n Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)\n\n CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/Tv_env[cond1],hghtenv[cond1])\n CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/Tv_env[cond2],hghtenv[cond2])\n\n return P_lcl,P_lfc,P_el,CAPE,CIN", "def calculate_water_vapour_pressure(self, T=None, units='atm'): # temp in Kelvin\n A,B,C = self.get_ABC(T=T)\n \n if A is not None and B is not None and C is not None:\n # bar \n p_vap_bar = math.pow(10, (A-B/(C+T)))\n if units=='bar':\n return p_vap_bar\n \n # atm\n elif units=='atm': \n p_vap_atm = convertor.convert(\n p_vap_bar, \n currentUnits='bar', \n newUnits='atm')\n return p_vap_atm\n \n else:\n return None\n else:\n return None", "def Latentc(tempc):\n \n return 1000*(2500.8 - 2.36*tempc + 0.0016*tempc**2 - 0.00006*tempc**3)", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def compute_dewpoint(temperature, humidity):\n\n temp_C = (temperature - 32) * 5 / 9 # Convert temperature from deg F to deg C\n rh = humidity / 100\n\n b = 18.678\n c = 257.14 # deg C\n\n gamma = math.log(rh) + (b * temp_C) / (c + temp_C)\n tdp = c * gamma / (b -gamma)\n\n tdp_F = 9 / 5 * tdp + 32 # Convert temperature from deg C to deg F\n return tdp_F;", "def get_corrected_ppm(self, temperature, humidity):\n return self.PARA * math.pow((self.get_corrected_resistance(temperature, humidity)/ self.RZERO), -self.PARB)", "def get_corrected_ppm(self, temperature, humidity):\n return self.PARA * math.pow((self.get_corrected_resistance(temperature, humidity)/ self.RZERO), -self.PARB)", "def tempWater(sample):\n sample *= .0009\n sample *= 1000\n celsius = (sample - 20.5128) * 0.0512\n return round(celsius,2)", "def get_cooling_output_for_supply_air_estimation(\n a_a: float, q: float, mu_c: float, v_vent: np.ndarray,\n theta_ex: np.ndarray, x_ex: np.ndarray, j: np.ndarray,\n hc_period: np.ndarray, n_p: np.ndarray, q_gen: np.ndarray, w_gen: np.ndarray, 
v_local: np.ndarray,\n theta_set_c: float, x_set_c: float):\n\n # specific heat of air, J/kgK\n c = get_specific_heat()\n\n # air density, kg/m3\n rho = get_air_density()\n\n # latent heat of evaporation, kJ/kg\n l_wtr = get_evaporation_latent_heat()\n\n q_d_hs_cs = np.maximum(\n (((q - 0.35 * 0.5 * 2.4) * a_a + c * rho * (v_local + sum(v_vent)) / 3600) * (\n theta_ex - theta_set_c)\n + mu_c * a_a * j + q_gen + n_p * 51.0) * 3600 * 10 ** (-6),\n 0.0) * (hc_period == 'c')\n\n q_d_hs_cl = np.maximum(\n (((v_local + sum(v_vent)) * rho * (x_ex - x_set_c) * 10 ** 3 + w_gen) * l_wtr\n + n_p * 40.0 * 3600) * 10 ** (-6), 0.0) * (hc_period == 'c')\n\n return q_d_hs_cs + q_d_hs_cl", "def _calculate_temperature(c, h):\n\n return (c - 331.4 - 0.0124 * h) / 0.6", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def C_P(self):\n return self.generic_getter(\n get_heat_capacity_pressure, \"C_P\", \"convert_heat_capacity\"\n )", "def compute_water_charge(self, volume, time, units_volume='ml'):\n if units_volume == 'ml':\n volume_iu = volume / 1e6\n else:\n raise NotImplementedError\n \n return volume_iu / time", "def get_decided_outlet_supply_air_absolute_humidity_for_cooling(\n x_req_c: np.ndarray, v_supply: np.ndarray, x_hs_out_min_c: np.ndarray) -> np.ndarray:\n\n return np.maximum(np.sum(x_req_c * v_supply / v_supply.sum(axis=0), axis=0), x_hs_out_min_c)", "def get_capacitive_rain_sensor_temp(\n self, rain_sensor_temp: Optional[int] = None\n ) -> float:\n # TODO: these values were hardcoded but now are taken from the CW.\n # Check which way is the \"true\" way based on the sensor type (capacitive vs Hydredon)\n # rain_pull_up_resistance = 1\n # rain_res_at_25 = 1\n # rain_beta = 3450\n absolute_zero = 273.15\n\n if rain_sensor_temp is None:\n rain_sensor_temp = self.raw_rain_sensor_temp\n\n if rain_sensor_temp < 1:\n rain_sensor_temp = 1\n elif rain_sensor_temp > 1022:\n rain_sensor_temp = 1022\n\n r = self.rain_pull_up_resistance / ((1023 / rain_sensor_temp) - 1)\n r = math.log(r / self.rain_res_at_25)\n\n return 1 / (r / self.rain_beta + 1 / (absolute_zero + 25)) - absolute_zero", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def ionization_constant_water(temperature=298.15, density=None):\n import numpy as np\n\n # using Model II from Bandura etal\n # model parameters\n n = 6\n alpha_0 = -0.864671\n alpha_1 = 8659.19\n alpha_2 = -22786.2\n beta_0 = 0.642044\n beta_1 = -56.8534\n beta_2 = -0.375754\n\n # Water parameters\n Mw = 18.01528\n\n # temperature\n T = temperature\n\n # density\n if density:\n D = density\n else:\n D = density_water(T)\n\n pKWG = 0.61415 \\\n + 48251.33 / T \\\n - 67707.93 / T**2.0 \\\n + 10102100.0 / T**3.0\n\n Z = D * np.exp(alpha_0 \\\n + alpha_1/T \\\n + alpha_2/T**2 *np.power(D,2.0/3.0)\n )\n\n pKw = -2*n*(\n np.log10(1 + Z) - (Z/(Z + 1)) * D * (\n beta_0 + beta_1/T + beta_2*D\n )\n ) + pKWG + 2 * np.log10(Mw/1000.0)\n\n return np.power(10, -pKw)", "def cp(wair,pres,entr=None,temp=None,airf=None,dhum=None,chkvals=False,\n 
chktol=_CHKTOL,airf0=None,temp0=None,dhum0=None,chkbnd=False,\n mathargs=None):\n airf, temp, dhum = eq_wpte(wair,pres,entr=entr,temp=temp,airf=airf,\n dhum=dhum,chkvals=chkvals,chktol=chktol,airf0=airf0,temp0=temp0,\n dhum0=dhum0,chkbnd=chkbnd,mathargs=mathargs)\n h_s = temp\n h_ss = iceair_h(0,2,0,wair,pres,temp=temp,airf=airf,dhum=dhum)\n cp = h_s/h_ss\n return cp", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def get_air_conditioned_room_absolute_humidity(\n hc_period: np.ndarray, x_ex: np.ndarray, x_set_c: float) -> np.ndarray:\n\n return x_set_c * (hc_period == 'c') + x_ex * (hc_period != 'c')", "def _calculate_temp_in_c(temp):\r\n return str((temp * 9 / 5.0 + 32) if temp else \"\")", "def temperature() -> float:", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def get_pressure(self): # This function implements the equations needed to convert the digital data into mbars\n self.digital_pressure_data()\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n temperature, dT=self.get_temperature()\n OFF = ((C_2 * (2**16)) + ((C_4 * dT)/2**7))\n SENS = (C_1 * (2**15)) + ((C_3 * dT)/(2**8))\n pressure=(((self.presadc*(SENS/(2**21)))-OFF)/(2**15))/100\n return pressure, temperature", "def get_actual_air_conditioned_temperature(\n hc_period: np.ndarray,\n theta_ac: np.ndarray, v_supply: np.ndarray, theta_supply_h: np.ndarray, theta_supply_c: np.ndarray,\n l_d_h: np.ndarray, l_d_cs: np.ndarray,\n u_prt: float, a_prt: np.ndarray, a_hcz: np.ndarray, q: float) -> np.ndarray:\n\n rho = get_air_density()\n c = get_specific_heat()\n\n a_prt = a_prt.reshape(1, 5).T\n a_hcz = a_hcz[0:5].reshape(1, 5).T\n\n theta_ac_act_h = np.maximum(theta_ac + (c * rho * v_supply * (theta_supply_h - theta_ac) - l_d_h * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n theta_ac_act_c = np.minimum(theta_ac - (c * rho * v_supply * (theta_ac - theta_supply_c) - l_d_cs * 10 ** 6)\n / (c * rho * v_supply + (u_prt * a_prt + q * a_hcz) * 3600), theta_ac)\n\n return theta_ac_act_h * (hc_period == 'h') + theta_ac_act_c * (hc_period == 'c') + theta_ac * (hc_period == 'm')", "def calculate_dew_point(temp, hum):\n return temp - (100 - hum) / 5", "def partial_pressure(fraction=3, tem=283.15, pre=1.21325):\n pwater = np.exp(77.345 + 0.0057 * tem - 7235 / tem) / (tem ** 8.2) / 100000\n # partial pressure of H2O in air by relation, [Bar]\n p_hcl = fraction * 10 ** -5 * pre\n # firstly use 3ppm concentration to do estimation [Bar]\n return tem, pre, pwater, p_hcl", "def heat_capacity_of_air(self) 
-> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2", "def test_ccl_with_ml():\n pressure = np.array([992.0, 990.0, 983.0, 967.0, 950.0, 944.0, 928.0, 925.0, 922.0,\n 883.0, 877.7, 858.0, 853.0, 850.0, 835.0, 830.0, 827.0, 826.0,\n 813.6, 808.0, 799.0, 784.0, 783.3, 769.0, 760.0, 758.0, 754.0,\n 753.0, 738.0, 725.7, 711.0, 704.0, 700.0, 685.0, 672.0, 646.6,\n 598.6, 596.0, 587.0, 582.0, 567.0, 560.0, 555.0, 553.3, 537.0,\n 526.0, 521.0, 519.0, 515.0, 500.0]) * units.mbar\n temperature = np.array([6.8, 6.2, 7.8, 7.6, 7.2, 7.6, 6.6, 6.4, 6.2, 3.2, 2.8, 1.2,\n 1.0, 0.8, -0.3, -0.1, 0.4, 0.6, 0.9, 1.0, 0.6, -0.3, -0.3,\n -0.7, -1.5, -1.3, 0.2, 0.2, -1.1, -2.1, -3.3, -2.3, -1.7, 0.2,\n -0.9, -3.0, -7.3, -7.5, -8.1, -8.3, -9.5, -10.1, -10.7,\n -10.8, -12.1, -12.5, -12.7, -12.9, -13.5, -15.5]) * units.degC\n dewpoint = np.array([5.1, 5.0, 4.2, 2.7, 2.2, 0.6, -2.4, -2.6, -2.8, -3.8, -3.6,\n -3.1, -5.0, -4.2, -1.8, -4.3, -7.6, -6.4, -8.2, -9.0, -10.4,\n -9.3, -9.6, -14.7, -11.5, -12.3, -25.8, -25.8, -19.1, -19.6,\n -20.3, -42.3, -39.7, -46.8, -46.8, -46.7, -46.5, -46.5,\n -52.1, -36.3, -47.5, -30.1, -29.7, -30.4, -37.1, -49.5,\n -36.7, -28.9, -28.5, -22.5]) * units.degC\n\n ccl_p, ccl_t, t_c = ccl(pressure, temperature, dewpoint,\n mixed_layer_depth=500 * units.m, which='all')\n\n assert_array_almost_equal(ccl_p, np.array(\n [850.600930, 784.325312, 737.767377, 648.076147]) * units.mbar, 5)\n assert_array_almost_equal(ccl_t, np.array(\n [0.840118, -0.280299, -1.118757, -2.875716]) * units.degC, 5)\n assert_array_almost_equal(t_c, np.array(\n [13.146845, 18.661621, 22.896152, 32.081388]) * units.degC, 5)", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def get_air_conditioned_room_temperature(\n hc_period: np.ndarray,\n theta_ex: np.ndarray, theta_set_h: float, theta_set_c: float) -> np.ndarray:\n\n theta_ac_m = np.clip(theta_ex, theta_set_h, theta_set_c)\n\n return theta_set_h * (hc_period == 'h') + theta_set_c * (hc_period == 'c') + theta_ac_m * (hc_period == 'm')", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def CMB_spec(eng, temp):\n if np.issubdtype(type(eng), float):\n eng = np.array([eng])\n prefactor = 8*np.pi*(eng**2)/((ele_compton*me)**3)\n phot_spec_density = prefactor*[1/(np.exp(photeng/temp) - 1) if photeng/temp < 500 else 0 for photeng in eng]\n\n if np.issubdtype(type(eng), float): \n return phot_spec_density[0] \n else: \n return phot_spec_density", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def water_vapour(t):\n T_0 = 273.15\n T_rw = 35.86 # over water\n a = 17.269\n # cdo -mulc,610.78 -exp -div -mulc,17.5 -subc,273.15 a\n return 610.78 * np.exp(a * (t - T_0) / (t - T_rw))", "def test_wet_psychrometric_rh():\n p = 1013.25 * units.mbar\n dry_bulb_temperature = 20. * units.degC\n wet_bulb_temperature = 18. 
* units.degC\n psychrometric_rh = relative_humidity_wet_psychrometric(p, dry_bulb_temperature,\n wet_bulb_temperature)\n assert_almost_equal(psychrometric_rh, 82.8747 * units.percent, 3)", "def get_supply_air_absolute_humidity_for_cooling(\n x_hs_out_c: np.ndarray, x_ac: np.ndarray, operation: np.ndarray) -> np.ndarray:\n\n return np.where(operation == 'c', x_hs_out_c, x_ac)", "def calc_supply_temp(tr, Q, m, cp, case):\n if m > 0:\n if case == \"DH\":\n ts = tr + Q / (m * cp)\n else:\n ts = tr - Q / (m * cp)\n else:\n ts = 0\n return ts", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def plc_temp(coil_df):", "def get_chiller_temperature(self) -> float:\n\n return self.send(self.cmd.GET_COOLING_ACT)", "def calc_h_lat(dry_bulb_C, humidity_ratio_out_kgperkg):\n\n h_kJ_kg = humidity_ratio_out_kgperkg * (dry_bulb_C * CPW_kJ_kgC + h_we_kJ_kg)\n\n return h_kJ_kg", "def initialize_thermal_prediction(self, config_file):\n conf_pred = config_file['prediction']['heat']\n conf_powr = config_file['prediction']['power']\n # conf_pred\n n_day = conf_pred['n_day']\n n_values = conf_pred['n_values_per_day']\n precision_in_h = conf_pred['precision_in_h']\n use_predef_loads = conf_pred['use_predef_loads']\n predef_loads_file_path = conf_pred['path_loads']\n # heating curve\n conf_hk = config_file['components']['heating_curve']\n hk_ta = conf_hk['design_ambient_temperature_oC']\n hk_ti = conf_hk['design_indoor_temperature_oC']\n hk_tv = conf_hk['design_supply_temperature_oC']\n hk_tr = conf_hk['design_return_temperature_oC']\n hk_n = conf_hk['radiator_coefficient_n']\n hk_m = conf_hk['radiator_coefficient_m']\n hk_qn = conf_hk['design_heat_load_in_kW']\n # chp unit\n patm = utils.get_pressure_in_MPa()\n calcopt = utils.get_calc_option()\n eps_el_chp = config_file['components']['chp_unit']['electrical_efficiency']\n eps_th_chp = config_file['components']['chp_unit']['thermal_efficiency']\n qel_n_chp = config_file['components']['chp_unit']['max_electric_power_in_kW']\n chp_tinp = config_file['components']['chp_unit']['design_input_temperature_oC']\n chp_tmax = config_file['components']['chp_unit']['design_output_temperature_oC']\n qth_n_chp = eps_th_chp * qel_n_chp / eps_el_chp # in kW\n mstr_chp = qth_n_chp / (utils.cp_fluid_water(0.5 * (chp_tmax + chp_tinp), patm, calcopt) * (chp_tmax - chp_tinp)) # in kg/s = kW / (kJ/kg/K * K)\n # gas boiler\n qth_n_gb = config_file['components']['gas_boiler']['max_thermal_power_in_kW']\n gb_tinp = config_file['components']['gas_boiler']['design_input_temperature_oC']\n gb_tmax = config_file['components']['gas_boiler']['design_output_temperature_oC']\n mstr_gb = qth_n_gb / (utils.cp_fluid_water(0.5 * (gb_tinp + gb_tmax), patm, calcopt) * (gb_tmax - gb_tinp)) # in kg/s = kW / (kJ/kg/K * K) # in kg/s = kW / (kJ/kg/K * K)\n # storage tank\n effective_height = config_file['components']['storage_tank']['effective_heigth_in_m']\n inner_radius = config_file['components']['storage_tank']['inner_radius_tank_in_m']\n effective_pipe_volume = config_file['components']['storage_tank']['effective_coil_volume_in_m3'] # in m3 - 
water volume of the pipes of inner heat exchanger\n effective_volume = config_file['components']['storage_tank']['effective_volume_in_m3']\n if (effective_volume <= 0.0):\n effective_volume = math.pi * inner_radius * inner_radius * effective_height - effective_pipe_volume # in m3\n nr_calc = 20\n slice_volume = effective_volume / nr_calc # in m3\n qmax_rod_el = config_file['components']['storage_tank']['power_heating_rod_in_kW']\n open_weather_map_active = config_file['calculation']['platform_mode']['open_weather_map_active']\n # conf_powr\n #print('\\n initialize_thermal_prediction')\n #print('use_predef_loads = {}; {}'.format(use_predef_loads,type(use_predef_loads)))\n #print('predef_loads_file_path = {}; {}'.format(predef_loads_file_path,type(predef_loads_file_path)))\n return predict_thermal.predict_Q(n_day, n_values, precision_in_h, predef_loads_file_path, use_predef_loads, \n self.output_horizon_in_h, self.output_resolution_in_s, conf_powr, hk_tv, hk_tr, hk_ti, hk_ta, \n hk_qn, hk_n, hk_m, chp_tmax, gb_tmax, slice_volume, mstr_chp, mstr_gb, qmax_rod_el, eps_th_chp, \n eps_el_chp, open_weather_map_active)", "def target_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_high_c\"))\r\n return kelvin_to_celsius(self._target_temperature_high)", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def internal_temp_c(self) -> int:\n return int(self._device_info[\"Temperature\"])", "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def get_decided_outlet_supply_air_temperature_for_cooling(\n vav_system: bool, theta_req_c: np.ndarray, v_d_supply: np.ndarray,\n theta_hs_out_min_c: np.ndarray) -> np.ndarray:\n\n if vav_system:\n return np.maximum(np.min(theta_req_c, axis=0), theta_hs_out_min_c)\n else:\n return np.maximum(np.sum(theta_req_c * v_d_supply / v_d_supply.sum(axis=0), axis=0), theta_hs_out_min_c)", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def get_percent_wet():\n # Create an ADS1115 ADC (16-bit) instance.\n adc = Adafruit_ADS1x15.ADS1115()\n\n GAIN = 1\n DRY = 20280 # 100% Dry\n WET = 10140 # 100% Wet\n\n value = adc.read_adc(0, gain=GAIN)\n \n # print \"value: %d\" % value\n \n percent_dry = ((value - WET)*100)/(DRY-WET)\n percent_wet = 100 - percent_dry\n\n return percent_wet", "def p_mx_c(pm,px,py,pyx_c,pym_c,beta):\n \n pmx_c = np.zeros((pm.size,px.size)) # P(M|X) matrix to be returned\n for mi in range(pm.size):\n for xi in range(px.size):\n pmx_c[mi,xi] = pm[mi] * np.exp(-beta * entropy(pyx_c[:,xi], pym_c[:,mi], base=2))\n z = pmx_c.sum(axis=0)\n pmx_c /= z #Normalize\n \n \t\n return pmx_c, z", "def get_requested_supply_air_absolute_humidity_for_cooling(\n x_ac: np.ndarray, l_d_cl: np.ndarray, v_d_supply: np.ndarray) -> np.ndarray:\n\n # air density, kg/m3\n rho = get_air_density()\n\n # latent heat of evaporation, kJ/kg\n l_wtr = get_evaporation_latent_heat()\n\n return x_ac - l_d_cl * 10 ** 3 / (v_d_supply * rho * l_wtr)", "def 
convert_f_to_c(temp_in_farenheit):\n \n temp=round((float(temp_in_farenheit)-32)*5/9,1)\n \n return (temp)", "def initialize_thermal_prediction(self, config_file):\n conf_pred = config_file['prediction']['heat']\n conf_powr = config_file['prediction']['power']\n # config_json\n n_day = conf_pred['n_day']\n n_values = conf_pred['n_values_per_day']\n precision_in_h = conf_pred['precision_in_h']\n use_predef_loads = conf_pred['use_predef_loads']\n predef_loads_file_path = conf_pred['path_loads']\n # heating curve\n conf_hk = config_file['components']['heating_curve']\n hk_ta = conf_hk['design_ambient_temperature_oC']\n hk_ti = conf_hk['design_indoor_temperature_oC']\n hk_tv = conf_hk['design_supply_temperature_oC']\n hk_tr = conf_hk['design_return_temperature_oC']\n hk_n = conf_hk['radiator_coefficient_n']\n hk_m = conf_hk['radiator_coefficient_m']\n hk_qn = conf_hk['design_heat_load_in_kW']\n # chp unit\n patm = utils.get_pressure_in_MPa()\n calcopt = utils.get_calc_option()\n eps_el_chp = config_file['components']['chp_unit']['electrical_efficiency']\n eps_th_chp = config_file['components']['chp_unit']['thermal_efficiency']\n qel_n_chp = config_file['components']['chp_unit']['max_electric_power_in_kW']\n chp_tinp = config_file['components']['chp_unit']['design_input_temperature_oC']\n chp_tmax = config_file['components']['chp_unit']['design_output_temperature_oC']\n qth_n_chp = eps_th_chp * qel_n_chp / eps_el_chp # in kW\n mstr_chp = qth_n_chp / (utils.cp_fluid_water(0.5 * (chp_tmax + chp_tinp), patm, calcopt) * (chp_tmax - chp_tinp)) # in kg/s = kW / (kJ/kg/K * K)\n # gas boiler\n qth_n_gb = config_file['components']['gas_boiler']['max_thermal_power_in_kW']\n gb_tinp = config_file['components']['gas_boiler']['design_input_temperature_oC']\n gb_tmax = config_file['components']['gas_boiler']['design_output_temperature_oC']\n mstr_gb = qth_n_gb / (utils.cp_fluid_water(0.5 * (gb_tinp + gb_tmax), patm, calcopt) * (gb_tmax - gb_tinp)) # in kg/s = kW / (kJ/kg/K * K) # in kg/s = kW / (kJ/kg/K * K)\n # storage tank\n effective_height = config_file['components']['storage_tank']['effective_heigth_in_m']\n inner_radius = config_file['components']['storage_tank']['inner_radius_tank_in_m']\n effective_pipe_volume = config_file['components']['storage_tank']['effective_coil_volume_in_m3']\n effective_volume = config_file['components']['storage_tank']['effective_volume_in_m3']\n if (effective_volume <= 0.0):\n effective_volume = math.pi * inner_radius * inner_radius * effective_height - effective_pipe_volume # in m3\n nr_calc = 20\n slice_volume = effective_volume / nr_calc # in m3\n qmax_rod_el = config_file['components']['storage_tank']['power_heating_rod_in_kW']\n open_weather_map_active = config_file['calculation']['platform_mode']['open_weather_map_active']\n # conf_powr\n #print('\\n initialize_thermal_prediction')\n #print('use_predef_loads = {}; {}'.format(use_predef_loads,type(use_predef_loads)))\n #print('predef_loads_file_path = {}; {}'.format(predef_loads_file_path,type(predef_loads_file_path)))\n return predict_thermal.predict_Q(n_day, n_values, precision_in_h, predef_loads_file_path, use_predef_loads, self.output_horizon_in_h, \n self.output_resolution_in_s, conf_powr, hk_tv, hk_tr, hk_ti, hk_ta, hk_qn, hk_n, hk_m, chp_tmax, gb_tmax, slice_volume, \n mstr_chp, mstr_gb, qmax_rod_el, eps_th_chp, eps_el_chp, open_weather_map_active)", "def calc_temp_withlosses(t0, Q, m, cp, case):\n if m > 0:\n if case == \"positive\":\n t1 = t0 + Q / (m * cp)\n else:\n t1 = t0 - Q / (m * cp)\n else:\n t1 = 0\n 
return t1", "def get_hc_external(self, weather, surface, h_surface, terrain):\r\n roughness = surface.construction[0].roughness_unit # Change back to this line...left as below to match Na's\r\n if roughness == \"VeryRough\":\r\n D = 11.58\r\n E = 5.894\r\n F = 0\r\n elif roughness == \"Rough\":\r\n D = 12.49\r\n E = 4.065\r\n F = 0.028\r\n elif roughness == \"MediumRough\":\r\n D = 10.79\r\n E = 4.192\r\n F = 0.0\r\n elif roughness == \"MediumSmooth\":\r\n D = 8.23\r\n E = 4.0\r\n F = -0.057\r\n elif roughness == \"Smooth\":\r\n D = 10.22\r\n E = 3.1\r\n F = 0.0\r\n elif roughness == \"VerySmooth\":\r\n D = 8.23\r\n E = 3.33\r\n F = -0.036\r\n else:\r\n D = 8.23\r\n E = 4.0\r\n F = -0.057\r\n print \"No Roughness Value Found so Set Default Values of 8.23,4.0,-0.057\"\r\n\r\n wind_speed_temp = weather[\"wind_speed\"]\r\n # Terrain Lookup Table\r\n if terrain == 'Flat or Open Countryside':\r\n sigma = 270\r\n a = 0.14\r\n elif terrain == 'Rough or Wooded Country':\r\n sigma = 370\r\n a = 0.22\r\n elif terrain == 'Towns and City Scapes':\r\n sigma = 460\r\n a = 0.33\r\n elif terrain == 'Ocean Front Areas':\r\n sigma = 210\r\n a = 0.10\r\n elif terrain == 'Urban, Industrial, or Forest':\r\n sigma = 370\r\n a = 0.22\r\n else:\r\n sigma = 370\r\n a = 0.22\r\n print \"No Terrain Type Found so Set Default Values of 370,0.22\"\r\n terrain_sigma = sigma\r\n terrain_cof = a\r\n\r\n # Adjust the wind speed...Stable air above human inhabited areas:\r\n #wind_speed = wind_speed_temp * ((h_surface / 10) ** 0.5) # This was the line used to get wind_speed before terrain was added\r\n # Wind speed corrected for terrain differences;\r\n wind_speed = wind_speed_temp * ((270/10) ** 0.14) * (h_surface/terrain_sigma) ** terrain_cof\r\n #print wind_speed\r\n # Calculate the hc_external\r\n # hc_external= D+E*Wind_speed+F*Wind_speed^2\r\n hc_external = D + (E * wind_speed) + (F * wind_speed ** 2)\r\n\r\n # depending on the direction of the wind adjust the hc_external...as of versions 3 and 4 this part seems omitted\r\n #x = abs(wind_speed_dir - azimuth)\r\n #if x > 100:\r\n # if x < 260:\r\n # hc_external *= 0.5\r\n #print \"hc_external : \", hc_external, D, E, F\r\n\r\n return round(hc_external, 5)", "def cmm(self) -> Optional[np.ndarray]:\n if self.sensorsz is None:\n return None\n return self.c * self.sensorsz / self.imgsz", "def __we_c(cls, calib, tc, temp, we_v, ae_v):\n we_t = we_v - (calib.we_elc_mv / 1000.0) # remove electronic we zero\n ae_t = ae_v - (calib.ae_elc_mv / 1000.0) # remove electronic ae zero\n\n we_c = tc.correct(calib, temp, we_t, ae_t)\n\n # print(\"A4Datum__we_c: we_t:%f ae_t:%f we_c:%s\" % (we_t, ae_t, we_c), file=sys.stderr)\n\n return we_c", "def _approx_wep(wair,entr,pres):\n pvmax = pres * (1-wair) / (1-wair + _EPSW*wair)\n if pvmax >= _PTPE:\n # Saturation would start at _TTP; use saturated heat capacity at _TTP\n a_t = (pres - _PTPE)/(pres - _PTPE + _EPSW*_PTPE)\n s_t = (wair*_CDRY*numpy.log(_TTP/_TCELS) - (1-wair)*_LILTP/_TTP\n - wair*_RDRY*numpy.log((pres-_PTPE)/_PATM)\n + wair*_RWAT*_EPSW*_PTPE/(pres-_PTPE)*_AVI)\n c_t = (wair*_CDRY + wair*(1-a_t)/a_t*_CVAP + (1-wair/a_t)*_CICE\n + wair*_RWAT*(1-a_t)*(_EPSW*a_t + 1-a_t)/_EPSW/a_t**2 * _AVI**2)\n temp = _TTP * numpy.exp(-(s_t-entr)/c_t)\n else:\n # Get approximate saturation temperature\n v = numpy.log(pres*(1-wair)/(_PTPE*(_EPSW*wair + 1-wair)))/_BVI\n r = _AVI/_BVI\n x = maths4.lamb2(v,r)\n tsat = _TTP/x\n ssat = (wair * (_CDRY*numpy.log(tsat/_TCELS)\n - _RDRY*numpy.log((pres-pvmax)/_PATM))\n + (1-wair) * 
(_CVAP*numpy.log(tsat/_TTP) + _LLVTP/_TTP\n - _RWAT*numpy.log(pvmax/_PTPE)))\n \n if entr >= ssat:\n ceff = wair*_CDRY + (1-wair)*_CVAP\n temp = _TTP * numpy.exp((entr-ssat)/ceff)\n else:\n csat = (wair*_CDRY + (1-wair)*_CVAP\n + (1-wair)*_RWAT*pres/(pres-pvmax)\n * ((_AVI+_BVI)*_TTP/tsat - _BVI)**2)\n temp = tsat * numpy.exp(-(ssat-entr)/csat)\n pvap = _PTPE * numpy.exp((_AVI+_BVI)*(1 - _TTP/temp)\n - _BVI*numpy.log(temp/_TTP))\n airf = (pres - pvap) / (pres - pvap + _EPSW*pvap)\n dhum = pres/(_RDRY*temp) / (airf + (1-airf)/_EPSW)\n return airf, temp, dhum", "def get_specific_heat() -> float:\n return 1006.0", "def pressure(z: tf.Tensor) -> tf.Tensor:\n return self._p_thermal * tf.math.exp(\n -(z + self._height * delta_t_frac *\n (tf.math.log(1.0 - delta_t_frac * tf.math.tanh(z / self._height)) -\n tf.math.log(1.0 + tf.math.tanh(z / self._height)) +\n z / self._height)) / h_sfc / (1.0 - delta_t_frac**2))", "def convert_f_to_c(temp_in_farenheit):\n temp_in_celcius = ((temp_in_farenheit - 32) * 5) / 9\n temp_in_celcius = round(temp_in_celcius, 1)\n return temp_in_celcius", "def lookup_effective_mass_area_factor(self, cm):\n\n if cm == 0.0:\n return 0.0\n elif 0.0 < cm <= 165000.0:\n return 2.5\n else:\n return 3.2", "def water_vapour_saturation_pressure(T, isCelsius = True):\n if isCelsius:\n T += 273.15\n\n if T < 273.15: \n warnings.warn(\"Temperature {:} K is below freezing. This function is not valid.\".format(T))\n elif T > 647.096:\n warnings.warn(\"Temperature {:} K is above the critical temperature. This function is not valid.\".format(T))\n \n Tc = 647.096 # critical temperature, K\n Pc = 22.064e6 # critical pressure Pa\n C1 = -7.85951783\n C2 = 1.84408259\n C3 = -11.7866497\n C4 = 22.6807411\n C5 = -15.9618719\n C6 = 1.80122502\n v = 1 - T / Tc # vartheta = transformed temperature\n \n a = C1 * v + C2 * v**1.5 + C3 * v**3 + C4 * v**3.5 + C5 * v**4 + C6 * v**7.5\n Pws = Pc * numpy.exp(Tc * a / T)\n return Pws", "def farenheit(ctemp):\n return round(9.0/5.0 * ctemp + 32)", "def pressure(self):\r\n self._read_temperature()\r\n\r\n # Algorithm from the BME280 driver\r\n # https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c\r\n adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped\r\n var1 = float(self._t_fine) / 2.0 - 64000.0\r\n var2 = var1 * var1 * self._pressure_calib[5] / 32768.0\r\n var2 = var2 + var1 * self._pressure_calib[4] * 2.0\r\n var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0\r\n var3 = self._pressure_calib[2] * var1 * var1 / 524288.0\r\n var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0\r\n var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]\r\n if var1 == 0:\r\n return 0\r\n if var1:\r\n pressure = 1048576.0 - adc\r\n pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1\r\n var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0\r\n var2 = pressure * self._pressure_calib[7] / 32768.0\r\n pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0\r\n\r\n pressure /= 100\r\n if pressure < _BME280_PRESSURE_MIN_HPA:\r\n return _BME280_PRESSURE_MIN_HPA\r\n if pressure > _BME280_PRESSURE_MAX_HPA:\r\n return _BME280_PRESSURE_MAX_HPA\r\n return pressure\r\n else:\r\n return _BME280_PRESSURE_MIN_HPA", "def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c", "def test_getThermalExpansionFactorConservedMassByLinearExpansionPercent(self):\n hotTemp = 700.0\n dLL = 
self.component.material.linearExpansionFactor(\n Tc=hotTemp, T0=self._coldTemp\n )\n ref = 1.0 + dLL\n cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n self.assertAlmostEqual(cur, ref)", "def compute_cm_fm(illuminant, gains, ccm, cal):\n\n ###########################################################################\n # Standard matrices.\n\n # W is the matrix that maps sRGB to XYZ.\n # See: http://www.brucelindbloom.com/\n W = numpy.array([\n [ 0.4124564, 0.3575761, 0.1804375],\n [ 0.2126729, 0.7151522, 0.0721750],\n [ 0.0193339, 0.1191920, 0.9503041]])\n\n # HH is the chromatic adaptation matrix from D65 (since sRGB's ref white is\n # D65) to D50 (since CIE XYZ's ref white is D50).\n HH = numpy.array([\n [ 1.0478112, 0.0228866, -0.0501270],\n [ 0.0295424, 0.9904844, -0.0170491],\n [-0.0092345, 0.0150436, 0.7521316]])\n\n # H is a chromatic adaptation matrix from D65 (because sRGB's reference\n # white is D65) to the calibration illuminant (which is a standard matrix\n # depending on the illuminant). For a D65 illuminant, the matrix is the\n # identity. For the A illuminant, the matrix uses the linear Bradford\n # adaptation method to map from D65 to A.\n # See: http://www.brucelindbloom.com/\n H_D65 = numpy.array([\n [ 1.0, 0.0, 0.0],\n [ 0.0, 1.0, 0.0],\n [ 0.0, 0.0, 1.0]])\n H_A = numpy.array([\n [ 1.2164557, 0.1109905, -0.1549325],\n [ 0.1533326, 0.9152313, -0.0559953],\n [-0.0239469, 0.0358984, 0.3147529]])\n H = [H_A, H_D65][illuminant]\n\n ###########################################################################\n # Per-model matrices (that should be the same for all units of a particular\n # phone/camera. These are statics in the HAL camera properties.\n\n # G is formed by taking the r,g,b gains and putting them into a\n # diagonal matrix.\n G = numpy.array([[gains[0],0,0], [0,gains[1],0], [0,0,gains[3]]])\n\n # S is just the CCM.\n S = numpy.array([ccm[0:3], ccm[3:6], ccm[6:9]])\n\n ###########################################################################\n # Per-unit matrices.\n\n # The per-unit calibration matrix for the given illuminant.\n CC = numpy.array([cal[0:3],cal[3:6],cal[6:9]])\n\n ###########################################################################\n # Derived matrices. 
These should match up with DNG-related matrices\n # provided by the HAL.\n\n # The color matrix and forward matrix are computed as follows:\n # CM = inv(H * W * S * G * CC)\n # FM = HH * W * S\n CM = numpy.linalg.inv(\n numpy.dot(numpy.dot(numpy.dot(numpy.dot(H, W), S), G), CC))\n FM = numpy.dot(numpy.dot(HH, W), S)\n\n # The color matrix is normalized so that it maps the D50 (PCS) white\n # point to a maximum component value of 1.\n CM = CM / max(numpy.dot(CM, (0.9642957, 1.0, 0.8251046)))\n\n return CM, FM", "def volumetric_heat_capacity(temperature):\n a = -2.4083\n b = 7.6006\n c = -8.2982\n d = 7.3301\n e = -4.2386\n f = 1.4294\n g = -0.24396\n h = 0.015236\n i = 0.0\n log_t = math.log10(temperature)\n f_exp = a + b*log_t + c*log_t**2.0 + d*log_t**3.0 + e*log_t**4.0 + f*log_t**5.0 + g*log_t**6.0 + \\\n h*log_t**7.0 + i*log_t**8.0\n g10_cp = 10.0**f_exp\n return g10_cp * G10NISTMaterialProperties.density", "def P(self, energy, newEnergy, temperature):\n \"\"\" This is the decision-rule, adapted from Nascimento, et al., 2009 (See references) \"\"\"\n \n delta = self.calcDelta(newEnergy, energy)\n\n minTemp = 0.00001 # use minimum to avoid div/0 and buffer overflow\n if temperature == 0:\n return minTemp\n elif temperature > minTemp:\n try:\n return math.exp(-1 * round(delta, 4) / round(temperature, 4))\n except OverflowError as detail:\n return minTemp\n else:\n return 1", "def partial_pressure(num_moles, ideal_gas_law, temp, volume, is_temp_kelvin=True):\n ATM = 0.0821\n KPA = 8.314\n MMHG = 62.4\n if ideal_gas_law == \"atm\":\n ideal_gas_law_number = ATM\n if ideal_gas_law == \"kpa\":\n ideal_gas_law_number = KPA\n if ideal_gas_law == \"mmhg\":\n ideal_gas_law_number = MMHG\n if not is_temp_kelvin:\n temp += 273\n nrt = (num_moles) * (ideal_gas_law_number) * (temp)\n final_pressure = nrt / volume\n return final_pressure", "def get_cold_junction_temperature(self):\n return self._mcp9600.get('COLD_JUNCTION').temperature", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def read_ambient_temperatureC(self, ):\n return self._read_temperature(MLX90614_TA)", "def _pwr_std_temp(rpm, MP, altitude):\n # get the power at sea level (i.e. point B on the left side of the Lycoming power chart)\n \n # get pwr at two even hundreds of rpm, and then interpolate\n if rpm >= 2600:\n rpm1 = 2600\n elif rpm <= 1800:\n rpm1 = 1800\n else:\n rpm1 = rpm - rpm % 100\n\n rpm2 = rpm1 + 100\n \n pwr_SL1 = _pwr_sl(rpm1, MP)\n pwr_SL2 = _pwr_sl(rpm2, MP)\n # print \"SL Pwr 1=\", pwr_SL1\n # print \"SL Pwr 2=\", pwr_SL2\n \n # get power at full throttle at this rpm and MP at altitude (i.e. point A on the right side of the Lycoming power chart)\n # density ratio at point A on the right side of the Lycoming power chart)\n pwr_FT1, DR_FT1 = _hp_at_MP_and_altitude(rpm1, MP)\n pwr_FT2, DR_FT2 = _hp_at_MP_and_altitude(rpm2, MP)\n # print \"FT pwr 1=\", pwr_FT1\n # print \"FT pwr 2=\", pwr_FT2\n # print \"DR FT 1=\", DR_FT1\n # print \"DR FT 2=\", DR_FT2\n \n # density ratio at sea level\n DR_sl = 1\n \n # density ratio for the actual conditions (i.e. point D on the right side of the Lycoming power chart)\n DR_test = SA.alt2density_ratio(altitude)\n # print \"DR_test=\", DR_test\n \n # function is unstable if the DR at FT is close to 1. 
This sends the slope off to unpredictable values.\n slope1=(pwr_FT1 - pwr_SL1) / (DR_FT1 - DR_sl)\n slope2=(pwr_FT2 - pwr_SL2) / (DR_FT2 - DR_sl)\n \n if MP > 28:\n if slope1 < -80:\n slope1=-62\n elif slope1> -60:\n slope1=-62\n if slope2< -80:\n slope2 = -62\n elif slope2> -60:\n slope2=-62\n \n # print \"slope1=\", slope1\n # print \"slope2=\", slope2\n \n pwr_std_temp1 = pwr_SL1 + (DR_test - DR_sl) * slope1\n pwr_std_temp2 = pwr_SL2 + (DR_test - DR_sl) * slope2\n # print \"Pwr Std Temp 1=\", pwr_std_temp1\n # print \"Pwr Std Temp 2=\", pwr_std_temp2\n pwr_std_temp = pwr_std_temp1 + (rpm - rpm1) * (pwr_std_temp2 - pwr_std_temp1) / (rpm2 - rpm1)\n\n return pwr_std_temp", "def set_temp_compensation(self, temp: int = 25) -> str:\n if type(temp) == float or int:\n response = self.query(f'T,{temp}')\n if temp < 10 or temp > 40:\n print(f'\\nNOTE: Unusual ocean temperature set: {temp} C.')\n else:\n print('Temp compensation factor should be a decimal/integer!')\n return response", "def air_humidity_method_qsat26air(air_temperature,surface_air_pressure,relative_humdity):\n es = vapor_pressure(air_temperature,surface_air_pressure)\n em = 0.01*relative_humdity*es\n air_humidity = 622.*em/(surface_air_pressure-0.378*em)\n return air_humidity", "def get_D_C3H8_air_eff(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air_Kn = self.get_D_C3H8_air_Kn(T)\n\n if np.isscalar(Kn):\n if Kn <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n else:\n if Kn.any() <= 1.:\n D_C3H8_air_eff = (\n self.porosity / self.tortuosity * self.D_C3H8_air\n )\n else:\n D_C3H8_air_eff = (\n 2. * self.porosity / self.tortuosity *\n (self.D_C3H8_air * D_C3H8_air_Kn) / (self.D_C3H8_air +\n D_C3H8_air_Kn)\n )\n\n self.D_C3H8_air_eff = D_C3H8_air_eff\n\n return D_C3H8_air_eff", "def convert_f_to_c(temp_in_farenheit):\n celcius_temp = round(float((temp_in_farenheit) - 32)*(5/9),1)\n return(celcius_temp)", "def molar_mass_dry_air():\n return 28.9647", "def distribute(self, date_time, air_temp, vapor_pressure=None,\n dew_point=None, cloud_factor=None):\n\n self._logger.debug('%s Distributing thermal' % date_time)\n\n # calculate clear sky thermal\n if self.clear_sky_method == 'marks1979':\n cth = np.zeros_like(air_temp, dtype=np.float64)\n envphys_c.ctopotherm(\n air_temp, dew_point,\n self.dem,\n self.sky_view_factor,\n cth,\n self.config['marks1979_nthreads'])\n\n elif self.clear_sky_method == 'dilley1998':\n cth = clear_sky.Dilly1998(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'prata1996':\n cth = clear_sky.Prata1996(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'angstrom1918':\n cth = clear_sky.Angstrom1918(air_temp, vapor_pressure/1000)\n\n # terrain factor correction\n if (self.sky_view_factor is not None) and \\\n (self.clear_sky_method != 'marks1979'):\n # apply (emiss * skvfac) + (1.0 - skvfac) to the longwave\n cth = cth * self.sky_view_factor + (1.0 - self.sky_view_factor) * \\\n STEF_BOLTZ * air_temp**4\n\n # make output variable\n self.thermal_clear = cth.copy()\n\n # correct for the cloud factor\n # ratio of measured/modeled solar indicates the thermal correction\n if self.correct_cloud:\n if self.cloud_method == 'garen2005':\n cth = cloud.Garen2005(cth,\n cloud_factor)\n\n elif self.cloud_method == 'unsworth1975':\n cth = cloud.Unsworth1975(cth,\n air_temp,\n cloud_factor)\n\n elif self.cloud_method == 
'kimball1982':\n cth = cloud.Kimball1982(cth,\n air_temp,\n vapor_pressure/1000,\n cloud_factor)\n\n elif self.cloud_method == 'crawford1999':\n cth = cloud.Crawford1999(cth,\n air_temp,\n cloud_factor)\n\n # make output variable\n self.thermal_cloud = cth.copy()\n\n # correct for vegetation\n if self.correct_veg:\n cth = vegetation.thermal_correct_canopy(cth,\n air_temp,\n self.veg_tau,\n self.veg_height)\n\n # make output variable\n self.thermal_veg = cth.copy()\n\n self.thermal = utils.set_min_max(cth, self.min, self.max)", "def test_cape_cin_value_error():\n pressure = np.array([1012.0, 1009.0, 1002.0, 1000.0, 925.0, 896.0, 855.0, 850.0, 849.0,\n 830.0, 775.0, 769.0, 758.0, 747.0, 741.0, 731.0, 712.0, 700.0, 691.0,\n 671.0, 636.0, 620.0, 610.0, 601.0, 594.0, 587.0, 583.0, 580.0, 571.0,\n 569.0, 554.0, 530.0, 514.0, 506.0, 502.0, 500.0, 492.0, 484.0, 475.0,\n 456.0, 449.0, 442.0, 433.0, 427.0, 400.0, 395.0, 390.0, 351.0, 300.0,\n 298.0, 294.0, 274.0, 250.0]) * units.hPa\n temperature = np.array([27.8, 25.8, 24.2, 24, 18.8, 16, 13, 12.6, 12.6, 11.6, 9.2, 8.6,\n 8.4, 9.2, 10, 9.4, 7.4, 6.2, 5.2, 3.2, -0.3, -2.3, -3.3, -4.5,\n -5.5, -6.1, -6.1, -6.1, -6.3, -6.3, -7.7, -9.5, -9.9, -10.3,\n -10.9, -11.1, -11.9, -12.7, -13.7, -16.1, -16.9, -17.9, -19.1,\n -19.9, -23.9, -24.7, -25.3, -29.5, -39.3, -39.7, -40.5, -44.3,\n -49.3]) * units.degC\n dewpoint = np.array([19.8, 16.8, 16.2, 16, 13.8, 12.8, 10.1, 9.7, 9.7,\n 8.6, 4.2, 3.9, 0.4, -5.8, -32, -34.6, -35.6, -34.8,\n -32.8, -10.8, -9.3, -10.3, -9.3, -10.5, -10.5, -10, -16.1,\n -19.1, -23.3, -18.3, -17.7, -20.5, -27.9, -32.3, -33.9, -34.1,\n -35.9, -26.7, -37.7, -43.1, -33.9, -40.9, -46.1, -34.9, -33.9,\n -33.7, -33.3, -42.5, -50.3, -49.7, -49.5, -58.3, -61.3]) * units.degC\n cape, cin = surface_based_cape_cin(pressure, temperature, dewpoint)\n expected_cape, expected_cin = 2098.688061 * units('joules/kg'), 0.0 * units('joules/kg')\n assert_almost_equal(cape, expected_cape, 3)\n assert_almost_equal(cin, expected_cin, 3)", "def conduct_heat(self, delta_time, external_power):\n\t\tself.temperature_container = self.temperature_container+self.area*external_power*delta_time/(self.heat_capacity_container*self.mass_container)#https://en.wikipedia.org/wiki/Heat_capacity\n\t\t\n\t\tinternal_power = 0.591*(self.temperature_container-self.temperature)/0.01#No idea of this is right. Mainly the devides by its length bit. 
https://en.wikipedia.org/wiki/Thermal_conduction#Fourier's_law\n\t\t\n\t\tif (self.heat_capacity*self.mass())!=0:\n\t\t\tself.temperature = self.temperature+internal_power*delta_time/(self.heat_capacity*self.mass())\n\t\t\t#self.temperature_container=self.temperature_container-internal_power*delta_time/(self.heat_capacity_container*self.mass_container)#Als je dit toevoegd lijkt de simulatie niet goed meer te werken dus nog even uitzoeken heo dat zit.", "def get_pressure_coefficient(self):\n depth = self.params[\"Measured_Pressure\"][\"depth\"]\n coef = self.params[\"Measured_Pressure\"][\"coef\"]\n pres = self.params[\"Measured_Pressure\"][\"data\"]\n if depth and not coef and pres:\n hydro = hydrostatic_pressure(self.depth,\n kelly_bushing=self.kelly_bushing,\n depth_w=self.water_depth)\n coef_data = list()\n for dp, pr in zip(depth, pres):\n idx = np.searchsorted(self.depth, dp)\n coef_data.append(pr / hydro[idx])\n log = Log()\n log.depth = depth\n log.data = coef_data\n return log\n else:\n log = Log()\n log.depth = depth\n log.data = coef\n return log", "def prescribed_surface_temperature(x, t, K_medium, rho_medium, c_medium, T_medium_initial, T_external_applied):\n k = get_kappa(K_medium, rho_medium, c_medium)\n return (T_external_applied - T_medium_initial)*erfc(x/(2*np.sqrt(k*t))) + T_medium_initial", "def get_temperature(elevation, sea_level):\n if elevation <= sea_level:\n return 0.8\n else:\n return (-1.0 / (1.0 - sea_level)) * (elevation - sea_level) + 1.0", "def _temperature(self, p_input:float) -> float:\n if self._unit_in == 'R':\n temp_K = p_input*5.0/9.0\n elif self._unit_in == 'F':\n temp_K = (p_input+459.67)/9.0*5.0\n elif self._unit_in == 'C':\n temp_K = p_input+273.15\n elif self._unit_in == 'K':\n temp_K = p_input\n \n if self._unit_out == 'R':\n return (temp_K*9.0/5.0)\n elif self._unit_out == 'F':\n return (temp_K*9.0/5.0-459.67) \n elif self._unit_out == 'C':\n return (temp_K-273.15)\n elif self._unit_out == 'K':\n return temp_K", "def p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c", "def get_requested_supply_air_temperature_for_cooling(\n theta_sur_c: np.ndarray, theta_ac: np.ndarray, l_d_cs: np.ndarray, v_d_supply: np.ndarray,\n psi: float, l_duct: np.ndarray) -> np.ndarray:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n l_duct = np.array(l_duct).reshape(1,5).T\n\n theta_req_c = theta_sur_c - (theta_sur_c - theta_ac + l_d_cs * 10 ** 6 / (v_d_supply * c * rho)) \\\n * np.exp(psi * l_duct * 3600 / (v_d_supply * c * rho))\n\n return np.minimum(theta_req_c, theta_ac)", "def get_Cm(self, K0, K1):\n # if(self.weight>=1.5):\n # raise ValueError,\" Error bounds only accurate for k<1.5! 
got k=%s\" % self.weight\n twominusk = mp2 - self._weight\n tmp = mpmath.mpf(len(self.multiplier().weil_module().D()))\n tmp1 = mppi * mp2\n tmp1 = mpmath.power(tmp1, twominusk)\n tmp3 = mpmath.zeta(twominusk)\n if(K0 == 0):\n tmp4 = 1\n else:\n tmp4 = mpmath.power(K0, 1 - self._weight)\n g1 = mpmath.gamma(1 - self._weight)\n g2 = mpmath.gamma(2 - self._weight)\n\n Cm = mp2 / g1 + mp4 * tmp1 / g1 / g2 * tmp * tmp3 * tmp4\n return Cm", "def calculate_economics(\n irradiance: pd.DataFrame, temperature: pd.DataFrame, wind_speed: pd.DataFrame,\n CECMod: pd.DataFrame, configuration: float = 1\n ):\n p_out = calculate_dc_output(irradiance, temperature, wind_speed, CECMod=CECMod)\n\n # convert dc to AC - considering a flat loss of 14%\n # we have to improve this in the future\n p_out = [v * 0.86 for v in p_out]\n\n day_count = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n monthly_electricity = []\n\n for month in range(12):\n st_index = sum(day_count[:month + 1]) * 24\n end_index = sum(day_count[:month + 2]) * 24\n data = p_out[st_index: end_index]\n # Note: division by 50 is to match the values - remove it later!\n monthly_electricity.append(sum(data) / len(data) / 50)\n\n total_ac_energy = sum(p_out)\n monthly_ac_energy = pd.DataFrame(\n zip(calendar.month_abbr[1:], monthly_electricity),\n columns=['month', 'Thousand kWh']\n )\n\n # Based on the example here: https://nrel-pysam.readthedocs.io/en/master/Import.html\n\n grid = Grid.default(\"PVWattsCommercial\")\n ur = UtilityRate.from_existing(grid, \"PVWattsCommercial\")\n cl = Cashloan.from_existing(grid,\"PVWattsCommercial\")\n\n sam_data = read_sam_data(configuration)\n for module, data in zip([grid, ur, cl], sam_data[:-1]):\n for k, v in data.items():\n if k == 'number_inputs':\n continue\n try:\n module.value(k, v)\n except AttributeError:\n print(module, k, v)\n\n\n grid.SystemOutput.gen = p_out\n\n grid.execute()\n ur.execute()\n cl.execute()\n\n # list possible outputs here\n adjusted_installed_cost = cl.Outputs.adjusted_installed_cost\n payback_cash_flow = [-1 * x for x in cl.Outputs.cf_discounted_payback]\n\n return total_ac_energy, monthly_ac_energy, adjusted_installed_cost, payback_cash_flow", "def get_heat_capacity_pressure(vv, tt, chemical_potential, gbar=2.0, *args, **kwargs):\n # type: (np.ndarray, np.ndarray, np.ndarray, float, list, dict) -> np.ndarray\n y = chemical_potential / tt\n # There is a precision problem with \"-\" (minus) operator\n # We'll use asymptotic formula for high temperatures to avoid that problem\n y_low = y[y < THRESHOLD]\n vv_low, vv_high = vv[y < THRESHOLD], vv[y >= THRESHOLD]\n tt_low, tt_high = tt[y < THRESHOLD], tt[y >= THRESHOLD]\n # high temperatures - low numbers\n C_P_low = 5 * gbar * np.sqrt(2) / (36 * np.pi ** 2) * tt_low ** (3 / 2) * vv_low\n C_P_low *= (\n 5 * _1d_call(_fdk, y_low, k=-1 / 2) * _1d_call(_fdk, y_low, k=3 / 2)\n - 9 * _1d_call(_fdk, y_low, k=1 / 2) ** 2\n )\n C_P_low *= _1d_call(_fdk, y_low, k=3 / 2) / _1d_call(_fdk, y_low, k=1 / 2) ** 2\n # low temperatures - high numbers\n C_P_high = (gbar * np.pi / 6) ** (2 / 3) * tt_high * vv_high ** (2 / 3)\n return np.concatenate((C_P_low, C_P_high)).reshape(y.shape)", "def getCMean(inp):\n\tinp = sorted(inp, key = lambda x: x[0])\n\treturn 1 - getClearWaterDepth(inp) / getY90(inp)" ]
[ "0.6271697", "0.61329305", "0.61147255", "0.60481554", "0.5957173", "0.59200716", "0.58812517", "0.5873447", "0.5873447", "0.58698595", "0.58185816", "0.57858187", "0.5785377", "0.572641", "0.56684095", "0.56592894", "0.56545794", "0.56407183", "0.5634677", "0.56337875", "0.5617559", "0.56123394", "0.5606336", "0.5598867", "0.5584177", "0.5552192", "0.5533172", "0.55303067", "0.55013573", "0.5483794", "0.54750246", "0.5474339", "0.54675716", "0.5433305", "0.54293674", "0.5429006", "0.5412294", "0.54104203", "0.5402218", "0.539962", "0.53971815", "0.53942996", "0.5385547", "0.5371968", "0.53715336", "0.5359355", "0.53377676", "0.5329969", "0.5321873", "0.53110546", "0.53047395", "0.5301873", "0.53006583", "0.52958083", "0.5290615", "0.52904224", "0.5290135", "0.5284513", "0.52772313", "0.5266409", "0.5255935", "0.52553815", "0.52493906", "0.5248162", "0.5246704", "0.5243548", "0.5243156", "0.5237512", "0.5225367", "0.5220558", "0.52204484", "0.52195525", "0.5218908", "0.52172214", "0.52123225", "0.5211388", "0.52075976", "0.52060854", "0.51964647", "0.5194749", "0.51944846", "0.5193705", "0.5192458", "0.51921517", "0.5191625", "0.51893294", "0.5187028", "0.51843154", "0.5178343", "0.5176842", "0.51748", "0.51651376", "0.5161176", "0.51561844", "0.5155146", "0.5151945", "0.5150044", "0.5149724", "0.5144883", "0.5141581" ]
0.542909
35
Approximate broadband aerosol optical depth. Bird and Hulstrom developed a correlation for broadband aerosol optical depth (AOD) using two wavelengths, 380 nm and 500 nm.
def bird_hulstrom80_aod_bb(aod380, aod500):
    # approximate broadband AOD using (Bird-Hulstrom 1980)
    return 0.27583 * aod380 + 0.35 * aod500
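A minimal usage sketch (not part of the dataset record), assuming the function above is in scope; the two input AOD values are hypothetical and chosen only to illustrate how the 380 nm and 500 nm channels are weighted in the Bird-Hulstrom (1980) broadband approximation.

# Hypothetical spot values of aerosol optical depth at the two wavelengths.
aod380 = 0.30   # AOD at 380 nm (illustrative value, not from the source)
aod500 = 0.25   # AOD at 500 nm (illustrative value, not from the source)

# Broadband approximation: 0.27583*0.30 + 0.35*0.25 = 0.170249
aod_bb = bird_hulstrom80_aod_bb(aod380, aod500)
print(aod_bb)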
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_blue_haze_TOA(d,sza,L):\n rho_blue_TOA=np.pi*(d**2)*L[:,5,:]/(2.16*np.cos(sza))\n return rho_blue_TOA", "def ead(o2, depth):\n fraction_o2 = percentage_to_fraction(o2)\n fraction_n2 = 1.0 - fraction_o2\n return math.ceil(((depth + 10.0) * (fraction_n2 / 0.79)) - 10.0)", "def optical_depth(self,z0,ETeV,\n #OmegaM = cosmo.Om0, OmegaL = 1. - cosmo.Om0, \n OmegaM = 0.3, OmegaL = 0.7, \n H0 = 70., #cosmo.H0.value, \n steps_z = 50,\n steps_e = 50,\n egamma_LIV = True,\n LIV_scale = 0., nLIV=1):\n if np.isscalar(ETeV):\n ETeV = np.array([ETeV])\n elif type(ETeV) == list or type(ETeV) == tuple:\n ETeV = np.array(ETeV)\n\n\n z_array = np.linspace(0.,z0,steps_z)\n result = self.mean_free_path(z_array,ETeV,\n LIV_scale = LIV_scale,\n nLIV = nLIV, \n egamma_LIV = egamma_LIV,\n steps_e = steps_e)\n\n\n zz,ee = np.meshgrid(z_array, ETeV, indexing = 'ij')\n result = 1. / (result.T * u.Mpc).to(u.cm).value # this is in cm^-1\n result *= 1./ ( (1. + zz ) * np.sqrt((1.+ zz )**3. * OmegaM + OmegaL) ) # dt / dz for a flat universe\n\n result = simps(result,zz, axis = 0)\n\n H0 = (H0 * cosmo.H0.unit).to('1 / s').value # convert from km / Mpc / s to 1 / s\n\n return result * c.c.to('cm / s').value / H0", "def calculate_blue_TOA(d,sza,L):\n rho_blue_TOA=np.pi*(d**2)*L[:,0,:]/(1.57*np.cos(sza))\n return rho_blue_TOA", "def optical_depth(self, dispersion):\n\n from ..calculations import optical_depth, tau_peak\n\n if isinstance(dispersion, Quantity):\n dispersion = dispersion.to(angstrom)\n\n elif hasattr(dispersion, 'unit'):\n if dispersion.unit is not None:\n dispersion = dispersion.to(angstrom)\n\n else:\n dispersion = Quantity(dispersion, unit=angstrom)\n\n velocity_range = ([-20000, 20000] * km / s if self.logn > 18\n else [-1000, 1000] * km / s)\n\n # Select only transitions with redshifted central wavelengths inside\n # `dispersion` +/- 500 km/s:\n rest_wavelengths = Quantity([t.wavelength for t in self.transitions])\n observed_wavelengths = rest_wavelengths * (1 + self.redshift)\n\n wmin = dispersion[0] * (1 - 500 * km / s / c_kms)\n wmax = dispersion[-1] * (1 - 500 * km / s / c_kms)\n\n in_range = ((observed_wavelengths >= wmin) &\n (observed_wavelengths <= wmax))\n transitions = np.array(self.transitions)[in_range]\n\n tau = np.zeros_like(dispersion.value)\n\n for i, transition in enumerate(transitions):\n\n tau_max = tau_peak(transition, self.logn, self.b)\n\n if 1 - exp(-tau_max) < 1e-3:\n continue\n\n observed_wavelength = transition.wavelength * (1 + self.redshift)\n dv = ((dispersion - observed_wavelength) /\n observed_wavelength * c_kms)\n\n i0, i1 = dv.searchsorted(velocity_range)\n tau0 = optical_depth(dv[i0:i1], transition, self.logn, self.b)\n tau[i0:i1] += tau0\n\n return tau", "def one_transition_spectrum_cd(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n rr = tr[\"rr\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = 
tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = rr*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def calculate_relative_soma_depth(data: Data) -> float:\n\n return data.relative_soma_depth", "def depth_to_ata(depth):\n return (depth / 10.0) + 1.0", "def Get_AASR(DopplerBandwidth, Frequencies, NumAmbiguities, FreqSteps, Prf,\n TxAntennaPattern, RxAntennaPattern, TxGain=1, RxGain=1):\n _BWDop = DopplerBandwidth\n _NA = NumAmbiguities\n _step = _BWDop / FreqSteps\n \n _denom = 0\n for f in np.arange(start=-_BWDop/2, stop=_BWDop/2, step=_step):\n # find the nearest frequency and magnitude pair for the TX and RX patterns\n _idx, _ = Utils.find_nearest(Frequencies, f)\n _txAzPat = TxAntennaPattern[_idx] * TxGain\n _rxAzPat = RxAntennaPattern[_idx] * RxGain\n _azPat = _txAzPat * _rxAzPat\n _denom += ((math.pow(_azPat, 2)) * _step)\n\n # _txFreq, _txAzPat = Utils.find_nearest(list(TxAntennaPattern.keys()), f)\n # _rxFreq, _rxAzPat = Utils.find_nearest(list(RxAntennaPattern.keys()), f)\n\n # # _txAzPat = Get_AntennaPatternWithDopplerFrequency(f, txDimension, velocity, txGain)\n # # _rxAzPat = Get_AntennaPatternWithDopplerFrequency(f, rxDimension, velocity, rxGain)\n # # _denom += ((math.pow(_azPat, 2)) * _step)\n # _denom += ((_txAzPat * _rxAzPat) * _step)\n \n _num = 0\n _azPat = 0\n for m in range(-_NA, _NA+1, 1):\n if m != 0:\n for f in np.arange(start=-_BWDop/2, stop=_BWDop/2, step=_step):\n # find the nearest frequency and magnitude pair for the TX and RX patterns\n _idx, _ = Utils.find_nearest(Frequencies, f + (m*Prf))\n _txAzPat = TxAntennaPattern[_idx] * TxGain\n _rxAzPat = RxAntennaPattern[_idx] * RxGain\n _azPat = _txAzPat * _rxAzPat\n _num += ((math.pow(_azPat, 2)) * _step)\n\n # _txFreq, _txAzPat = Utils.find_nearest(list(TxAntennaPattern.keys()), f + (m*Prf))\n # _rxFreq, _rxAzPat = Utils.find_nearest(list(RxAntennaPattern.keys()), f + (m*Prf))\n\n # # _txAzPat = Get_AntennaPatternWithDopplerFrequency((f + (m*prf)), txDimension, velocity, txGain)\n # # _rxAzPat = Get_AntennaPatternWithDopplerFrequency((f + (m*prf)), rxDimension, velocity, rxGain)\n # # #_num += ((math.pow(_azPat, 2)) * _step)\n # _num += ((_txAzPat * _rxAzPat) * _step)\n \n return (_num / _denom)", "def BoatEarMoon():\n D=1\n alpha=math.radians(83)\n beta=math.radians(42) \n phi=math.radians(70) \n mu=math.radians(10) \n omega=math.radians(30) \n A=25, a=12, b=20, L=0, P=0, W1=0, W2=0, N=0\n \n resMode()", "def GetConcBeer(Abs, epsilon, pathLength):\n return Abs / (epsilon * pathLength)", "def rayonDeCourbur(**kwargs):\n a = 0\n b = 0\n try:\n if kwargs[\"ref\"] == \"local\":\n a = 6378249.145\n b = 6356515\n elif kwargs[\"ref\"] == \"global\":\n a = 6378137\n b = 6356752.314\n elif kwargs[\"a\"] and kwargs[\"b\"]:\n a = kwargs[\"a\"]\n b = 
kwargs[\"b\"]\n except KeyError:\n return {\"erreur\": \"params a and b is required, you can use ref too which has two possible value: local and global\"}\n if a != 0 and b != 0:\n try:\n phi = math.radians(kwargs[\"phi\"])\n e2 = 1-(b/a)**2\n w = math.sqrt(1-e2*math.sin(phi)**2)\n M = a*(1-e2)/w**3\n N = a/w\n if \"alpha\" in kwargs.keys():\n alpha = math.radians(kwargs[\"alpha\"])\n rAlpha = (M*N)/(M*math.sin(alpha)**2+N*math.cos(alpha)**2)\n return{\"M\": M, \"N\": N, \"rAlpha\": rAlpha, \"1/R\": 1/rAlpha}\n elif kwargs[\"radius\"] == \"M\":\n return {\"M\": M}\n elif kwargs[\"radius\"] == \"N\":\n return {\"N\": N}\n except KeyError as err:\n return {\"erreur\": f\"{format(err)} is required!\"}", "def GetAbsBeer(epsilon, conc, pathLength):\n return epsilon * conc * pathLength", "def one_transition_spectrum_abs(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = dd*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def doctored_depth_1852_adhoc(self, longitude, latitude, dip):\n # set up sample point and fault array\n p1 = np.array([longitude, latitude])\n fault_file = './InputData/fault_array.npy'\n fault_array = np.load(fault_file)\n # will store haversine distances for comparison\n #dist_array = np.zeros(0.5 * len(fault_array))\n dist_array = np.zeros(len(fault_array)//2)\n for i in range(len(dist_array)):\n x = fault_array[2 * i]\n y = fault_array[2 * i + 1]\n p2 = np.array([x, y])\n dist_array[i] = self.haversine_distance(p1, p2)\n\n dist = np.amin(dist_array)\n\n # need to add trig correction\n return (20000 + dist * np.tan(20 * np.pi / 180))", "def calcDispCorrandR(aaconst, aacorr, caliConst, AAdata, outName):\n k=caliConst[3]\n n=caliConst[2]\n #Be sure to change this appropriately to the fixed dye conc\n x=k*((fixed_dye_conc-n)/55.5)\n n = n\n print(aaconst)\n perr2=np.sqrt(np.diag(aacorr))\n print(perr2)\n corrmat2=np.zeros([len(aaconst),len(aaconst)])\n for i in range(len(aacorr)):\n for j in range(len(aacorr)):\n ele=aacorr[i,j]\n 
diele=ele/(perr2[i]*perr2[j])\n corrmat2[i,j]=round(diele,3)\n print(corrmat2)\n #calculate the r^2 value\n AAss_res = 0\n AAss_total = 0\n residuals = np.zeros([len(AAdata[:,0]), 1])\n for i in range(len(AAdata[:,0])):\n residuals[i] = (DispCurve(AAdata[i,0],x,aaconst[0],aaconst[1],n,aaconst[2]) - AAdata[i,1])\n AAss_res += np.square(residuals[i])\n AAss_total += np.square((AAdata[i,1] - np.average(AAdata[:,1])))\n print(AAss_res)\n print(AAss_total)\n AAr_sq = 1 - (AAss_res/AAss_total)\n print(AAr_sq)\n #write out the fit results\n f = open(outName + \"_disp_constants.txt\", 'w')\n f.write(\"B\\ta\\tN\\tK\\n\")\n for i in range(len(aaconst)):\n f.write('%.9f' %aaconst[i] + \"\\t\")\n f.write(\"\\n\")\n for i in range(len(aacorr)):\n f.write('%.9f' %perr2[i] + \"\\t\")\n f.write(\"\\n\\n\")\n f.write(\"Correlation matrix :\\n\\n\")\n for i in range(len(aacorr)):\n for j in range(len(aacorr)):\n f.write('%.9f' %corrmat2[i,j]+'\\t')\n f.write(\"\\n\\n\")\n f.write(\"R^2 value : \\t\" + '%.9f' %AAr_sq)\n f.close()", "def test_GBL_tau_star():\n z = 1.0\n\n # Fully ionized H and He\n x_ionH = 1.0\n x_ionHe = 2.0\n\n cosmo = {}\n cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])\n cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']\n cosmo['h'] = 0.65\n cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.\n cosmo['Y_He'] = 0.24\n cd.set_omega_k_0(cosmo)\n\n tau_inst, tau_star = cr.optical_depth_instant(z, \n x_ionH=x_ionH, \n x_ionHe=x_ionHe, \n return_tau_star=True,\n **cosmo)\n print(\"tau_star = %.7f\" % (tau_star))\n print(\"tau_star/(h Omega_b) = %.7f =? 0.061\" % \n (tau_star / (cosmo['h'] * cosmo['omega_b_0'])))\n\n ntest.assert_approx_equal(tau_star / (cosmo['h'] * cosmo['omega_b_0']),\n 0.061,\n 2)\n\n print(\"(1 - Y_He/2) = %.3f =? 0.88\" % (1. - (cosmo['Y_He']/2.)))\n ntest.assert_approx_equal((1. - (cosmo['Y_He']/2.)),\n 0.88,\n 7)\n\n H_0 = cc.H100_s * cosmo['h']\n\n # s^-1 * Mpc s^-1 * Mpc^2 / Mpc^3 msun^-1 s^-2 / Msun -> \n tau_star_explicit = ((1. - (cosmo['Y_He']/2.)) * \n ((3. * H_0 * cosmo['omega_b_0'] * cc.c_light_Mpc_s *\n cc.sigma_T_Mpc) / \n (8. * math.pi * cc.G_const_Mpc_Msun_s * \n (cc.m_p_g/cc.M_sun_g))))\n\n print(\"tau_star_explicit = %.7f =? 
tau_star\" % (tau_star_explicit))\n ntest.assert_approx_equal(tau_star, tau_star_explicit, 3)", "def Build_quadrant(self) :\n\n self.omega = np.zeros((self.n_dir,3))\n self.weight = np.zeros((self.n_dir))\n\n if self.sn==2 :\n direction = 0.577350269189625764509149\n weight = 1.\n\n self.omega[0,0] = direction\n self.omega[0,1] = direction\n self.omega[0,2] = direction\n \n self.weight[0] = weight\n \n elif self.sn==4 :\n direction_1 = 0.350021174581540677777041\n direction_2 = 0.868890300722201205229788\n weight = 1./3.\n\n self.omega[0,0] = direction_2\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n \n self.omega[1,0] = direction_1\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n\n self.omega[2,0] = direction_1\n self.omega[2,1] = direction_1\n self.omega[2,2] = direction_2\n\n self.weight[0] = weight\n self.weight[1] = weight\n self.weight[2] = weight\n\n elif self.sn==6 :\n direction_1 = 0.266635401516704720331535\n direction_2 = 0.681507726536546927403750\n direction_3 = 0.926180935517489107558380\n weight_1 = 0.176126130863383433783565\n weight_2 = 0.157207202469949899549768\n\n self.omega[0,0] = direction_3\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n\n self.omega[1,0] = direction_2\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n\n self.omega[2,0] = direction_1\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n\n self.omega[3,0] = direction_2\n self.omega[3,1] = direction_1\n self.omega[3,2] = direction_2\n \n self.omega[4,0] = direction_1\n self.omega[4,1] = direction_2\n self.omega[4,2] = direction_2\n\n self.omega[5,0] = direction_1\n self.omega[5,1] = direction_1\n self.omega[5,2] = direction_3\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_1\n self.weight[3] = weight_2\n self.weight[4] = weight_2\n self.weight[5] = weight_1\n\n elif self.sn==8 :\n direction_1 = 0.218217890235992381266097\n direction_2 = 0.577350269189625764509149\n direction_3 = 0.786795792469443145800830\n direction_4 = 0.951189731211341853132399\n\n weight_1 = 0.120987654320987654320988\n weight_2 = 0.0907407407407407407407407\n weight_3 = 0.0925925925925925925925926\n\n self.omega[0,0] = direction_4\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n\n self.omega[1,0] = direction_3\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n \n self.omega[2,0] = direction_2\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n\n self.omega[3,0] = direction_1\n self.omega[3,1] = direction_4\n self.omega[3,2] = direction_1\n\n self.omega[4,0] = direction_3\n self.omega[4,1] = direction_1\n self.omega[4,2] = direction_2\n\n self.omega[5,0] = direction_2\n self.omega[5,1] = direction_2\n self.omega[5,2] = direction_2\n\n self.omega[6,0] = direction_1\n self.omega[6,1] = direction_3\n self.omega[6,2] = direction_2\n\n self.omega[7,0] = direction_2\n self.omega[7,1] = direction_1\n self.omega[7,2] = direction_3\n\n self.omega[8,0] = direction_1\n self.omega[8,1] = direction_2\n self.omega[8,2] = direction_3\n\n self.omega[9,0] = direction_1\n self.omega[9,1] = direction_1\n self.omega[9,2] = direction_4\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_2\n self.weight[3] = weight_1\n self.weight[4] = weight_2\n self.weight[5] = weight_3\n self.weight[6] = weight_2\n self.weight[7] = weight_2\n self.weight[8] = weight_2\n self.weight[9] = weight_1\n\n elif self.sn==10 :\n direction_1 = 0.189321326478010476671494\n direction_2 = 
0.508881755582618974382711\n direction_3 = 0.694318887594384317279217\n direction_4 = 0.839759962236684758403029\n direction_5 = 0.963490981110468484701598\n\n weight_1 = 0.0893031479843567214704325\n weight_2 = 0.0725291517123655242296233\n weight_3 = 0.0450437674364086390490892\n weight_4 = 0.0539281144878369243545650\n\n self.omega[0,0] = direction_5\n self.omega[0,1] = direction_1\n self.omega[0,2] = direction_1\n \n self.omega[1,0] = direction_4\n self.omega[1,1] = direction_2\n self.omega[1,2] = direction_1\n \n self.omega[2,0] = direction_3\n self.omega[2,1] = direction_3\n self.omega[2,2] = direction_1\n \n self.omega[3,0] = direction_2\n self.omega[3,1] = direction_4\n self.omega[3,2] = direction_1\n\n self.omega[4,0] = direction_1\n self.omega[4,1] = direction_5\n self.omega[4,2] = direction_1\n\n self.omega[5,0] = direction_4\n self.omega[5,1] = direction_1\n self.omega[5,2] = direction_2\n\n self.omega[6,0] = direction_3\n self.omega[6,1] = direction_2\n self.omega[6,2] = direction_2\n\n self.omega[7,0] = direction_2\n self.omega[7,1] = direction_3\n self.omega[7,2] = direction_2\n\n self.omega[8,0] = direction_1\n self.omega[8,1] = direction_4\n self.omega[8,2] = direction_2\n\n self.omega[9,0] = direction_3\n self.omega[9,1] = direction_1\n self.omega[9,2] = direction_3\n\n self.omega[10,0] = direction_2\n self.omega[10,1] = direction_2\n self.omega[10,2] = direction_3\n\n self.omega[11,0] = direction_1\n self.omega[11,1] = direction_3\n self.omega[11,2] = direction_3\n\n self.omega[12,0] = direction_2\n self.omega[12,1] = direction_1\n self.omega[12,2] = direction_4\n\n self.omega[13,0] = direction_1\n self.omega[13,1] = direction_2\n self.omega[13,2] = direction_4\n\n self.weight[0] = weight_1\n self.weight[1] = weight_2\n self.weight[2] = weight_3\n self.weight[3] = weight_2\n self.weight[4] = weight_1\n self.weight[5] = weight_2\n self.weight[6] = weight_4\n self.weight[7] = weight_4\n self.weight[8] = weight_2\n self.weight[9] = weight_3\n self.weight[10] = weight_4\n self.weight[11] = weight_3\n self.weight[12] = weight_2\n self.weight[13] = weight_2\n self.weight[14] = weight_1\n\n elif self.sn==12 :\n direction = np.zeros((6,1))\n\n direction[0] = 0.167212652822713264084504\n direction[1] = 0.459547634642594690016761\n direction[2] = 0.628019096642130901034766\n direction[3] = 0.760021014833664062877138\n direction[4] = 0.872270543025721502340662\n direction[5] = 0.971637719251358378302376\n\n weight_1 = 0.0707625899700910439766549\n weight_2 = 0.0558811015648888075828962\n weight_3 = 0.0373376737588285824652402\n weight_4 = 0.0502819010600571181385765\n weight_5 = 0.0258512916557503911218290\n\n for i in xrange(0,6) :\n self.omega[i,0] = direction[5-i]\n self.omega[i,1] = direction[i]\n self.omega[i,2] = direction[0]\n \n offset = 6\n for i in xrange(0,5) :\n self.omega[offset+i,0] = direction[4-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[1]\n\n offset += 5\n for i in xrange(0,4) :\n self.omega[offset+i,0] = direction[3-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[2]\n \n offset += 4\n for i in xrange(0,3) :\n self.omega[offset+i,0] = direction[2-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[3]\n\n offset += 3\n for i in xrange(0,2) :\n self.omega[offset+i,0] = direction[1-i]\n self.omega[offset+i,1] = direction[i]\n self.omega[offset+i,2] = direction[4]\n \n offset += 2\n self.omega[offset+i,0] = direction[0]\n self.omega[offset+i,1] = direction[1]\n 
self.omega[offset+i,2] = direction[5]\n\n self.weight[0] = weigth_1\n self.weight[1] = weight_2\n self.weight[2] = weight_3\n self.weight[3] = weight_3\n self.weight[4] = weight_2\n self.weight[5] = weight_1\n self.weight[6] = weight_2\n self.weight[7] = weight_4\n self.weight[8] = weight_5\n self.weight[9] = weight_4\n self.weight[10] = weight_2\n self.weight[11] = weight_3\n self.weight[12] = weight_5\n self.weight[13] = weight_5\n self.weight[14] = weight_3\n self.weight[15] = weight_3\n self.weight[16] = weight_4\n self.weight[17] = weight_3\n self.weight[18] = weight_2\n self.weight[19] = weight_2\n self.weight[20] = weight_1", "def comp_angle_opening_magnet(self):\n\n if self.W1 > 0:\n Rbo = self.get_Rbo()\n return float(2 * arcsin(self.W1 / (2 * Rbo)))\n else:\n return self.comp_angle_magnet()", "def Agl2ArcLen(self,agl):\r\n\r\n return (self.distance_between_wheels/2)*agl", "def ant_max_direction(freq, fit='Lebedev_model'):\n if fit == 'Lebedev':\n freqkHz = freq * 1e3 # to kHz, to float\n dep = 0.2816\n ep = 185634.9983 \\\n - 4.877867876 * freqkHz \\\n + 4.803667396E-005 * freqkHz**2 \\\n - 2.102600271E-010 * freqkHz**3 \\\n + 3.453540782E-016 * freqkHz**4\n ep += dep\n return np.deg2rad(ep)\n elif fit == 'Lebedev_model':\n return ant_max_direction_model_c_wrapped(freq * 1000.0)\n elif fit in ['Vasiliev_old', 'Vasiliev']:\n freqHz = freq * 1e6 # to kHz, to float\n d = 0.87\n a = 0.136\n b = 0.014\n h = 0.384\n wavelength = speed_of_light / freqHz\n k = 2.0 * np.pi / wavelength\n\n if fit == 'Vasiliev_old':\n hi_f = -32.3802499 \\\n + 6.329699841E-007 * freqHz \\\n - 4.001034124E-015 * freqHz ** 2 \\\n + 8.374351252E-024 * freqHz ** 3\n else:\n hi_f = 13.5066804030779 \\\n - 23.894031675887E-008 * freqHz \\\n + 15.2068384591317E-016 * freqHz ** 2 \\\n - 3.28019314316317E-024 * freqHz ** 3\n\n g = hi_f * np.sqrt(1 + ((a / (a + b)) * np.tan(k * h)) ** 2)\n theta = g - wavelength / d\n return theta\n else:\n raise ValueError('Incorrect fit: {}'.format(fit))", "def ASS_theo(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = (ca1*cb + ba1*ca1 + ba1*cb) + ca1*ca1*D\n\tden = (ba1*cb + ba1*ca1 + ca1*cb) + (2*ca1*ca1+ba1*cb+ca1*cb)*D + ba1*ca1*D*D\n\treturn num/den", "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def mamajek08_logRpHK_Ro_edge():\n Ro_edge = 0.31935816876122064\n return Ro_edge", "def one_transition_spectrum_fluor(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n re = tr[\"re\"] # reorganisation energy\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-numpy.conjugate(gt) -1j*om*ta.data + 2j*re*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# 
plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = dd*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if 
len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def getArea(antenna):\n diameter = getDiameter(antenna)\n if antenna >=1 and antenna <= 6: \n efficiency = 0.5\n elif antenna >=7 and antenna <= 15: \n efficiency = 0.6\n elif antenna >=16 and antenna <= 23: \n efficiency = 0.6\n else:\n raise Exception,'Unregonized antenna type'\n area = math.pi * diameter*diameter / 4.0 * efficiency\n\n return area", "def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'", "def get_caravan_depth(self):\n return self.caravan_depth", "def fringes_ABCD_phase(opd, fringesAr, fringesBr,\n fringesCr, fringesDr,\n wavelength=2.1e-6, plot=False):\n # define boundaries\n n_samples_per_fringes = wavelength/np.abs(np.diff(opd).mean())\n\n # compute Morlet's transform\n dopd = np.abs(np.diff(opd).mean())\n\n wavA = morlet.CWT(fringesAr, dopd, scale_min=1/6e-6,\n scale_max=1/2.e-6, n_per_octave=32)\n wavB = morlet.CWT(fringesBr, dopd, scale_min=1/6e-6,\n scale_max=1/2.e-6, n_per_octave=32)\n wavC = morlet.CWT(fringesCr, dopd, scale_min=1/6e-6,\n scale_max=1/2.e-6, n_per_octave=32)\n wavD = morlet.CWT(fringesDr, dopd, scale_min=1/6e-6,\n scale_max=1/2.e-6, 
n_per_octave=32)\n\n data = [wavA, wavB, wavC, wavD]\n scale=[]\n for wav in data:\n weight = np.sum(np.abs(wav.cwt)**2, axis=1)\n i0 = weight.argmax()\n i0 = min(max(i0, len(weight)-5), 5)\n print i0, len(weight)\n c = np.polyfit(wav.frequencies[i0-2:i0+2],\\\n np.log(weight[i0-2:i0+2]), 2)\n scale.append(-c[1]/(2*c[0]))\n scale = np.array(scale)\n wlABCD = 1/scale*1e6\n print 'wavelength (WT peak) [A B C D]:'\n print wlABCD\n print 100*(scale/scale.mean()-1), '(% / mean)'\n if plot:\n opd0 = (opd-opd.mean())*1e6\n Xp, Yp = np.meshgrid(opd0, 1/wavA.frequencies*1e6)\n\n plt.figure(4)\n plt.clf()\n ax = plt.subplot(211)\n plt.plot(opd0, fringesAr-fringesCr, 'k')\n plt.subplot(212, sharex=ax)\n plt.plot(opd0, fringesBr-fringesDr, 'k')\n\n plt.figure(3)\n plt.clf()\n plt.subplot(241, sharex=ax)\n plt.plot(opd0, fringesAr, 'k')\n plt.ylabel('fringes')\n plt.title('A')\n plt.subplot(245, sharex=ax)\n plt.pcolormesh(Xp,Yp, abs(wavA.cwt))\n #plt.hlines(wlABCD[0], opd0.min(), opd0.max(),\\\n # color='y')\n plt.ylabel('wavelength (um)')\n plt.subplot(242, sharex=ax)\n plt.plot(opd0, fringesBr, 'k')\n plt.title('B')\n plt.subplot(246, sharex=ax)\n plt.pcolormesh(Xp,Yp, abs(wavB.cwt))\n #plt.hlines(wlABCD[1], opd0.min(), opd0.max(),\\\n # color='y')\n plt.subplot(243, sharex=ax)\n plt.plot(opd0, fringesCr, 'k')\n plt.title('C')\n plt.subplot(247, sharex=ax)\n plt.pcolormesh(Xp,Yp, abs(wavC.cwt))\n #plt.hlines(wlABCD[2], opd0.min(), opd0.max(),\\\n # color='y')\n plt.subplot(244, sharex=ax)\n plt.plot(opd0, fringesDr, 'k')\n plt.title('D')\n plt.subplot(248, sharex=ax)\n plt.pcolormesh(Xp,Yp, abs(wavD.cwt))\n #plt.hlines(wlABCD[3], opd0.min(), opd0.max(),\\\n # color='y')\n\n # compute phases as function of OPD\n phiAB = fringes_morlet_phase(wavA, wavB)\n phiAC = fringes_morlet_phase(wavA, wavC, quasi_pi=True)\n phiDA = fringes_morlet_phase(wavD, wavA)\n phiBC = fringes_morlet_phase(wavB, wavC)\n phiBD = fringes_morlet_phase(wavB, wavD, quasi_pi=True)\n phiCD = fringes_morlet_phase(wavC, wavD)\n weightABCD = np.sum(abs(wavA.cwt)*abs(wavB.cwt)*\\\n abs(wavC.cwt)*abs(wavD.cwt),axis=0)\n weightABCD = weightABCD**(0.25)\n return phiAB, phiBC, phiCD, phiDA, phiAC, phiBD, weightABCD, wlABCD\n\n wp = np.where(weightABCD>\n 3*np.median(weightABCD))\n print 'WP=', wp\n phi_abcd = np.arctan2(fringesAr-fringesCr, \\\n fringesBr-fringesDr)\n phi_abcd = np.unwrap(phi_abcd)\n slopeAB = np.polyfit((opd[wp]-opd[wp].mean())*1e6,\\\n phiAB[wp], 1)\n slopeBC = np.polyfit((opd[wp]-opd[wp].mean())*1e6,\\\n phiBC[wp], 1)\n slopeCD = np.polyfit((opd[wp]-opd[wp].mean())*1e6,\\\n phiCD[wp], 1)\n slopeDA = np.polyfit((opd[wp]-opd[wp].mean())*1e6,\\\n phiDA[wp], 1)\n slopeAC = np.polyfit((opd[wp]-opd[wp].mean())*1e6,\\\n phiAC[wp], 1)\n slopeBD = np.polyfit((opd[wp]-opd[wp].mean())*1e6,\\\n phiBD[wp], 1)\n print 'phases: slope (rad/um of OPD), phi0/pi'\n print 'A-B %6.3f %6.3f' % (slopeAB[0], slopeAB[1]/np.pi)\n print 'B-C %6.3f %6.3f' % (slopeBC[0], slopeBC[1]/np.pi)\n print 'C-D %6.3f %6.3f' % (slopeCD[0], slopeCD[1]/np.pi)\n print 'D-A %6.3f %6.3f' % (slopeDA[0], slopeDA[1]/np.pi)\n slopes = [slopeAB[0], slopeBC[0], slopeCD[0], slopeDA[0]]\n slopes = np.array(slopes)\n wldiff = [wlABCD[0]-wlABCD[1], wlABCD[1]-wlABCD[2], \\\n wlABCD[2]-wlABCD[3], wlABCD[3]-wlABCD[0]]\n wldiff = np.array(wldiff)\n #------- FIGURE ----------\n if plot:\n plt.figure(1)\n plt.clf()\n plt.subplot(211)\n plt.plot(opd, fringesBr, 'k')\n plt.subplot(212)\n plt.hlines(np.pi*np.arange(-1,1.5,0.5), \\\n opd.min(), opd.max(), color='y',\\\n 
linestyles='dotted')\n plt.plot(opd[wp], phiAB[wp], 'r', linewidth=2)\n plt.plot(opd[wp], phiBC[wp], 'g', linewidth=2)\n plt.plot(opd[wp], phiCD[wp], 'b', linewidth=2)\n plt.plot(opd[wp], phiDA[wp], 'm', linewidth=2)\n plt.plot(opd[wp], np.unwrap(phiAC[wp]),\\\n color=((1,0.5,0,0)), linewidth=2)\n plt.plot(opd[wp], np.unwrap(phiBD[wp]),\\\n color=((0.3,0.2,1,0)), linewidth=2)\n plt.ylabel('phase (radian)')\n plt.xlabel('OPD (m)')\n plt.legend(('A-B', 'B-C', 'C-D', 'D-A',\\\n 'A-C','B-D'))\n plt.ylim(0, 4)\n return phiAB, phiBC, phiCD, phiDA, phiAC, phiBD, weightABCD, wlABCD", "def airydisk(unit_r, fno, wavelength):\n u_eff = unit_r * np.pi / wavelength / fno\n return abs(2 * jinc(u_eff)) ** 2", "def isothermal_depth_wyrtki1964(da_PT):\n\n # interpolate to finer vertical resolution (2.5m)\n da_interp = da_PT.interp(z=np.arange(0,da_PT.z.max(),2.5))\n\n # make land mask based on surface layer\n da_mask = da_PT.isel(z=0)*0.+1.\n\n # calculate rho-rho0\n da_diff = np.abs(da_interp-da_PT.isel(z=0))\n\n # remove values shallower than critcal value\n da_diff = da_diff.where(da_diff>0.5,other=99999)\n\n # find first index that have value larger than critical value\n z_ind = da_diff.argmin(dim='z',skipna=True)\n\n # used 2d index to find 2d depth map\n da_itd = da_diff.z[z_ind]*da_mask\n\n return da_itd", "def merdianArcLength(**kwargs):\n a = 0\n b = 0\n kwargs[\"radius\"] = \"M\"\n try:\n if kwargs[\"ref\"] == \"local\":\n a = 6378249.145\n b = 6356515\n elif kwargs[\"ref\"] == \"global\":\n a = 6378137\n b = 6356752.314\n elif kwargs[\"a\"] and kwargs[\"b\"]:\n a = kwargs[\"a\"]\n b = kwargs[\"b\"]\n except KeyError:\n return {\"erreur\": \"params a and b is required, you can use ref too which has two possible value: local and global\"}\n try:\n kwargs[\"phi\"] = kwargs[\"phi1\"]\n M1 = rayonDeCourbur(**kwargs)[\"M\"]\n kwargs[\"phi\"] = kwargs[\"phi2\"]\n M2 = rayonDeCourbur(**kwargs)[\"M\"]\n deltaPhi = abs(kwargs[\"phi2\"]-kwargs[\"phi1\"])\n if 2 <= deltaPhi < 5:\n e2 = 1-(b/a)**2\n Mm = (M1+M2)/2\n phiM = (kwargs[\"phi1\"]+kwargs[\"phi2\"])/2\n S = ((M1+M2+4*Mm)*deltaPhi +\n ((math.cos(math.radians(2*phiM))*deltaPhi**5)*(a*e2)/240))/6\n return {\"S\": S}\n elif deltaPhi < 2:\n e2 = 1-(b/a)**2\n Mm = (M1+M2)/2\n phiM = (kwargs[\"phi1\"]+kwargs[\"phi2\"])/2\n S = Mm*deltaPhi+(a*e2*math.cos(2*phiM)*deltaPhi**3)/3\n return {\"S\": S}\n except KeyError:\n return {\"error\": \"the function required 3 basics params phi1,phi2 and ref or a&b\"}", "def GetEpsilonBeer(Abs, conc, pathLength):\n return Abs / (conc * pathLength)", "def find_omega(filename_free,filename_close,filename_far,TI=0.11,wind_speed=8.):\n\n \"\"\"free FAST\"\"\"\n lines = np.loadtxt(filename_free,skiprows=8)\n Omega_free = np.mean(lines[:,6])\n\n \"\"\"waked FAST CLOSE\"\"\"\n lines = np.loadtxt(filename_close,skiprows=8)\n Omega_close = np.mean(lines[:,6])\n\n \"\"\"waked FAST FAR\"\"\"\n lines = np.loadtxt(filename_far,skiprows=8)\n Omega_far = np.mean(lines[:,6])\n\n\n \"\"\"setup the CCBlade loads\"\"\"\n turbineX_close = np.array([0.,126.4])*4.\n turbineX_far = np.array([0.,126.4])*10.\n\n turbineY_waked = np.array([0.,0.])\n\n hub_height = 90.\n\n free_speed = wind_speed\n close_speed = get_eff_turbine_speeds(turbineX_close, turbineY_waked, wind_speed,TI=TI)[1]\n far_speed = get_eff_turbine_speeds(turbineX_far, turbineY_waked, wind_speed,TI=TI)[1]\n\n print 'tip speed ratios'\n print 'free: ', Omega_free*126.4/2./free_speed\n print 'close: ', Omega_close*126.4/2./close_speed\n print 'far: ', Omega_far*126.4/2./far_speed\n\n 
# return flap_free_atm, edge_free_atm, Omega_free, free_speed, flap_close_atm, edge_close_atm, Omega_close, close_speed, flap_far_atm, edge_far_atm, Omega_far, far_speed\n return Omega_free, free_speed, Omega_close, close_speed, Omega_far, far_speed", "def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output", "def update():\n # TODO: Park the car 30 cm away from the closest orange cone.\n # Use both color and depth information to handle cones of multiple sizes.\n # You may wish to copy some of your code from lab2b.py\n global speed\n global angle\n global curState\n # Search for contours in the current color image\n update_contour()\n\n imgX = rc.camera.get_width()\n\n depth_image = rc.camera.get_depth_image()\n depth_image_adjust = (depth_image - 0.01) % 9999\n depth_image_adjust_blur = cv.GaussianBlur(depth_image_adjust, (11,11), 0)\n\n contour_x = contour_center[1]\n contour_y = contour_center[0]\n\n if contour_center is not None:\n angle = rc_utils.remap_range(contour_center[1],0,imgX,-1,1)\n\n contour_distance = depth_image_adjust_blur[contour_y][contour_x]\n\n print(contour_distance)\n # TODO: Park the car 30 cm away from the closest orange cone\n if curState == State.search:\n rc.drive.set_speed_angle(0.5, 1)\n \n if contour_center is not None:\n curState = State.approach\n\n elif curState == State.approach:\n # rc.drive.set_speed_angle(0.5, angle)\n\n if contour_distance > 50:\n rc.drive.set_speed_angle(0.3,angle)\n elif contour_distance > 32:\n rc.drive.set_speed_angle(0.1,angle)\n elif contour_distance == 32:\n rc.drive.set_speed_angle(-0.1,angle)\n 
elif contour_distance < 32:\n curState = State.stop\n print(\"stop\")\n\n elif curState == State.stop:\n rc.drive.set_speed_angle(0,0)\n\n pass", "def perfectrefl(wavelength):\n return 1.0", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def rod(lam, co2=400., lat=45., z=0., P=1013.25):\n Avogadro = codata.value('Avogadro constant')\n zs = 0.73737 * z + 5517.56 # effective mass-weighted altitude\n G = g(lat, zs)\n # air pressure at the pixel (i.e. at altitude) in hPa\n Psurf = (P * (1. - 0.0065 * z / 288.15) ** 5.255) * 1000. # air pressure at pixel location in dyn / cm2, which is hPa * 1000\n return raycrs(lam, co2) * Psurf * Avogadro/ma(co2)/G", "def derive_Damineli16(wavelength):\n # From their eq 19\n x = np.log10(2.159 / wavelength)\n log_A_AKs = -0.015 + 2.33*x + 0.522*x**2. - 3.001*x**3. + 2.034*x**4.\n\n # Now to convert this back to linear space\n A_AKs_at_wave = 10**log_A_AKs \n\n return A_AKs_at_wave", "def albedo_separation(albedo, Rs_1, F, fc, aleafv, aleafn, aleafl, adeadv,\n adeadn, adeadl, zs, iterations=10):\n # DAYTIME\n # Calculate potential (clear-sky) VIS and NIR solar components\n\n rad2deg = 180.0 / math.pi\n # deg2rad = math.pi / 180.0\n\n # Correct for curvature of atmos in airmas\n airmas = zs.expression(\n '(sqrt(cos(zs) ** 2 + 0.0025) - cos(zs)) / 0.00125', {'zs': zs})\n\n # Correct for refraction(good up to 89.5 deg.)\n airmas = airmas.where(\n zs.multiply(rad2deg).lt(89.5),\n zs.expression(\n 'airmas - (2.8 / (90.0 - zs_temp) ** 2)',\n {'airmas': airmas, 'zs_temp': zs.multiply(rad2deg)}))\n\n potbm1 = zs.expression(\n '600.0 * exp(-0.160 * airmas)', {'airmas': airmas})\n potvis = zs.expression(\n '(potbm1 + (600.0 - potbm1) * 0.4) * cos(zs)',\n {'potbm1': potbm1, 'zs': zs})\n # CGM - Not used\n potdif = zs.expression(\n '(600.0 - potbm1) * 0.4 * cos(zs)', {'potbm1': potbm1, 'zs': zs})\n uu = zs.expression('1.0 / cos(zs)', {'zs': zs}) \\\n .max(0.01)\n a = zs.expression(\n '10 ** (-1.195 + 0.4459 * axlog - 0.0345 * axlog * axlog)',\n {'axlog': uu.log10()})\n watabs = zs.expression('1320.0 * a', {'a': a})\n potbm2 = zs.expression(\n '720.0 * exp(-0.05 * airmas) - watabs',\n {'airmas': airmas, 'watabs': watabs})\n evaL = zs.expression(\n '(720.0 - potbm2 - watabs) * 0.54 * cos(zs)',\n {'potbm2': potbm2, 'watabs': watabs, 'zs': zs})\n potnir = zs.expression(\n 'evaL + potbm2 * cos(zs)',\n {'evaL': evaL, 'potbm2': potbm2, 'zs': zs})\n\n fclear = zs \\\n .expression(\n 'Rs_1 / (potvis + potnir)',\n {'potvis': potvis, 'potnir': potnir, 'Rs_1': Rs_1}) \\\n .clamp(0.01, 1.0) \\\n .where(zs.cos().lte(0.01), 1)\n \n # Partition SDN into VIS and NIR\n fvis = zs.expression(\n 'potvis / (potvis + potnir)', {'potvis': potvis, 'potnir': potnir})\n fnir = zs.expression(\n 'potnir / (potvis + potnir)', {'potvis': potvis, 'potnir': potnir})\n \n # Estimate direct beam and diffuse fraction in VIS and NIR wavebands\n fb1 = zs.expression(\n 'potbm1 * cos(zs) / potvis',\n {'potbm1': potbm1, 'potvis': potvis, 'zs': zs})\n fb2 = zs.expression(\n 'potbm2 * cos(zs) / potnir',\n {'potbm2': potbm2, 'potnir': potnir, 'zs': zs})\n\n dirvis = zs \\\n .expression(\n 'fb1 * (1.0 - ((0.9 - ratiox) / 0.7) ** 0.6667)',\n {'fb1': fb1, 'ratiox': fclear.min(0.9)}) \\\n .min(fb1)\n dirnir = zs \\\n .expression(\n 'fb1 * (1.0 - ((0.88 - ratiox) / 0.68) ** 0.6667)',\n {'fb1': fb1, 'ratiox': fclear.min(0.88)}) \\\n .min(fb1)\n\n dirvis = dirvis.where(dirvis.lt(0.01).And(dirnir.gt(0.01)), 0.011)\n dirnir = dirnir.where(dirnir.lt(0.01).And(dirvis.gt(0.01)), 
0.011)\n\n difvis = zs.expression('1.0 - dirvis', {'dirvis': dirvis})\n difnir = zs.expression('1.0 - dirnir', {'dirnir': dirnir})\n \n # Correction for NIGHTIME\n ind = zs.cos().lte(0.01)\n fvis = fvis.where(ind, 0.5)\n fnir = fnir.where(ind, 0.5)\n difvis = difvis.where(ind, 0.0)\n difnir = difnir.where(ind, 0.0)\n\n\n # CGM - Not used anymore in function since e_atm is not computed\n # Rs0 = zs \\\n # .expression('potvis + potnir', {'potnir': potnir, 'potvis': potvis}) \\\n # .where(zs.cos().lte(0.01), 0.0)\n\n #**********************************************\n # Compute Albedo\n ratio_soil = 2.\n\n # CGM - Initialize rsoilv and fg from F and albedo\n rsoilv = F.multiply(0).add(0.12)\n fg = albedo.multiply(0).add(1)\n # rsoilv = ee.Image.constant(0.12)\n # fg = ee.Image.constant(1.0)\n\n # print('\\nairmas: {:>20.14f}'.format(utils.image_value(airmas).values()[0]))\n # print('airmas: {:>30.24f}'.format(utils.image_value(airmas).values()[0]))\n # print('potbm1: {:>20.14f}'.format(utils.image_value(potbm1).values()[0]))\n # print('potvis: {:>20.14f}'.format(utils.image_value(potvis).values()[0]))\n # print('potbm2: {:>20.14f}'.format(utils.image_value(potbm2).values()[0]))\n # print('potnir: {:>20.14f}'.format(utils.image_value(potnir).values()[0]))\n # print('fclear: {:>20.14f}'.format(utils.image_value(fclear).values()[0]))\n # print('fvis: {:>20.14f}'.format(utils.image_value(fvis).values()[0]))\n # print('fnir: {:>20.14f}'.format(utils.image_value(fnir).values()[0]))\n # print('fb1: {:>20.14f}'.format(utils.image_value(fb1).values()[0]))\n # print('dirvis: {:>20.14f}'.format(utils.image_value(dirvis).values()[0]))\n # print('dirnir: {:>20.14f}'.format(utils.image_value(dirnir).values()[0]))\n # print('difvis: {:>20.14f}'.format(utils.image_value(difvis).values()[0]))\n # print('difnir: {:>20.14f}'.format(utils.image_value(difnir).values()[0]))\n # print('rsoilv: {:>20.14f}'.format(utils.image_value(rsoilv).values()[0]))\n # print('fg: {:>20.14f}'.format(utils.image_value(fg).values()[0]))\n # print('aleafv: {:>20.14f}'.format(utils.image_value(aleafv).values()[0]))\n # print('aleafn: {:>20.14f}'.format(utils.image_value(aleafn).values()[0]))\n # print('adeadv: {:>20.14f}'.format(utils.image_value(adeadv).values()[0]))\n # print('adeadn: {:>20.14f}'.format(utils.image_value(adeadn).values()[0]))\n\n # CGM - Switched to an iterate call\n def iter_func(n, prev):\n # Extract inputs from previous iteration\n # CGM - Variables that are commented out only need to be returned\n # akb = ee.Image(ee.Dictionary(prev).get('akb'));\n # albedo_c = ee.Image(ee.Dictionary(prev).get('albedo_c'));\n # albedo_s = ee.Image(ee.Dictionary(prev).get('albedo_s'));\n # ameann = ee.Image(ee.Dictionary(prev).get('ameann'));\n # ameanv = ee.Image(ee.Dictionary(prev).get('ameanv'));\n # diff = ee.Image(ee.Dictionary(prev).get('diff'));\n fg_iter = ee.Image(ee.Dictionary(prev).get('fg'));\n # rbcpyn = ee.Image(ee.Dictionary(prev).get('rbcpyn'));\n # rbcpyv = ee.Image(ee.Dictionary(prev).get('rbcpyv'));\n rsoilv_iter = ee.Image(ee.Dictionary(prev).get('rsoilv'));\n # taudn = ee.Image(ee.Dictionary(prev).get('taudn'));\n # taudv = ee.Image(ee.Dictionary(prev).get('taudv'));\n\n rsoiln = rsoilv_iter.multiply(ratio_soil)\n # rsoiln = .expression(\n # 'rsoilv * ratio_soil',\n # {'rsoilv': rsoilv, 'ratio_soil': ratio_soil})\n\n # Weighted live/dead leaf average properties\n ameanv = aleafv.expression(\n 'aleafv * fg + adeadv * (1.0 - fg)',\n {'adeadv': adeadv, 'aleafv': aleafv, 'fg': fg_iter})\n ameann = 
aleafn.expression(\n 'aleafn * fg + adeadn * (1.0 - fg)',\n {'adeadn': adeadn, 'aleafn': aleafn, 'fg': fg_iter})\n ameanl = aleafl.expression(\n 'aleafl * fg + adeadl * (1.0 - fg)',\n {'adeadl': adeadl, 'aleafl': aleafl, 'fg': fg_iter})\n\n # DIFFUSE COMPONENT\n #*******************************\n # Canopy reflection (deep canopy)\n # Fit to Fig 15.4 for x=1\n akd = F.expression('-0.0683 * log(F) + 0.804', {'F': F})\n\n # Eq 15.7\n rcpyn = ameann.expression(\n '(1.0 - sqrt(ameann)) / (1.0 + sqrt(ameann))', {'ameann': ameann})\n rcpyv = ameanv.expression(\n '(1.0 - sqrt(ameanv)) / (1.0 + sqrt(ameanv))', {'ameanv': ameanv})\n # rcpyl = ameanl.expression(\n # '(1.0 - sqrt(ameanl)) / (1.0 + sqrt(ameanl))', {'ameanl': ameanl})\n\n # Eq 15.8\n rdcpyn = akd.expression(\n '2.0 * akd * rcpyn / (akd + 1.0)', {'akd': akd, 'rcpyn': rcpyn})\n rdcpyv = akd.expression(\n '2.0 * akd * rcpyv / (akd + 1.0)', {'akd': akd, 'rcpyv': rcpyv})\n # rdcpyl = akd.expression(\n # '2.0 * akd * rcpyl / (akd + 1.0)', {'akd': akd, 'rcpyl': rcpyl})\n\n # Canopy transmission (VIS)\n expfac = F.expression(\n 'sqrt(ameanv) * akd * F', {'akd': akd, 'ameanv': ameanv, 'F': F})\n expfac = expfac.max(0.001)\n # expfac = expfac.where(expfac.lt(0.001), 0.001)\n xnum = F.expression(\n '(rdcpyv * rdcpyv - 1.0) * exp(-expfac)',\n {'rdcpyv': rdcpyv, 'expfac': expfac})\n xden = F.expression(\n '(rdcpyv * rsoilv - 1.0) + '\n 'rdcpyv * (rdcpyv - rsoilv) * exp(-2.0 * expfac)',\n {'expfac': expfac, 'rdcpyv': rdcpyv, 'rsoilv': rsoilv_iter})\n # Eq 15.11\n taudv = F.expression('xnum / xden', {'xden': xden, 'xnum': xnum})\n # taudv = xnum.divide(xden)\n\n # Canopy transmission (NIR)\n expfac = F.expression(\n 'sqrt(ameann) * akd * F', {'akd': akd, 'ameann': ameann, 'F': F})\n expfac = expfac.max(0.001)\n # expfac = expfac.where(expfac.lt(0.001), 0.001)\n xnum = F.expression(\n '(rdcpyn * rdcpyn - 1.0) * exp(-expfac)',\n {'expfac': expfac, 'rdcpyn': rdcpyn})\n xden = F.expression(\n '(rdcpyn * rsoiln - 1.0) + '\n 'rdcpyn * (rdcpyn - rsoiln) * exp(-2.0 * expfac)',\n {'expfac': expfac, 'rdcpyn': rdcpyn, 'rsoiln': rsoiln})\n # Eq 15.11\n taudn = F.expression('xnum / xden', {'xden': xden, 'xnum': xnum})\n # taudn = xnum.divide(nden)\n\n # Canopy transmission (LW)\n taudl = F.expression(\n 'exp(-sqrt(ameanl) * akd * F)',\n {'akd': akd, 'ameanl': ameanl, 'F': F})\n\n # Diffuse albedo for generic canopy\n # Eq 15.9\n fact = F.expression(\n '((rdcpyn - rsoiln) / (rdcpyn * rsoiln - 1.0)) * '\n 'exp(-2.0 * sqrt(ameann) * akd * F)',\n {'akd': akd, 'ameann': ameann, 'F': F, 'rdcpyn': rdcpyn,\n 'rsoiln': rsoiln})\n albdn = F.expression(\n '(rdcpyn + fact) / (1.0 + rdcpyn * fact)',\n {'fact': fact, 'rdcpyn': rdcpyn})\n\n # Eq 15.9\n fact = F.expression(\n '((rdcpyv - rsoilv) / (rdcpyv * rsoilv - 1.0)) * '\n 'exp(-2.0 * sqrt(ameanv) * akd * F)',\n {'akd': akd, 'ameanv': ameanv, 'F': F, 'rdcpyv': rdcpyv,\n 'rsoilv': rsoilv_iter})\n albdv = F.expression(\n '(rdcpyv + fact) / (1.0 + rdcpyv * fact)',\n {'fact': fact, 'rdcpyv': rdcpyv})\n\n # BEAM COMPONENT\n #*******************************\n # Canopy reflection (deep canopy)\n akb = zs.expression('0.5 / cos(zs)', {'zs': zs})\n akb = akb.where(zs.cos().lte(0.01), 0.5)\n\n # Eq 15.7\n rcpyn = ameann.expression(\n '(1.0 - sqrt(ameann)) / (1.0 + sqrt(ameann))',\n {'ameann': ameann})\n rcpyv = ameanv.expression(\n '(1.0 - sqrt(ameanv)) / (1.0 + sqrt(ameanv))',\n {'ameanv': ameanv})\n\n # Eq 15.8\n rbcpyn = rcpyn.expression(\n '2.0 * akb * rcpyn / (akb + 1.0)', {'akb': akb, 'rcpyn': rcpyn})\n rbcpyv = 
rcpyv.expression(\n '2.0 * akb * rcpyv / (akb + 1.0)', {'akb': akb, 'rcpyv': rcpyv})\n\n # Beam albedo for generic canopy\n # Eq 15.9\n fact = F.expression(\n '((rbcpyn - rsoiln) / (rbcpyn * rsoiln - 1.0)) * '\n 'exp(-2.0 * sqrt(ameann) * akb * F)',\n {'akb': akb, 'ameann': ameann, 'F': F, 'rbcpyn': rbcpyn,\n 'rsoiln': rsoiln})\n albbn = F.expression(\n '(rbcpyn + fact) / (1.0 + rbcpyn * fact)',\n {'fact': fact, 'rbcpyn': rbcpyn})\n\n # Eq 15.9\n fact = F.expression(\n '((rbcpyv - rsoilv) / (rbcpyv * rsoilv - 1.0)) * '\n 'exp(-2.0 * sqrt(ameanv) * akb * F)',\n {'akb': akb, 'ameanv': ameanv, 'F': F, 'rbcpyv': rbcpyv,\n 'rsoilv': rsoilv_iter})\n albbv = F.expression(\n '(rbcpyv + fact) / (1.0 + rbcpyv * fact)',\n {'fact': fact, 'rbcpyv': rbcpyv})\n\n # CGM - finish\n # Weighted albedo (canopy)\n albedo_c = F.expression(\n 'fvis * (dirvis * albbv + difvis * albdv) + '\n 'fnir * (dirnir * albbn + difnir * albdn)',\n {'albbn': albbn, 'albbv': albbv, 'albdn': albdn, 'albdv': albdv,\n 'difnir': difnir, 'difvis': difvis, 'dirvis': dirvis,\n 'dirnir': dirnir, 'fnir': fnir, 'fvis': fvis, })\n albedo_c = albedo_c.where(\n zs.cos().lte(0.01),\n F.expression(\n 'fvis * (difvis * albdv) + fnir * (difnir * albdn)',\n {'albdn': albdn, 'albdv': albdv, 'difnir': difnir,\n 'difvis': difvis, 'fnir': fnir, 'fvis': fvis}))\n\n albedo_s = rsoilv.expression(\n 'fvis * rsoilv + fnir * rsoiln',\n {'fnir': fnir, 'fvis': fvis, 'rsoiln': rsoiln, 'rsoilv': rsoilv_iter})\n\n albedo_avg = fc.expression(\n '(fc * albedo_c) + ((1 - fc) * albedo_s)',\n {'albedo_c': albedo_c, 'albedo_s': albedo_s, 'fc': fc})\n diff = albedo_avg.subtract(albedo)\n # diff = albedo_avg.expression(\n # 'albedo_avg - albedo',\n # {'albedo_avg': albedo_avg, 'albedo': albedo})\n\n # CGM - Check what this is doing\n # Extra select call is needed if LAI is multiband\n # Added fc_mask call\n fc_mask = fc.select([0]).lt(0.75)\n rsoilv_iter = rsoilv_iter \\\n .where(fc_mask.And(diff.lte(-0.01)), rsoilv_iter.add(0.01)) \\\n .where(fc_mask.And(diff.gt(0.01)), rsoilv_iter.add(-0.01))\n # # CGM - IDL function\n # rsoilv = ((fc lt 0.75) * (\n # ((abs(diff) le 0.01) * rsoilv) +\n # ((diff le -0.01)*(rsoilv + 0.01)) +\n # ((diff gt 0.01)*(rsoilv - 0.01))))+\n # ((fc ge 0.75) * rsoilv)\n\n # CGM - Extra select call is needed since fc is multiband\n fc_mask = fc.select([0]).gte(0.75)\n fg_iter = fg_iter \\\n .where(fc_mask.And(diff.lte(-0.01)), fg_iter.subtract(0.05)) \\\n .where(fc_mask.And(diff.gt(0.01)), fg_iter.add(0.05)) \\\n .clamp(0.01, 1)\n # # CGM - IDL function\n # fg = ((fc ge 0.75) * (\n # ((abs(diff) le 0.01)*fg) +\n # ((diff le -0.01) * (fg - 0.05d0)) +\n # ((diff gt 0.01) * (fg + 0.05d0)))) +\n # ((fc lt 0.75) * fg)\n\n return ee.Dictionary({\n 'akb': akb, 'albedo_c': albedo_c, 'albedo_s': albedo_s,\n 'ameann': ameann, 'ameanv': ameanv, 'diff': diff, 'fg': fg_iter,\n 'rbcpyn': rbcpyn, 'rbcpyv': rbcpyv,\n 'rsoiln': rsoiln, 'rsoilv': rsoilv_iter,\n 'taudn': taudn, 'taudv': taudv\n })\n\n # Iterate the function n times\n input_images = ee.Dictionary({\n 'akb': None, 'albedo_c': None, 'albedo_s': None,\n 'ameann': None, 'ameanv': None, 'diff': None, 'fg': fg,\n 'rbcpyn': None, 'rbcpyv': None,\n 'rsoiln': None, 'rsoilv': rsoilv,\n 'taudn': None, 'taudv': None\n })\n iter_output = ee.Dictionary(\n # ee.List.sequence(1, iterations) \\\n ee.List.repeat(input_images, iterations) \\\n .iterate(iter_func, input_images))\n\n # Unpack the iteration output\n akb = ee.Image(iter_output.get('akb'))\n albedo_c = ee.Image(iter_output.get('albedo_c'))\n albedo_s = 
ee.Image(iter_output.get('albedo_s'))\n ameann = ee.Image(iter_output.get('ameann'))\n ameanv = ee.Image(iter_output.get('ameanv'))\n diff = ee.Image(iter_output.get('diff'))\n rbcpyn = ee.Image(iter_output.get('rbcpyn'))\n rbcpyv = ee.Image(iter_output.get('rbcpyv'))\n rsoilv = ee.Image(iter_output.get('rsoilv'))\n rsoiln = ee.Image(iter_output.get('rsoiln'))\n # rsoiln = rsoilv.multiply(ratio_soil)\n taudn = ee.Image(iter_output.get('taudn'))\n taudv = ee.Image(iter_output.get('taudv'))\n # print('\\nakb: {:>20.14f}'.format(utils.image_value(akb).values()[0]))\n # print('albedo_c: {:>20.14f}'.format(utils.image_value(albedo_c).values()[0]))\n # print('albedo_s: {:>20.14f}'.format(utils.image_value(albedo_s).values()[0]))\n # print('ameann: {:>20.14f}'.format(utils.image_value(ameann).values()[0]))\n # print('ameanv: {:>20.14f}'.format(utils.image_value(ameanv).values()[0]))\n # print('diff: {:>20.14f}'.format(utils.image_value(diff).values()[0]))\n # print('rbcpyn: {:>20.14f}'.format(utils.image_value(rbcpyn).values()[0]))\n # print('rbcpyv: {:>20.14f}'.format(utils.image_value(rbcpyv).values()[0]))\n # print('rsoilv: {:>20.14f}'.format(utils.image_value(rsoilv).values()[0]))\n # print('rsoiln: {:>20.14f}'.format(utils.image_value(rsoiln).values()[0]))\n # print('taudv: {:>20.14f}'.format(utils.image_value(taudv).values()[0]))\n # print('taudn: {:>20.14f}'.format(utils.image_value(taudn).values()[0]))\n\n # if a solution is not reached, alb_c=alb_s=alb\n albedo_c = albedo_c.where(diff.abs().gt(0.05), albedo)\n albedo_s = albedo_s.where(diff.abs().gt(0.05), albedo)\n\n # Direct beam+scattered canopy transmission coefficient (visible)\n expfac = F.expression(\n 'sqrt(ameanv) * akb * F',\n {'ameanv': ameanv, 'akb': akb, 'F': F})\n xnum = F.expression(\n '(rbcpyv * rbcpyv - 1.0) * exp(-expfac)',\n {'rbcpyv': rbcpyv, 'expfac': expfac})\n xden = F.expression(\n '(rbcpyv * rsoilv - 1.0) + '\n 'rbcpyv * (rbcpyv - rsoilv) * exp(-2.0 * expfac)',\n {'rbcpyv': rbcpyv, 'rsoilv': rsoilv, 'expfac': expfac})\n # Eq 15.11\n taubtv = F.expression('xnum / xden', {'xnum': xnum, 'xden': xden})\n # print('\\nexpfac: {:>20.14f}'.format(utils.image_value(expfac).values()[0]))\n # print('rbcpyv: {:>20.14f}'.format(utils.image_value(rbcpyv).values()[0]))\n # print('rsoilv: {:>20.14f}'.format(utils.image_value(rsoilv).values()[0]))\n # print('xnum: {:>20.14f}'.format(utils.image_value(xnum).values()[0]))\n # print('xden: {:>20.14f}'.format(utils.image_value(xden).values()[0]))\n # print('taubtv: {:>20.14f}'.format(utils.image_value(taubtv).values()[0]))\n \n # Direct beam+scattered canopy transmission coefficient (NIR)\n expfac = F.expression(\n 'sqrt(ameann) * akb * F',\n {'ameann': ameann, 'akb': akb, 'F': F})\n xnum = F.expression(\n '(rbcpyn * rbcpyn - 1.0) * exp(-expfac)',\n {'rbcpyn': rbcpyn, 'expfac': expfac})\n xden = F.expression(\n '(rbcpyn * rsoiln - 1.0) + '\n 'rbcpyn * (rbcpyn - rsoiln) * exp(-2.0 * expfac)',\n {'rbcpyn': rbcpyn, 'rsoiln': rsoiln, 'expfac': expfac})\n # Eq 15.11\n taubtn = F.expression('xnum / xden', {'xnum': xnum, 'xden': xden})\n # print('\\nexpfac: {:>20.14f}'.format(utils.image_value(expfac).values()[0]))\n # print('rbcpyn: {:>20.14f}'.format(utils.image_value(rbcpyn).values()[0]))\n # print('rsoiln: {:>20.14f}'.format(utils.image_value(rsoiln).values()[0]))\n # print('xnum: {:>20.14f}'.format(utils.image_value(xnum).values()[0]))\n # print('xden: {:>20.14f}'.format(utils.image_value(xden).values()[0]))\n # print('taubtn: 
{:>20.14f}'.format(utils.image_value(taubtn).values()[0]))\n \n # Shortwave radiation components\n tausolar = F.expression(\n 'fvis * (difvis * taudv + dirvis * taubtv) + '\n 'fnir * (difnir * taudn + dirnir * taubtn)',\n {'difnir': difnir, 'difvis': difvis,\n 'dirnir': dirnir, 'dirvis': dirvis,\n 'fnir': fnir, 'fvis': fvis,\n 'taubtn': taubtn, 'taubtv': taubtv,\n 'taudn': taudn, 'taudv': taudv})\n # print('tausolar: {}'.format(utils.image_value(tausolar).values()[0]))\n # print('Rs_1: {}'.format(utils.image_value(Rs_1).values()[0]))\n Rs_c = Rs_1.expression(\n 'Rs_1 * (1.0 - tausolar)', {'Rs_1': Rs_1, 'tausolar': tausolar})\n Rs_s = Rs_1.expression(\n 'Rs_1 * tausolar', {'Rs_1': Rs_1, 'tausolar': tausolar})\n\n # print('\\nRs_c: {:>20.14f}'.format(utils.image_value(Rs_c).values()[0]))\n # print('Rs_s: {:>20.14f}'.format(utils.image_value(Rs_s).values()[0]))\n # print('albedo_c: {:>20.14f}'.format(utils.image_value(albedo_c).values()[0]))\n # print('albedo_s: {:>20.14f}'.format(utils.image_value(albedo_s).values()[0]))\n\n return Rs_c, Rs_s, albedo_c, albedo_s", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def get_depth_pwm(self, goal):\n ddiff = goal.target_depth - self.curr_depth\n if self.curr_depth < -1:\n rospy.loginfo('Depth sensor is not initialized')\n return self.pwm_center\n zout = ddiff * self.depth_p\n # limit output if necassary\n if abs(zout) > self.depth_pmax:\n if zout < 0:\n zout = -self.depth_pmax\n else:\n zout = self.depth_pmax\n zout += self.pwm_center\n return zout", "def ApproxConc280(Abs, pathLength = 10):\n pass", "def get_wl_band(radar_frequency):\n return 0 if (30 < radar_frequency < 40) else 1", "def depth_estimation(x_left, x_right, f=33.4, d=114):\n depth = abs(f * d / ((x_left - x_right) / 72 * 2.54)) / 100 # - 0.418879\n return depth", "def DR(R,Pc):\n return r1*R*(K1**B1/(K1**B1 + (A/R)**B1))*(S/(S + R*Pc + Pc)) \\\n - gwt*A - r2*R*(K2**B2/(K2**B2 + (A)**B2))*(S/(S + R*Pc + Pc) ) \\\n *(R*M)/(K3 + R*M) + R*gc", "def one_transition_spectrum_ld(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n ld = tr[\"ld\"] # linear dichroism strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = 
numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = ld*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def find_large_separation(self):\n\n x = self.modes['n'] # radial order\n y = self.modes['freq'] # frequency\n wid = (0.66*self.numax**0.88)/2/np.sqrt(2*np.log(2.0))\n w = (np.exp((-(y-self.numax)**2)/(2*wid**2))) # weight\n\n mN = np.sum(w)*np.sum(w*x*y) - np.sum(w*x)*np.sum(w*y)\n D = np.sum(w)*np.sum(w*x**2) - np.sum(w*x)**2\n Dn = mN/D\n #print Dn\n\n return Dn", "def albedoc(solin, fsntoa):\n var = (solin - fsntoa) / solin\n var.units = \"dimensionless\"\n var.long_name = \"TOA albedo clear-sky\"\n return var", "def rayleigh(th,r,wl,a,n1,n2):\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n return ((k**2)*(a**3)*((n_2-1)/(n_2+2))/r)*np.array([[np.cos(th), 0],[0,1]])", "def best_deriv_drops_arclengths_r(fname):\r\n dat = sio.loadmat(fname,struct_as_record=False,squeeze_me=True)\r\n output = dat['output']\r\n y = dat['y'].ravel()\r\n cbool = dat['cbool'].astype('bool').ravel()\r\n R = {}\r\n inputs_list =[]\r\n arclength_list =[]\r\n kernels=[]\r\n for model in output._fieldnames:\r\n print('\\t'+model)\r\n arclength = output.__getattribute__(model).arclengths\r\n if len(arclength)==0 or arclength == 'all':\r\n compare_bool = cbool\r\n else:\r\n arclength_bool = dat['arclengths'].__getattribute__(arclength).astype('bool')\r\n compare_bool = np.logical_and(arclength_bool,cbool)\r\n\r\n inputs_list.append(output.__getattribute__(model).inputs)\r\n arclength_list.append(arclength)\r\n yhat = output.__getattribute__(model).yhat\r\n R[model],_,kernels = get_corr(y,yhat,compare_bool)\r\n\r\n R['kernels'] = kernels\r\n df = pd.DataFrame(R)\r\n df['id'] = os.path.basename(fname)[:10]\r\n # df['inputs'] = inputs_list\r\n # df['arclengths'] = arclength_list\r\n return(df)", "def calculate_wavelength(period, depth, gravity):\r\n return geometry.gmCalculateWavelength(period, depth, gravity)", "def mercier(self):\n\n # See Overleaf note \"Mercier criterion near the magnetic axis- detailed notes\".\n # See also \"20200604-02 Checking sign in Mercier DGeod near axis.docx\"\n\n # Shorthand:\n d_l_d_phi = self.d_l_d_phi\n B0 = self.B0\n G0 = self.G0\n p2 = self.p2\n etabar = self.etabar\n curvature = self.curvature\n sigma = self.sigma\n iotaN = self.iotaN\n iota = self.iota\n pi = np.pi\n\n #integrand = d_l_d_phi * (Y1c * Y1c + X1c * (X1c + Y1s)) / (Y1c * Y1c + (X1c + Y1s) * (X1c + Y1s))\n integrand = d_l_d_phi * (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*sigma*sigma + etabar*etabar*curvature*curvature) \\\n / (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*(1+sigma*sigma) + 2*etabar*etabar*curvature*curvature)\n\n integral = np.sum(integrand) * self.d_phi * self.nfp * 2 * pi / self.axis_length\n\n #DGeod_times_r2 = -(2 * sG * spsi * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar &\n self.DGeod_times_r2 = -(2 * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar \\\n / (pi * pi * pi * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * iotaN * iotaN)) \\\n * integral\n\n self.d2_volume_d_psi2 = 4*pi*pi*abs(G0)/(B0*B0*B0)*(3*etabar*etabar - 4*self.B20_mean/B0 + 2 * (self.G2 + iota * self.I2)/G0)\n\n self.DWell_times_r2 = (mu0 * p2 * abs(G0) / (8 * pi * pi * pi * pi * B0 * B0 * B0)) * \\\n 
(self.d2_volume_d_psi2 - 8 * pi * pi * mu0 * p2 * abs(G0) / (B0 * B0 * B0 * B0 * B0))\n\n self.DMerc_times_r2 = self.DWell_times_r2 + self.DGeod_times_r2", "def isothermal_depth_wyrtki1964_gradient(da_PT):\n\n # make land mask based on surface layer\n da_mask = da_PT.isel(z=0)*0.+1.\n\n # calculate drho/dz\n da_PT_dz = da_PT.differentiate('z') # kg/m^4\n\n # interpolate to finer vertical resolution (2.5m)\n da_interp = da_PT_dz.interp(z=np.arange(0,da_PT_dz.z.max(),2.5))\n\n # remove values shallower than critcal value\n da_interp_masked = da_interp.where(da_interp>0.02,other=99999)\n\n # find first index that have value larger than critical value\n z_ind = da_interp_masked.argmin(dim='z',skipna=True)\n\n # used 2d index to find 2d depth map\n da_itd = da_interp.z[z_ind]*da_mask\n\n return da_itd", "def isothermal_depth_wyrtki1964_gradient(da_PT):\n\n # make land mask based on surface layer\n da_mask = da_PT.isel(z=0)*0.+1.\n\n # calculate drho/dz\n da_PT_dz = da_PT.differentiate('z') # kg/m^4\n\n # interpolate to finer vertical resolution (2.5m)\n da_interp = da_PT_dz.interp(z=np.arange(0,da_PT_dz.z.max(),2.5))\n\n # remove values shallower than critcal value\n da_interp_masked = da_interp.where(da_interp>0.02,other=99999)\n\n # find first index that have value larger than critical value\n z_ind = da_interp_masked.argmin(dim='z',skipna=True)\n\n # used 2d index to find 2d depth map\n da_itd = da_interp.z[z_ind]*da_mask\n\n return da_itd", "def computeA(diameter):\n radius = diameter / 2.0\n return np.pi * (radius**2)", "def complete_spectrum_time(cn, Ahr_flat = 0.2, wantSNR = 10.0, bandwidth = 0.2, architecture = \"A\",\n plot = False, verbose = False):\n\n # If the coronagraph model has already been run...\n if cn._computed:\n # Use the existing stellar flux\n fstar = cn.solhr\n else:\n # Otherwise use the solar flux\n fstar = FSTAR\n\n if plot: fig, ax = plt.subplots()\n\n cc = [\"C0\", \"C2\", \"C3\"]\n t_chan = np.zeros(len(CHANNELS))\n Nbands_per_chan = np.zeros(len(CHANNELS))\n t_per_band_per_chan = []\n full_lam = []\n full_dlam = []\n full_Cobs = []\n full_Cratio = []\n full_Csig = []\n pct_obs_iwa = []\n lammax_obs_iwa = []\n lam_extrema = []\n\n ibp = 0\n\n # Loop over telescope channels\n for j, channel in enumerate(CHANNELS):\n\n t_tmp = []\n\n # Get the channel specific telescope parameters\n luvoir = default_luvoir(channel=channel, architecture = architecture)\n cn.telescope = luvoir\n\n if verbose: print(channel, luvoir.lammin, luvoir.lammax)\n\n lam_extrema.append(luvoir.lammin)\n lam_extrema.append(luvoir.lammax)\n\n # Calculate the bandpass edges\n edges = calculate_bandpass_edges(luvoir.lammin, luvoir.lammax, bandwidth = bandwidth)\n\n # Calculate the number of bandpasses\n Nbands = len(edges) - 1\n Nbands_per_chan[j] = Nbands\n\n # Run count rates (necessary to generate new wavelength grid)\n #cn.run_count_rates(spectroscopy.AHR, spectroscopy.LAMHR, spectroscopy.FSTAR)\n\n # Get new wavelength grid\n #l_grid, dl_grid = get_lam_dlam(cn)\n\n # Calculate how much of the spectrum is observable\n cnc = copy.deepcopy(cn)\n cnc.run_count_rates(AHR, LAMHR, FSTAR)\n pct, lammax_obs = calc_observable_spectrum(cnc)\n pct_obs_iwa.append(pct)\n lammax_obs_iwa.append(lammax_obs)\n\n # Loop over bandpasses\n for i in range(Nbands):\n\n if (type(wantSNR) is float) or (type(wantSNR) is int):\n wSNR = wantSNR\n else:\n wSNR = wantSNR[ibp]\n\n # Get the max, min, and middle wavelenths for this bandpass\n lammin = edges[i]\n lammax = edges[i+1]\n lammid = 0.5*(lammax + lammin)\n\n # Set 
telescope wavelength range\n cn.telescope.lammin = lammin\n cn.telescope.lammax = lammax\n\n if channel == \"UV\":\n cn.telescope.lam = np.array([lammid])\n cn.telescope.dlam = np.array([lammax - lammin])\n\n # Set spectrum to use for exposure time calcs\n # Using flat spectrum so not biased by bottom of bands\n Ahr_flat = Ahr_flat * np.ones(len(LAMHR))\n\n # Run count rates (necessary to generate new wavelength grid)\n cn.run_count_rates(Ahr_flat, LAMHR, fstar)\n\n # Calculate exposure times to wantSNR\n etimes = determine_exposure_time(cn, None, plot_snr_curves=False,\n plot_spectrum=False, wantSNR=wSNR, ref_lam = lammid)\n t_ref_lam = etimes[-1]\n\n # Re-do count rate calcs for true Earth spectrum\n cn.run_count_rates(AHR, LAMHR, fstar)\n\n # Draw random samples of data for a plot\n cn.make_fake_data(texp=t_ref_lam)\n\n if verbose: print(lammid, t_ref_lam)\n\n # Plot\n if plot:\n ax.axvspan(lammin, lammax, alpha = 0.2, color = cc[j])\n #ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\", zorder = 100)\n ax.plot(cn.lam, cn.Cobs, \"o\", ms = 3.0, alpha = 1.0, color = \"w\", zorder = 70)\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\", zorder = 70)\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"$F_p / F_s$\")\n\n # Save values\n t_tmp.append(t_ref_lam)\n full_lam.append(cn.lam)\n full_dlam.append(cn.dlam)\n full_Cratio.append(cn.Cratio)\n full_Cobs.append(cn.Cobs)\n full_Csig.append(cn.Csig)\n\n # Add time\n if np.isfinite(t_ref_lam):\n t_chan[j] += t_ref_lam\n\n ibp += 1\n\n # Save tmp times per band\n t_per_band_per_chan.append(t_tmp)\n\n # Deal with the \"two channels at a time\" thing\n t_tot = apply_two_channels(t_chan)\n\n spectrum = (np.array(full_lam),\n np.array(full_dlam),\n np.array(full_Cratio),\n np.array(full_Cobs),\n np.array(full_Csig))\n iwa = (pct_obs_iwa, lammax_obs_iwa)\n\n if plot:\n lam_extrema = np.array(lam_extrema)\n cn.telescope.lammin = np.min(lam_extrema)\n cn.telescope.lammax = np.max(lam_extrema)\n cn.telescope.resolution = 140.\n # Re-do count rate calcs for true Earth spectrum\n cn.run_count_rates(AHR, LAMHR, FSTAR)\n ax.plot(cn.lam, cn.Cratio, color = \"grey\", zorder = 80, lw = 3.0)\n ax.plot(cn.lam, cn.Cratio, color = \"w\", zorder = 80, lw = 2.0)\n\n return t_tot, t_per_band_per_chan, spectrum, iwa", "def nrefrac(wavelength, density=1.0):\n\n # The IAU standard for conversion from air to vacuum wavelengths is given\n # in Morton (1991, ApJS, 77, 119). 
For vacuum wavelengths (VAC) in\n # Angstroms, convert to air wavelength (AIR) via: \n\n # AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4)\n\n try:\n wl = np.array(wavelength)\n except TypeError:\n return None\n\n wl2inv = (1e4/wl)**2\n refracstp = 272.643 + 1.2288 * wl2inv + 3.555e-2 * wl2inv**2\n return density * refracstp", "def Ag():\n # return load_material(miepy.__path__[0] + \"/materials/ag.npy\")\n\n wp = 9.01\n sig = [1.01889808, 0.62834151]\n f = [0,5.05635462]\n gam = [0.01241231, 0.54965831]\n wav = np.linspace(300,1100,1000)\n return drude_lorentz(wp,sig,f,gam,wav)", "def molar_mass_dry_air():\n return 28.9647", "def breathIO(self, bdry, dmap):\n cur_bdry = np.array([bdry[0][1], bdry[3][1], bdry[1][0], bdry[2][0]])\n if len(self.ref_bdry) == 0:\n # setup reference frame's boundary (up, down, left and right)\n self.ref_bdry = cur_bdry\n self.ref_dmap = dmap\n else:\n ubdry = np.array([int(min(cur_bdry[0], self.ref_bdry[0])),\n int(max(cur_bdry[1], self.ref_bdry[1])),\n int(max(cur_bdry[2], self.ref_bdry[2])),\n int(min(cur_bdry[3], self.ref_bdry[3]))])\n blk_diff = gf_2D((dmap-self.ref_dmap)[ubdry[1]:ubdry[0], ubdry[2]:ubdry[3]], 5)\n self.breath_list.append(np.mean(blk_diff))", "def retrieve_EchoDepth(\n ds,\n threshold,\n variable=\"zFactorFinal\",\n radar_frequency=\"Ku\",\n min_threshold=0,\n mask_liquid_phase=False,\n):\n # Retrieve required DataArrays\n da = get_variable_dataarray(ds, variable=variable)\n if len(da[\"radar_frequency\"].data) != 1:\n da = da.sel({\"radar_frequency\": radar_frequency})\n da_height = ds[\"height\"].copy()\n # Mask height bin where not raining\n da_mask_3d_rain = da > min_threshold\n da_height = da_height.where(da_mask_3d_rain)\n\n # Mask heights where Z is not above threshold\n da_mask_3d = da > threshold\n da_height_masked = da_height.where(da_mask_3d)\n\n # Mask liquid phase\n if mask_liquid_phase:\n da_liquid_mask = get_liquid_phase_mask(ds)\n da_height_masked = da_height_masked.where(~da_liquid_mask)\n\n # Retrieve min and max echo height\n da_max_height = da_height_masked.max(dim=\"range\")\n da_min_height = da_height_masked.min(dim=\"range\")\n\n # OLD MASKING\n # if mask_liquid_phase:\n # da_isnan = np.isnan(da_min_height)\n # da_height_melting = ds[\"heightZeroDeg\"]\n # da_height_melting = da_height_melting.where(~da_isnan)\n # # If max is below the 0 °C isotherm --> set to nan\n # da_max_height = da_max_height.where(da_max_height > da_height_melting)\n # # If min below the 0 °C isoterm --> set the isotherm height\n # da_min_height = da_min_height.where(da_min_height > da_height_melting, da_height_melting)\n\n # Compute depth\n da_depth = da_max_height - da_min_height\n\n # Add attributes\n da_depth.name = f\"EchoDepth{threshold}dBZ\"\n da_depth.attrs[\"units\"] = \"m\"\n return da_depth", "def far_fields(horn_width, horn_height, eplane_effective_length, hplane_effective_length, frequency, r, theta, phi):\n # Calculate the wavenumber\n k = 2.0 * pi * frequency / c\n\n # Calculate the wave impedance\n eta = sqrt(mu_0 / epsilon_0)\n\n # Define the radial-component of the electric field\n e_r = 0.0\n\n # Define the theta-component of the electric field\n e_theta = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * sin(phi) * (1.0 + cos(theta)) * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Define the phi-component of the electric field\n e_phi = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * cos(phi) * (1.0 + cos(theta)) * \\\n I1(k, 
horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Define the radial-component of the magnetic field\n h_r = 0.0\n\n # Define the theta-component of the magnetic field\n h_theta = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * -cos(phi) * (1.0 + cos(theta)) / eta * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Define the phi-component of the magnetic field\n h_phi = 1j * k / (4.0 * pi * r) * exp(-1j * k * r) * sin(phi) * (1.0 + cos(theta)) / eta * \\\n I1(k, horn_width, hplane_effective_length, theta, phi) * \\\n I2(k, horn_height, eplane_effective_length, theta, phi)\n\n # Return all six components of the far field\n return e_r, e_theta, e_phi, h_r, h_theta, h_phi", "def geometric_tortuosity(maze):\n pathsTotal = []\n path_star_list = findPoints(maze, \"S\")\n\n total_caminos = []\n unit_caminos = 0\n array_path = np.array(maze)\n line = (array_path.shape)[2]\n global path\n i = 0\n path_star_list = endPoints(maze[0], 'S')\n # print(path_star_list)\n listEndPoints = endPoints(maze[-1], \"E\")\n # print(path_star_list)\n # print(listEndPoints)\n toTal = len(listEndPoints)*len(path_star_list)\n\n for star in path_star_list:\n caminos = []\n for end in listEndPoints:\n print(\"camino:\"+str(i+1)+\"/\"+str(toTal))\n\n path = astar(maze, star, end)\n\n if path != None:\n pathsTotal.append(path)\n\n i += 1\n result = 0\n # caminos.append(path)\n # total_caminos.append(caminos)\n try:\n x = map(valuepath, path)\n result = sum(x)\n except:\n pass\n\n caminos.append(result)\n unit_caminos += 1\n\n total_caminos.append(min(caminos))\n\n valor = (np.mean(np.array(total_caminos)))\n geometric_tortusity = valor/(int(line)-1)\n return geometric_tortusity, pathsTotal\n # return \"f\",\"f\"", "def compute_angle(map_id,padding_ratio=a.padding_ratio,map_size=a.map_size,sep=a.sep,freq=a.freq,\\\n f_dust=a.f_dust,lMax=a.lMax,lMin=a.lMin,l_step=a.l_step,FWHM=a.FWHM,noise_power=a.noise_power,\\\n slope=a.slope,delensing_fraction=a.delensing_fraction,useQU=a.useQU,N_bias=a.N_bias):\n\n # Step 1, create actual B-mode map\n lCut=int(1.35*lMax) # maximum ell for Fourier space maps\n\n # First compute B-mode map from padded-real space map with desired padding ratio. 
Also compute the padded window function for later use\n from hades.PaddedPower import MakePowerAndFourierMaps,DegradeMap,DegradeFourier\n fBdust,padded_window,unpadded_window=MakePowerAndFourierMaps(map_id,padding_ratio=padding_ratio,map_size=map_size,sep=sep,freq=freq,fourier=True,power=False,returnMasks=True,flipU=a.flipU)\n\n # Also compute unpadded map to give binning values without bias\n unpadded_fBdust=MakePowerAndFourierMaps(map_id,padding_ratio=1.,map_size=map_size,freq=freq,fourier=True,power=False,returnMasks=False,flipU=a.flipU)\n unpadded_fBdust=DegradeFourier(unpadded_fBdust,lCut) # remove high ell pixels\n\n fBdust=DegradeFourier(fBdust,lCut) # discard high-ell pixels\n padded_window=DegradeMap(padded_window.copy(),lCut) # remove high-ell data\n unpadded_window=DegradeMap(unpadded_window.copy(),lCut)\n\n unpadded_fBdust.kMap*=f_dust\n fBdust.kMap*=f_dust\n\n wCorrection = np.mean(padded_window.data**2.)**2./np.mean(padded_window.data**4.)\n\n from hades.NoisePower import noise_model,lensed_Cl,r_Cl\n Cl_lens_func=lensed_Cl(delensing_fraction=delensing_fraction) # function for lensed Cl\n\n def total_Cl_noise(l):\n return Cl_lens_func(l)+noise_model(l,FWHM=FWHM,noise_power=noise_power)\n\n from hades.PaddedPower import fourier_noise_map\n ellNoise=np.arange(5,lCut) # ell range for noise spectrum\n\n from hades.RandomField import fill_from_model\n #fourierNoise=fourier_noise_map\n\n from hades.PaddedPower import fourier_noise_test\n fourierNoise,unpadded_noise=fourier_noise_test(padded_window,unpadded_window,ellNoise,total_Cl_noise(ellNoise),padding_ratio=padding_ratio,unpadded=False,log=True)\n\n totFmap=fBdust.copy()\n totFmap.kMap+=fourierNoise.kMap# for total B modes\n unpadded_totFmap=unpadded_fBdust.copy()\n unpadded_totFmap.kMap+=unpadded_noise.kMap\n\n fBtrue=totFmap.copy()\n\n # Step 2: Compute the I map\n inDir=a.root_dir+'%sdeg%s/' %(map_size,sep)\n Tmap=liteMap.liteMapFromFits(inDir+'fvsmapT_'+str(map_id).zfill(5)+'.fits')\n Qmap=liteMap.liteMapFromFits(inDir+'fvsmapQ_'+str(map_id).zfill(5)+'.fits')\n Umap=liteMap.liteMapFromFits(inDir+'fvsmapU_'+str(map_id).zfill(5)+'.fits')\n Umap.data*=-1.\n QUmap=Qmap.copy()\n QUmap.data=np.sqrt(Qmap.data**2.+Umap.data**2.)\n if useQU:\n \tscaling=np.mean(QUmap.data**4.)\n else:\n \tscaling=np.mean(Tmap.data**4.)\n \n maskMap=liteMap.liteMapFromFits(inDir+'fvsmapMaskSmoothed_'+str(map_id).zfill(5)+'.fits')\n from hades.PaddedPower import zero_padding\n zTmap=zero_padding(Tmap,padding_ratio)\n zQUmap=zero_padding(QUmap,padding_ratio)\n zWindow=zero_padding(maskMap,padding_ratio)\n # Compute window factor <W^2> for padded window (since this is only region with data)\n windowFactor=np.mean(zWindow.data**2.)\n\n # Define mod(l) and ang(l) maps needed for fourier transforms\n modL,angL=fp.fftPol.makeEllandAngCoordinate(zTmap) # choice of map is arbitary\n # Create pure T,E,B maps using 'hybrid' method to minimize E->B leakage\n zTmap.data*=zWindow.data\n zQUmap.data*=zWindow.data\n fT=fftTools.fftFromLiteMap(zTmap)\n fQU=fftTools.fftFromLiteMap(zQUmap)\n\n # Rescale to correct amplitude using dust SED\n from hades.PowerMap import dust_emission_ratio\n dust_intensity_ratio=dust_emission_ratio(freq)\n\n fT.kMap*=dust_intensity_ratio # apply dust-reduction factor \n fT.kMap/=np.sqrt(windowFactor)\n fQU.kMap*=dust_intensity_ratio\n fQU.kMap/=np.sqrt(windowFactor)\n fImap=DegradeFourier(fT,lCut)\n fQUmap=DegradeFourier(fQU,lCut)\n\n # Step 3: Compute angle estimate\n powBtrue=fftTools.powerFromFFT(fBtrue)\n 
unpadded_powBtrue=fftTools.powerFromFFT(unpadded_totFmap)\n from hades.KKdebiased import derotated_estimator\n output=derotated_estimator(powBtrue,map_id,lMin=lMin,lMax=lMax,FWHM=FWHM,noise_power=noise_power,delensing_fraction=delensing_fraction,slope=slope)\n A,fs,fc,Afs,Afc,_=output\n HexPow2=Afs**2.+Afc**2.\n \n if a.debias_dedust:\n \tfrom .RandomField import padded_fill_from_Cell\n\tbias_data=np.zeros(N_bias)\n\t\n\tdef analytic_model(ell,A_est,slope):\n\t\t\"\"\"Use the estimate for A to construct analytic model.\n\t\tNB: This is just used for finding the centres of the actual binned data.\n\t\t\"\"\"\n\t\treturn total_Cl_noise(ell)+A_est*ell**(-slope)\n\t\n\tfrom .PowerMap import oneD_binning\n\tl_cen,mean_pow = oneD_binning(unpadded_powBtrue.copy(),lMin*padding_ratio,lCut,l_step*padding_ratio,binErr=False,exactCen=a.exactCen,\\\n\t\t\t\t\tC_ell_model=analytic_model,params=[A,slope]) \n\t#l_cen,mean_pow=oneD_binning(totPow.copy(),lMin,lCut,l_step,binErr=False,exactCen=a.exactCen,C_ell_model=analytic_model,params=[A_est,slope])\n\t# gives central binning l and mean power in annulus using window function corrections \n\t\n\t# Create spline fit\n\tfrom scipy.interpolate import UnivariateSpline\n\tspl=UnivariateSpline(l_cen,np.log(mean_pow),k=5)\n\tdef spline(ell):\n\t\treturn np.exp(spl(ell))\n\t#del l_cen,mean_pow\n\t\n\t# Precompute useful data:\n\tfrom hades.RandomField import precompute\n\tprecomp=precompute(padded_window.copy(),spline,lMin=lMin,lMax=lMax)\n\t\n\tfor n in range(N_bias):\n\t\tif n%100==0:\n\t\t\tprint 'Computing bias sim %s of %s' %(n+1,N_bias)\n\t\tfBias=padded_fill_from_Cell(padded_window.copy(),l_cen,mean_pow,lMin=lMin,unPadded=a.unPadded,precomp=precomp)#,padding_ratio=padding_ratio)\n\t\tbias_cross=fftTools.powerFromFFT(fBias.copy(),totFmap.copy()) # cross map\n\t\tbias_self=fftTools.powerFromFFT(fBias.copy()) # self map\n\t\t# First compute estimators on cross-spectrum\n\t\tcross_ests=derotated_estimator(bias_cross.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,\\\n\t\t\t\t\t\tfactor=A,FWHM=FWHM,noise_power=noise_power,\\\n\t\t\t\t\t\trot=a.rot,delensing_fraction=delensing_fraction,useTensors=a.useTensors,\\\n\t\t\t\t\t\tdebiasAmplitude=False,rot_average=a.rot_average,KKdebiasH2=False) # NB: CHANGE DEBIAS_AMPLITUDE parameter here\n\t\tself_ests=derotated_estimator(bias_self.copy(),map_id,lMin=lMin,lMax=lMax,slope=slope,\\\n\t\t\t\t\t\tfactor=A,FWHM=FWHM,noise_power=noise_power,\\\n\t\t\t\t\t\trot=a.rot,delensing_fraction=delensing_fraction,useTensors=a.useTensors,\\\n\t\t\t\t\t\tdebiasAmplitude=True,rot_average=a.rot_average,KKdebiasH2=a.KKdebiasH2)\n\t\tbias_data[n]=(-1.*(self_ests[3]**4.+self_ests[4]**2.)+4.*(cross_ests[3]**2.+cross_ests[4]**2.))*wCorrection\n\t# Now compute the mean bias - this debiases the DATA only\n\tbias=np.mean(bias_data)\n\tdel bias_self,bias_cross\n\t\n HexPow2-=bias\t\n \n norm=np.sqrt(Afs**2.+Afc**2.)\n fsbar,fcbar=Afs/norm,Afc/norm\n\n sin2a=fsbar/np.sqrt(2.*(fcbar+1.))\n cos2a=np.sqrt((1.+fcbar)/2.)\n\n # Step 4: Compute B estimate\n angleMap=fImap.thetaMap*np.pi/180.\n fB_est=fImap.copy()\n if useQU:\n \tbaseMap=fQUmap.copy()\n else:\n \tbaseMap=fImap.copy()\n fB_est.kMap=baseMap.kMap*(sin2a*np.cos(2.*angleMap)-cos2a*np.sin(2.*angleMap))\n\n # Step 5: Now compute cross coefficient\n crossPow=fftTools.powerFromFFT(fB_est,fBtrue)\n estPow=fftTools.powerFromFFT(fB_est,fB_est)\n\n from hades.PowerMap import oneD_binning\n lC,pC=oneD_binning(crossPow,lMin,lMax/2.,l_step,exactCen=False)\n 
lE,pE=oneD_binning(estPow,lMin,lMax/2.,l_step,exactCen=False)\n lB,pB=oneD_binning(powBtrue,lMin,lMax/2.,l_step,exactCen=False)\n #rho=np.array(pC)/np.sqrt(np.array(pB)*np.array(pE))\n ratio=np.array(pC)/np.array(pE)\n sign=np.sign(np.mean(ratio))\n\n # Step 6: Now compute the actual angle\n alpha0=0.25*np.arctan2(fsbar,fcbar) # range is [-pi/4,pi/4]\n if sign==-1.0:\n alpha0+=np.pi/2.\n \n # Step 7: Compute the ratio of H^2/<I^4> for rescaling\n ratio=(np.abs(HexPow2)/scaling)**0.25\n \n alpha_deg=alpha0*180./np.pi\n print 'MapID: %s Angle: %.2f Ratio: %.2e' %(map_id,alpha_deg,ratio)\n \n return alpha_deg,ratio", "def get_displacement(self, color, depth):\n displacement = None\n keypoints, descriptors = self._feature_detector.detectAndCompute(color, None)\n \n if descriptors is not None and self._last_descriptors is not None:\n matches = self._matcher.match(descriptors, self._last_descriptors)\n matches = self._select_matches(matches) \n points, last_points = self._get_points_from_matches(keypoints, self._last_keypoints, matches)\n t = []\n horizontal = 0\n for i in range(len(matches)):\n d1 = depth[int(points[i][1]), int(points[i][0])]\n d2 = self._last_depth[int(last_points[i][1]), int(points[i][0])]\n #print(d1, d2)\n \n if d1 != 0 and abs(d1-d2) < 10:\n t.append(matches[i])\n horizontal += (points[i][0] - last_points[i][0]) * d1\n \n matches = t\n horizontal /= len(t) \n \n if len(matches) > 0:\n if self._config['show_matches']:\n matches_preview = cv2.drawMatches(color, keypoints, self._last_color, self._last_keypoints, matches, None, flags=2)\n cv2.imshow('Basic_Odometry_matches_preview', matches_preview)\n \n points, last_points = self._get_points_from_matches(keypoints, self._last_keypoints, matches)\n #It uses default values for parameters - there must be here because of broken python bindings I suppose\n tf_mat = cv2.estimateRigidTransform(points, last_points, False, 500, 0.5, 3)\n \n if tf_mat is not None:\n a11, a12, b1 = tf_mat[0][0], tf_mat[0][1], tf_mat[0][2]\n a21, a22, b2 = -tf_mat[1][0], tf_mat[1][1], tf_mat[1][2]\n roll = math.atan2(-a12, a11) \n \n displacement = {\n 'roll': roll,\n 'horizontal': horizontal,#b1,\n 'vertical': b2,\n 'depth': math.sqrt(a11**2+a12**2) * (-1 if a11 < 0 else 1)\n }\n \n if descriptors is not None: \n self._last_keypoints = keypoints\n self._last_descriptors = descriptors\n self._last_color = color\n self._last_depth = depth\n \n return displacement", "def test_objectbased_depth(layout, gain_calc):\n bf_no_depth = AudioBlockFormatObjects(position=dict(azimuth=0, elevation=0, distance=1),\n width=360, height=360, depth=0)\n bf_with_depth = AudioBlockFormatObjects(position=dict(azimuth=0, elevation=0, distance=0.5),\n width=0, height=0, depth=1)\n gains_no_depth = gain_calc.render(ObjectTypeMetadata(bf_no_depth)).direct\n gains_with_depth = gain_calc.render(ObjectTypeMetadata(bf_with_depth)).direct\n\n npt.assert_allclose(np.linalg.norm(gains_no_depth), 1.0)\n npt.assert_allclose(np.linalg.norm(gains_with_depth), 1.0)\n\n front_idx = layout.channel_names.index(\"M+000\")\n assert np.all(gains_with_depth[~layout.is_lfe]) > 0 and gains_with_depth[front_idx] > gains_no_depth[front_idx]", "def find_all_ORFs(dna):\n #these functions offset to analyze each each open frame reference\n zero_offset=find_all_ORFs_oneframe(dna[0:])\n \n one_offset=find_all_ORFs_oneframe(dna[1:])\n \n two_offset=find_all_ORFs_oneframe(dna[2:])\n \n return zero_offset+one_offset+two_offset\n \n # YOUR IMPLEMENTATION HERE", "def approximate_betweenness(graph, max_depth):\n 
###TODO\n pass", "def colourMagnitudeDiagram(lens_r_list, lens_gr_list, lens_gi_list, source_r_list, source_gr_list,\n source_gi_list, positive_path):\n\n int_lens_r_list = []\n int_lens_gr_list = []\n int_lens_gi_list = []\n int_source_r_list = []\n int_source_gr_list = []\n int_source_gi_list = []\n\n # creating lists containing floats and integrs that can be used in the graph\n for i in range(0, len(lens_r_list)):\n int_lens_r = float(lens_r_list[i])\n int_lens_r_list.append(int_lens_r)\n int_lens_gr = float(lens_gr_list[i])\n int_lens_gr_list.append(int_lens_gr)\n int_lens_gi = float(lens_gi_list[i])\n int_lens_gi_list.append(int_lens_gi)\n int_source_r = float(source_r_list[i])\n int_source_r_list.append(int_source_r)\n int_source_gr = float(source_gr_list[i])\n int_source_gr_list.append(int_source_gr)\n int_source_gi = float(source_gi_list[i])\n int_source_gi_list.append(int_source_gi)\n\n # Getting the min and max fro the axees\n lens_r_min = min(int_lens_r_list)\n lens_r_max = max(int_lens_r_list)\n lens_gr_min = min(int_lens_gr_list)\n lens_gr_max = max(int_lens_gr_list)\n lens_gi_min = min(int_lens_gi_list)\n lens_gi_max = max(int_lens_gi_list)\n source_r_min = min(int_source_r_list)\n source_r_max = max(int_source_r_list)\n source_gr_min = min(int_source_gr_list)\n source_gr_max = max(int_source_gr_list)\n source_gi_min = min(int_source_gi_list)\n source_gi_max = max(int_source_gi_list)\n\n r_min = min(lens_r_min, source_r_min)\n r_max = max(lens_r_max, source_r_max)\n gr_min = min(lens_gr_min, source_gr_min)\n gr_max = max(lens_gr_max, source_gr_max)\n gi_min = min(lens_gi_min, source_gi_min)\n gi_max = max(lens_gi_max, source_gi_max)\n\n y_min = gr_min - 0.2\n y_max = gr_max + 0.2\n x_min = r_min - 0.2\n x_max = r_max + 0.2\n a_min = gi_min -0.2\n a_max = gi_max+0.2\n\n x = int_lens_r_list\n y = int_lens_gr_list\n x2 = int_source_r_list\n y2 = int_source_gr_list\n gi_source = int_source_gi_list\n gi_lens = int_lens_gi_list\n\n # plotting the colour magnitude graph\n fig1 = plt.figure()\n plt.scatter(x, y, color='b', marker='.', label='Lenses')\n plt.scatter(x2, y2, color='r', marker='.', label='Sources')\n plt.xlabel('r')\n plt.ylabel('g-r')\n\n axes = plt.axes()\n axes.set(xlim=(x_min, x_max), ylim=(y_min, y_max))\n axes.get_yaxis().set_major_locator(LinearLocator(numticks=12))\n axes.get_xaxis().set_major_locator(LinearLocator(numticks=12))\n\n if positive_path == 'Training/g_r_PositiveAll':\n plt.title('Positively Simulated Training Data Colour Magnitude (g-r vs r) Diagram')\n elif positive_path == 'Testing/g_r_PositiveAll':\n plt.title('Positively Simulated Testing Data Colour Magnitude (g-r vs r) Diagram')\n elif positive_path == 'Training/g_r_AllData':\n plt.title('All Positively Simulated Data Colour Magnitude (g-r vs r) Diagram')\n plt.legend()\n plt.show()\n fig1.savefig('%s_g_r_colourMagnitudeDiagram.png' % positive_path)\n\n fig2 = plt.figure()\n plt.scatter(gi_lens, y, color = 'b', marker= '.', label = 'Lenses')\n plt.scatter(gi_source, y2, color ='r', marker='.', label = 'Sources')\n if positive_path == 'Training/g_r_PositiveAll':\n plt.title('Positively Simulated Training Data Colour Magnitude (g-r vs g-i ) Diagram')\n elif positive_path == 'Testing/g_r_PositiveAll':\n plt.title('Positively Simulated Testing Data Colour Magnitude (g-r vs g-i) Diagram')\n elif positive_path == 'Training/g_r_AllData':\n plt.title('All Positively Simulated Data Colour Magnitude (g-r vs g-i) Diagram')\n\n axes = plt.axes()\n axes.set(xlim=(a_min, a_max), ylim=(y_min, y_max))\n 
axes.get_yaxis().set_major_locator(LinearLocator(numticks=12))\n axes.get_xaxis().set_major_locator(LinearLocator(numticks=12))\n\n plt.xlabel('g-i')\n plt.ylabel('g-r')\n plt.legend()\n plt.show()\n fig2.savefig('%s_g_i_colourMagnitudeDiagram.png' % positive_path)", "def antenny_calibrate(self):\n if self.antenny_config.get(\"use_bno08x_rvc\"):\n t = .25\n d = .5\n us = 50\n else:\n t = .1\n d = .5\n us = 100\n self.platform.auto_calibrate_elevation_servo(us=us, t=t, d=d)\n self.platform.auto_calibrate_azimuth_servo(us=us, t=t, d=d)\n if self.antenny_config.get(\"use_bno055\"):\n self.platform.auto_calibrate_magnetometer()\n self.platform.auto_calibrate_gyroscope()\n self.platform.auto_calibrate_accelerometer()", "def optical_depth_height(path_to_netcdf, opt_variable='upopt', tau=1.):\n data = nc.Dataset(path_to_netcdf)\n tau = data[opt_variable][:]\n z = data['z'][:]\n t1height = []\n decreasing = True\n # check if optical depth is increasing or decreasing\n if tau.transpose()[0][1] - tau.transpose()[0][0] > 0.:\n decreasing = False\n for i, t in enumerate(tau.transpose()):\n for j, opt in enumerate(t):\n if decreasing:\n # record first tau <= 1\n if opt <= 1.:\n t1height.append(z[j])\n t1index.append(j)\n break\n else:\n # increasing, so record first tau >= 1\n if opt >= 1.:\n t1height.append(z[j])\n t1index.append(j)\n break\n return t1height", "def e_c_b97_polarized(rhoa,\n rhob,\n sigma_aa,\n sigma_ab,\n sigma_bb,\n power_series_ss=(\n (0, 0.1737), (1, 2.3487), (2, -2.4868)\n ),\n power_series_os=(\n (0, 0.9454), (1, 0.7471), (2, -4.5961)\n ),\n gamma_ss=0.2,\n gamma_os=0.006):\n del sigma_ab\n\n e_c_lda_aa, e_c_lda_bb, e_c_lda_ab = lda.decomposed_e_c_lda_polarized(\n rhoa, rhob)\n\n xa = get_reduced_density_gradient(rhoa, sigma_aa)\n xb = get_reduced_density_gradient(rhob, sigma_bb)\n xave = jnp.sqrt(0.5 * (xa ** 2 + xb ** 2))\n\n f_c_aa = f_b97(\n xa, power_series=power_series_ss, gamma=gamma_ss, polarized=True)\n f_c_bb = f_b97(\n xb, power_series=power_series_ss, gamma=gamma_ss, polarized=True)\n f_c_ab = f_b97(\n xave, power_series=power_series_os, gamma=gamma_os, polarized=True)\n\n return e_c_lda_aa * f_c_aa + e_c_lda_bb * f_c_bb + e_c_lda_ab * f_c_ab", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + 
np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def distance_23(self, alphas, motor_positions):\n\n self.roof_vertex_z[1] = motor_positions[1] * math.cos(alphas[1])\n s2 = motor_positions[1] * math.sin(alphas[1])\n self.roof_vertex_x[1] = self.base_point[1][0] - (s2 * 0.5) # sin 30° = 1/2\n self.roof_vertex_y[1] = self.base_point[1][1] - (s2 * 0.8660254037844386) # cos 30° = sqrt(3) / 2\n\n self.roof_vertex_z[2] = motor_positions[2] * math.cos(alphas[2])\n s3 = motor_positions[2] * math.sin(alphas[2])\n self.roof_vertex_x[2] = self.base_point[2][0] - (s3 * 0.5) # sin 30° = 1/2\n self.roof_vertex_y[2] = self.base_point[2][1] + (s3 * 0.8660254037844386) # cos 30° = sqrt(3) / 2\n\n return math.sqrt(\n ((self.roof_vertex_x[2] - self.roof_vertex_x[1]) ** 2) +\n ((self.roof_vertex_y[1] - self.roof_vertex_y[2]) ** 2) +\n ((self.roof_vertex_z[2] - self.roof_vertex_z[1]) ** 2))", "def shadeToDepth(color):\n minDistance = 0.7\n maxDistance = 20\n maxColor = 255\n color = np.average(color, axis=None)\n # depth = mx + b\n # m = (minDistance - maxDistance)/maxColor\n # x = color\n # b = maxDistance\n m = (maxDistance - minDistance)/maxColor\n x = color\n b = minDistance\n return m * x + b", "def alpha_crit_fromEarth(a_p): #OK\n a_earth = 1. #in AU\n if a_p > a_earth:\n alpha_max = np.arcsin(a_earth/a_p) #occurs at quadrature\n else: #if a_p < a_earth:\n alpha_max = np.pi #0 deg when opposite side of sta180 deg on same side of star\n return alpha_max", "def test_GBL_tau_inst():\n dz = 0.05\n z = numpy.arange(0., 80. + 1.5*dz, dz)\n\n # Fully ionized H and He\n x_ionH = 1.0\n x_ionHe = 2.0\n\n cosmo = {}\n cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])\n cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']\n cosmo['h'] = 0.65\n cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.\n cosmo['Y_He'] = 0.24\n cd.set_omega_k_0(cosmo)\n\n tau_inst = cr.optical_depth_instant(z, x_ionH=x_ionH, x_ionHe=x_ionHe, \n **cosmo)\n tau_int = cr.integrate_optical_depth(x_ionH, x_ionHe, z, **cosmo)\n\n linestyle = ['-', ':', '--']\n \n pylab.figure()\n pylab.subplot(2,1,1)\n pylab.title(\"Compare to GB&L fig. 1 (astro-ph/9812125v3.)\")\n for i in range(len(linestyle)):\n pylab.plot(z, tau_inst[i], ls=linestyle[i], color='b')\n pylab.plot(z, tau_int[i], ls=linestyle[i], color='r')\n\n pylab.xlim(0,80)\n pylab.ylim(0,1)\n pylab.xlabel(r\"$\\mathrm{z_{ion}}$\")\n pylab.ylabel(r\"$\\tau$\")\n \n pylab.subplot(2,1,2)\n for i in range(len(linestyle)):\n pylab.plot(z, \n 1.e4 * (tau_int[i] - tau_inst[i])/tau_inst[i], \n ls=linestyle[i], color='k')\n diff = (tau_int[i] - tau_inst[i]) / tau_inst[i]\n diff[numpy.isnan(diff)] = 0.0\n print(\"max fractional error in num. int. 
= %.3g\" % \n numpy.max(numpy.abs(diff))\n )\n ntest.assert_array_less(numpy.abs(diff), \n numpy.zeros(diff.shape) + 2.e-4)\n\n pylab.xlim(0,40)\n pylab.xlabel(r\"$\\mathrm{z_{ion}}$\")\n pylab.ylabel(r\"$\\mathrm{10^4 \\times (num.\\tau - ana.\\tau)/ana.\\tau}$\")", "def getDiameter(antenna):\n if antenna >=1 and antenna <= 6: \n diameter = 10.4\n elif antenna >=7 and antenna <= 15: \n diameter = 6.1\n elif antenna >=16 and antenna <= 23: \n diameter = 3.5\n else:\n raise Exception,'Invalid antenna number : '+str(antenna)\n return diameter", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def MiyamotoNagaiAccel(self, M, rd, r):\n R = np.sqrt(r[0]**2 + r[1]**2) #Finding magnitude of x and y compnets\n zd = rd/5. #Calculating \"zd\"\n B = rd + np.sqrt(r[2]**2 + zd**2) #Calclating \"B\"\n zstuff = 1/np.sqrt(r[2]**2 + zd**2) #Calculating stuff that only appears in z componet\n MNa = -self.G*M/(R**2+B**2)**1.5 * r * np.array([1,1,zstuff]) #Putting it all together\n\n return MNa", "def calculate_SWIR2_TOA(d,sza,L):\n rho_SWIR2_TOA=np.pi*(d**2)*L[:,178,:]/(0.0825*np.cos(sza))\n return rho_SWIR2_TOA", "def get_re(r, cl, aoa, arctan, rel_v, blade_number):\n chord = 8*math.pi*r*(1-math.cos(math.radians(arctan-aoa))) / blade_number / cl\n Re = round(rel_v * chord / 0.00001511 / 100) * 100\n return Re, chord", "def _estimate_bearing_(self):\n fname = \"data/sim/{dn}/{rad}/bearing.mat\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n m = {}\n lat, lon, bearing = utils.get_sd_radar(self.rad)\n p = (lat, lon)\n gc = GC(p, p)\n dist = np.linspace(0,self.mrange,self.nmrange)\n lats, lons = [], []\n for d in dist:\n x = gc.destination(p, bearing, distance=d)\n lats.append(x[0])\n lons.append(x[1])\n rinc = dist[1]-dist[0]\n m[\"dist\"], m[\"lat\"], m[\"lon\"] = dist, np.array(lats), np.array(lons)\n m[\"olat\"], m[\"olon\"], m[\"rb\"], m[\"num_range\"], m[\"max_range\"], m[\"range_inc\"] = lat, lon, bearing, float(self.nmrange),\\\n float(self.mrange), float(rinc)\n m[\"start_height\"], m[\"height_inc\"], m[\"num_heights\"] = float(self.sheight), float(self.hinc),\\\n float(len(np.arange(self.sheight,self.eheight,self.hinc)))\n m[\"ht\"] = np.arange(self.sheight,self.eheight,self.hinc)\n m[\"freq\"], m[\"tol\"], m[\"nhops\"] = float(self.frequency), float(1e-7), float(self.nhops)\n m[\"elev_s\"], m[\"elev_i\"], m[\"elev_e\"] = float(self.selev), float(self.ielev), float(self.eelev)\n m[\"radius_earth\"] = 6371.0\n m[\"d_ratio\"], m[\"d_start\"], m[\"d_end\"], m[\"d_rtime\"] = float(self.d_ratio), float(self.d_start),\\\n float(self.d_end), float(self.d_rtime)\n m[\"f_ratio\"], m[\"f_start\"], m[\"f_end\"], m[\"f_rtime\"] = float(self.f_ratio), float(self.f_start),\\\n float(self.f_end), float(self.f_rtime)\n m[\"e_ratio\"] = float(self.e_ratio)\n savemat(fname, m)\n self.m, self.lat, self.lon, self.ht = m, m[\"lat\"], m[\"lon\"], m[\"ht\"]\n return", "def depth_heading_rc(self, goal):\n # initialize empty RC command\n channels = [1500] * 8\n # compute heading and depth changes\n zout = self.get_depth_pwm(goal)\n hout = self.get_heading_pwm(goal)\n\n channels[self.zchannel] = zout\n channels[self.rchannel] = hout\n return 
channels", "def _estimate_bearing_(self):\n fname = \"data/sim/{dn}/{rad}/bearing.mat\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n m = {}\n lat, lon, bearing = utils.get_sd_radar(self.rad)\n p = (lat, lon)\n gc = GC(p, p)\n dist = np.linspace(0,self.mrange,self.nmrange)\n lats, lons = [], []\n for d in dist:\n x = gc.destination(p, bearing, distance=d)\n lats.append(x[0])\n lons.append(x[1])\n rinc = dist[1]-dist[0]\n m[\"dist\"], m[\"lat\"], m[\"lon\"] = dist, np.array(lats), np.array(lons)\n m[\"olat\"], m[\"olon\"], m[\"rb\"], m[\"num_range\"], m[\"max_range\"], m[\"range_inc\"] = lat, lon, bearing, float(self.nmrange),\\\n float(self.mrange), float(rinc)\n m[\"start_height\"], m[\"height_inc\"], m[\"num_heights\"] = float(self.sheight), float(self.hinc),\\\n float(len(np.arange(self.sheight,self.eheight,self.hinc)))\n m[\"ht\"] = np.arange(self.sheight,self.eheight,self.hinc)\n m[\"freq\"], m[\"tol\"], m[\"nhops\"] = float(self.frequency), float(1e-7), float(self.nhops)\n m[\"elev_s\"], m[\"elev_i\"], m[\"elev_e\"] = float(self.selev), float(self.ielev), float(self.eelev)\n m[\"radius_earth\"] = 6371.0\n m[\"d_ratio\"], m[\"d_start\"], m[\"d_end\"] = float(self.d_ratio), 10., 35.\n m[\"f_ratio\"], m[\"f_start\"], m[\"f_end\"] = float(self.f_ratio), 130., 240.\n m[\"e_ratio\"], m[\"e_start\"], m[\"e_end\"] = float(self.e_ratio), 50., 70.\n savemat(fname, m)\n self.m, self.lat, self.lon, self.ht = m, m[\"lat\"], m[\"lon\"], m[\"ht\"]\n return", "def distance_13(self, alphas, motor_positions):\n\n self.roof_vertex_z[0] = motor_positions[0] * math.cos(alphas[0])\n self.roof_vertex_x[0] = motor_positions[0] * math.sin(alphas[0])\n self.roof_vertex_z[2] = motor_positions[2] * math.cos(alphas[2])\n\n s3 = motor_positions[2] * math.sin(alphas[2])\n self.roof_vertex_x[2] = self.base_point[2][0] - (s3 / 2) # sin 30° = 1/2\n self.roof_vertex_y[2] = self.base_point[2][1] + (s3 * 0.8660254037844386) # cos 30° = sqrt(3) / 2\n\n return math.sqrt(\n ((self.roof_vertex_x[2] - self.roof_vertex_x[0]) ** 2) +\n (self.roof_vertex_y[2] ** 2) +\n ((self.roof_vertex_z[2] - self.roof_vertex_z[0]) ** 2))", "def radialApproxEffect(hubdist1,hubdist2,width,length):\n #Grating coordinates\n x,y = np.meshgrid(np.linspace(-width,width,1000),\\\n np.linspace(-length,length,1000))\n y1 = y + hubdist1\n y2 = y + hubdist2\n\n #Convert to period and yaw angle\n period1 = np.sqrt(x**2+y1**2)/hubdist1*160. #nm\n period2 = np.sqrt(x**2+y2**2)/hubdist2*160. 
#nm\n yaw = blazeYaw(1.5*np.pi/180,2.4,3,160.)\n yaw1 = np.pi/2 - np.arctan(x/y1) + yaw\n yaw2 = np.pi/2 - np.arctan(x/y2) + yaw\n\n #Determine alpha and beta\n beta0,alpha0 = litBetaAlpha(1.5*np.pi/180,2.4,3,160.)\n alpha1 = alpha0 + 3*2.4/period1*np.sin(yaw1)\n alpha2 = alpha0 + 3*2.4/period2*np.sin(yaw2)\n beta1 = beta0 + (3*2.4/period1)*np.cos(yaw1)\n beta2 = beta0 + (3*2.4/period2)*np.cos(yaw2)\n\n #Determine spot shifts\n x1 = hubdist2*(alpha1/beta1)\n x2 = hubdist2*(alpha2/beta2)\n \n\n pdb.set_trace()\n \n return x1,x2", "def _calculate_a_value(self, bval, nvalue, nyr, cmag, ref_mag):\n\n denominator = np.sum(nyr * np.exp(-bval * (cmag - ref_mag)))\n return nvalue / denominator", "def getTheta(self, trackWidth):\n leftDist = leftUS.sensor_detect()\n print(\"LEFT US: \" + str(leftDist))\n rightDist = rightUS.sensor_detect()\n print(\"RIGHT US: \" + str(rightDist))\n #totalWidth (hypotenuse) = leftUS + rightUS + robotWidth\n totalWidth = leftDist + rightDist + 6\n try:\n print(math.acos(trackWidth/totalWidth))\n return math.acos(trackWidth/totalWidth)\n except ValueError:\n return 0", "def calMeasuredContactAngle(self):\n #account the base\n bottomLength = 0\n arrayHeight = np.empty([0, ], dtype = 'int64')\n for i in sp.arange(self.nx):\n if (self.densityFluid1[1, i] >= 0.485):\n bottomLength += 1\n #account the height\n for m in sp.arange(self.nx):\n tmpHeight = 0\n for n in sp.arange(1, self.ny - 1):\n if (self.densityFluid1[n, m] >= 0.485):\n tmpHeight += 1\n arrayHeight = np.append(arrayHeight, tmpHeight)\n heightH = np.amax(arrayHeight)\n #radius of droplet\n radiusD = (4. * np.power(heightH, 2.) + np.power(bottomLength, 2.)) / \\\n (8. * heightH)\n contactAngle = np.arctan((bottomLength) / (2. * (radiusD - heightH))) \n return contactAngle", "def computeRjb(self, lon, lat, depth):\n pass", "def bv_to_radius(b_minus_v):\n # Boyajian 2012\n X = b_minus_v\n a0 = 0.3830\n a1 = 0.9907\n a2 = -0.6038\n Y = 0\n # Ignore metallicity\n a3 = 0\n a4 = 0\n a5 = 0\n return (a0 + a1 * X + a2 * X ** 2 + a3 * X * Y +\n a4 * Y + a5 * Y ** 2) * R_sun", "def computeRhyp(self, lon, lat, depth):\n origin = self._origin\n oldshape = lon.shape\n\n\n rhyp = geodetic.distance(origin.lon, origin.lat, origin.depth,\n lon, lat, depth)\n rhyp = rhyp.reshape(oldshape)\n return rhyp", "def calculateDetectorArea(self):\n area = 0.0\n r = self.geoParam['CylinderLightGuideRadius']\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n area -= math.pow(r,2)\n r += self.geoParam['DetectorThickness']\n area += math.pow(r,2)\n r += self.geoParam['DetectorSpacing']\n return math.pi*area", "def rayleigh(th,r,wl,a,n1,n2):\n c = np.cos(th)\n c2,s2 = c**2, np.sin(th)**2\n k = 2*np.pi/wl\n n_2 = n2**2/n1**2\n m = (k**4)*(a**6)*(abs(n_2-1)**2) / ((abs(n_2+2)**2) * 2 * (r**2))\n return m*np.array([[1+c2 , -s2 , 0 , 0],\n [-s2 , 1+c2 , 0 , 0],\n [0 , 0 , 2*c , 0],\n [0 , 0 , 0 , 2*c]])", "def paf_o_r(rr_o_r, alpha):\n return ((rr_o_r - 1) * (1 - alpha)) / ((rr_o_r - 1) * (1 - alpha) + 1)", "def myDihedralFunctionAirliner(Epsilon):\n BaseDihedral = 7\n\n # A simple model of a loaded wing shape:\n return BaseDihedral + Epsilon*Epsilon*10" ]
[ "0.5567054", "0.55453414", "0.5525464", "0.5403252", "0.539342", "0.53296256", "0.53177613", "0.52844834", "0.52782506", "0.524908", "0.52308244", "0.52085507", "0.5208206", "0.5199287", "0.5178122", "0.51560545", "0.51540154", "0.51226914", "0.51127833", "0.5078825", "0.5057937", "0.5048884", "0.50282073", "0.5016557", "0.49781042", "0.4977122", "0.49544078", "0.49530238", "0.49478027", "0.49472052", "0.49209085", "0.49140793", "0.49038708", "0.48967668", "0.48903805", "0.488919", "0.48789334", "0.48715436", "0.48710847", "0.48707694", "0.4867667", "0.48640135", "0.4851743", "0.48509294", "0.48500252", "0.48411572", "0.48265257", "0.48225453", "0.48111543", "0.48071784", "0.48034796", "0.4801928", "0.4801597", "0.4799918", "0.47858912", "0.4779709", "0.4779709", "0.47790226", "0.4778265", "0.47772378", "0.47747242", "0.4773483", "0.47710076", "0.476825", "0.47618446", "0.4758449", "0.474737", "0.47425744", "0.47421792", "0.47263193", "0.47199884", "0.47175983", "0.4710021", "0.47099128", "0.47085398", "0.47040403", "0.46939683", "0.46932667", "0.4690595", "0.46883106", "0.46842098", "0.46803418", "0.46753258", "0.4675066", "0.4674817", "0.467167", "0.4670782", "0.46669513", "0.46668252", "0.46667317", "0.46665388", "0.46640545", "0.4660455", "0.46579146", "0.46521062", "0.46471214", "0.4646077", "0.46442637", "0.4634683", "0.4633802" ]
0.53029495
7
Calculate Linke turbidity using the Kasten pyrheliometric formula. Note that broadband aerosol optical depth (AOD) can be approximated by AOD measured at 700 nm according to Molineaux [4]. Bird and Hulstrom offer an alternate approximation using AOD measured at 380 nm and 500 nm. Based on the original implementation by Armel Oumbe.
def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):

    # "From numerically integrated spectral simulations done with Modtran
    # (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth
    # of a clean and dry atmosphere (fictitious atmosphere that comprises only
    # the effects of Rayleigh scattering and absorption by the atmosphere gases
    # other than the water vapor) the following expression"
    # - P. Ineichen (2008)
    delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16)

    # "and the broadband water vapor optical depth where pwat is the integrated
    # precipitable water vapor content of the atmosphere expressed in cm and am
    # the optical air mass. The precision of these fits is better than 1% when
    # compared with Modtran simulations in the range 1 < am < 5 and
    # 0 < pwat < 5 cm at sea level" - P. Ineichen (2008)
    delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34

    # broadband AOD
    delta_a = aod_bb

    # "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke
    # turbidity at am = 2 can be written. The extension of the Linke turbidity
    # coefficient to other values of air mass was published by Ineichen and
    # Perez (2002)" - P. Ineichen (2008)
    lt = -(9.4 + 0.9 * airmass_absolute) * np.log(
        np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a))
    ) / airmass_absolute

    # filter out of extrapolated values
    return lt
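A minimal usage sketch for the function above (added for illustration, not part of the dataset row). It assumes pvlib-style inputs: absolute (pressure-corrected) airmass, precipitable water in cm, and a broadband AOD built from the Bird & Hulstrom (1980) two-wavelength approximation mentioned in the query; the helper name bird_hulstrom80_aod_bb, its 0.27583/0.35 weights, and all numeric inputs are assumptions for the example, not values taken from the row above.

import numpy as np

def bird_hulstrom80_aod_bb(aod380, aod500):
    # Assumed Bird & Hulstrom (1980) weighting of AOD measured at 380 nm and
    # 500 nm into a single broadband AOD value.
    return 0.27583 * aod380 + 0.35 * aod500

# Illustrative (assumed) atmospheric inputs.
airmass_absolute = np.array([1.0, 1.5, 2.0])  # pressure-corrected airmass
precipitable_water = 1.4                      # integrated water vapor, cm
aod_bb = bird_hulstrom80_aod_bb(aod380=0.3, aod500=0.25)

lt = kasten96_lt(airmass_absolute, precipitable_water, aod_bb)
print(lt)  # one Linke turbidity value per airmass entry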
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_lwdown(tairK, rh):\n zeroC = 273.15\n\n sat_vapress = 611.2 * np.exp(17.67 * ((tairK - zeroC) / (tairK - 29.65)))\n vapress = np.maximum(5.0, rh) / 100. * sat_vapress\n lw_down = 2.648 * tairK + 0.0346 * vapress - 474.0\n\n return lw_down", "def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave", "def getTauLos(losdens, kap):\n rhocell = losdens['valcell']\n nzs =len(rhocell)\n nz = nzs + 1\n zlos = losdens['zlos'] # cell\n zlosi = losdens['zlosi'] # wall\n\n tau, dtaustg, taustg = getTauz(zlosi, zlos, rhocell, kap)\n\n\n# dzlos = zlosi[1:] - zlosi[:-1] # zlos is in the same direction as tau\n# dtaustg = kap * rhocell * dzlos\n\n# tau = np.zeros(nz, dtype=np.float64)\n# for iz in range(nzs):\n# tau[iz+1] = sum(dtaustg[:iz+1])\n\n# taustg = 0.5*(tau[1:] + tau[:-1])\n\n return tau, dtaustg, taustg", "def calc_h_lat(dry_bulb_C, humidity_ratio_out_kgperkg):\n\n h_kJ_kg = humidity_ratio_out_kgperkg * (dry_bulb_C * CPW_kJ_kgC + h_we_kJ_kg)\n\n return h_kJ_kg", "def planckwavelen(wavel,Temp):\n wavel=wavel*1.e-6 #convert to meters\n c1=2.*h*c**2.\n c2=h*c/kb\n Blambda=1.e-6*c1/(wavel**5.*(np.exp(c2/(wavel*Temp)) -1))\n return Blambda", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n 
dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. + (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) 
...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if 
domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def k_Wa92(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.31 * U2) * (660 / Sc) ** 0.5\n\n return k", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def test_klauder(self):\n ideal = np.array([0.14899879, -0.16633309, -0.42806931, 0.16605633,\n 0.70769336, 0.16605633, -0.42806931, -0.16633309])\n actual = misc.klauder(8)\n np.testing.assert_allclose(ideal, actual, atol=1e-8, rtol=1e-8)", "def calc_TEB(Imap_name='HFI_SkyMap_353_2048_R2.02_full.fits',\n Pmap_name='HFI_SkyMap_353_2048_R2.02_full.fits',\n nus=None, fwhm=0.063, nside=16, lmax=100,\n lmaps_only=False, filename=None):\n\n # read from file if it's there\n if filename is None:\n filename = 'bispectrum_lmax{}'.format(lmax)\n if nus is not None:\n filename += '_{}-{}-{}GHz.npy'.format(nus[0],nus[1],nus[1])\n else:\n filename += '_{}'.format(Imap_name[-5])\n if Imap_name != Pmap_name:\n filename += '_{}.npy'.format(Pmap_name[-5])\n else:\n filename += '.npy'\n print 'looking for {} ...'.format(filename)\n if os.path.exists(filename) and not lmaps_only:\n bispectrum = np.load(filename, 'r')\n return bispectrum\n\n # compute it, if the file doesn't exist\n if nus is not None:\n Imap_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nus[0])\n Pmap_name = 'HFI_SkyMap_{}_2048_R2.02_full.fits'.format(nus[1])\n title = '$I_{%i} P^2_{%i}$ (equilateral)' % (nus[0],nus[1])\n \n Imap = prepare_map( Imap_name, field=0,\n nside_out=nside, fwhm=fwhm )\n Tlm = hp.map2alm( Imap, lmax=lmax )\n\n Qmap, Umap = prepare_map( Pmap_name, field=(1,2),\n nside_out=nside, fwhm=fwhm )\n \n \n Elm,Blm = hp.map2alm_spin( (Qmap,Umap), 2, lmax=lmax )\n\n if lmax is None:\n lmax = hp.sphtfunc.Alm.getlmax(len(Tlm))\n ls, ms = hp.sphtfunc.Alm.getlm(lmax,np.arange(len(Tlm)))\n lmin = ls.min()\n mapsize = len(Imap)\n pixelsize = hp.pixelfunc.nside2pixarea(nside)\n \n Ylm = calc_Ylm(Imap, ls, ms)\n\n\n #return Ylm, Tlm, ls, ms\n print 'calculating Tl,El,Bl ...'\n \n Tl = sum_over_m(Tlm, Ylm, ls,\n lmax=lmax, lmin=lmin, mapsize=mapsize)\n El = sum_over_m(Elm, Ylm, ls,\n lmax=lmax, lmin=lmin, mapsize=mapsize)\n Bl = sum_over_m(Blm, Ylm, ls,\n lmax=lmax, lmin=lmin, mapsize=mapsize)\n\n if lmaps_only:\n return Tl,El,Bl\n \n hs = get_hs(lmin=lmin, lmax=lmax)\n\n print 'calculating bispectrum ...'\n bispectrum = calc_bispectrum(Tl, El, Bl, hs,\n pixelsize,\n lmax=lmax, lmin=lmin,\n mapsize=mapsize)\n clean_bispectrum_of_naninf(bispectrum, hs, inplace=True)\n np.save(filename, bispectrum)\n return bispectrum", "def PlankFunction(wavelen,T=5778.):\n\n c1=1.191042E8\n c2=1.4387752E4\n L=c1/(wavelen**5*(np.exp(c2/(wavelen*T))-1))\n return L", "def k_Li86(wind_ms, temp_C):\n from numpy import zeros_like\n\n U = wind_ms\n T 
= temp_C\n\n Sc = schmidt_number(T)\n k = zeros_like(temp_C)\n\n i1 = U <= 3.6\n i2 = (U > 3.6) & (U < 13.0)\n i3 = U >= 13.0\n\n k[i1] = (0.17 * U[i1]) * (Sc[i1] / 600) ** (-2.0 / 3.0)\n k[i2] = ((U[i2] - 3.4) * 2.8) * (600 / Sc[i2]) ** 0.5\n k[i3] = ((U[i3] - 8.4) * 5.9) * (600 / Sc[i3]) ** 0.5\n\n return k", "def wind_chill(T_a, v):\r\n return 13.12 + 0.6215*(T_a) - 11.37*(v)**0.16 + 0.3965*(T_a)*(v)**0.16", "def moonlongitude(time):\n B0 = 481267.8809\n C0 = 218.3162\n # fmt: off\n A = np.array([62888.e-4, 12740.e-4, 6583.e-4, 2136.e-4, 1851.e-4, \\\n 1144.e-4, 588.e-4, 571.e-4, 533.e-4, 458.e-4, 409.e-4, \\\n 347.e-4, 304.e-4, 154.e-4, 125.e-4, 110.e-4, 107.e-4, \\\n 100.e-4, 85.e-4, 79.e-4, 68.e-4, 52.e-4, 50.e-4, 40.e-4, \\\n 40.e-4, 40.e-4, 38.e-4, 37.e-4, 28.e-4, 27.e-4, 26.e-4, \\\n 24.e-4, 23.e-4, 22.e-4, 21.e-4, 21.e-4, 21.e-4, 18.e-4, \\\n 16.e-4, 12.e-4, 11.e-4, 9.e-4, 8.e-4, 7.e-4, 7.e-4, \\\n 7.e-4, 7.e-4, 6.e-4, 6.e-4, 5.e-4, 5.e-4, 5.e-4, \\\n 4.e-4, 4.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, \\\n 3.e-4, 3.e-4])\n B = np.array([477198.868, 413335.35, 890534.22, 954397.74, \\\n 35999.05, 966404.0, 63863.5, 377336.3, \\\n 1367733.1, 854535.2, 441199.8, 445267.1, \\\n 513197.9, 75870, 1443603, 489205, 1303870, \\\n 1431597, 826671, 449334, 926533, 31932, \\\n 481266, 1331734, 1844932, 133, 1781068, \\\n 541062, 1934, 918399, 1379739, 99863, \\\n 922466, 818536, 990397, 71998, 341337, \\\n 401329, 1856938, 1267871, 1920802, 858602, \\\n 1403732, 790672, 405201, 485333, 27864, \\\n 111869, 2258267, 1908795, 1745069, 509131, \\\n 39871, 12006, 958465, 381404, 349472, \\\n 1808933, 549197, 4067, 2322131.])\n C = np.array([44.963, 10.74, 145.70, 179.93, 87.53, 276.5, \\\n 124.2, 13.2, 280.7, 148.2, 47.4, 27.9, 222.5, \\\n 41, 52, 142, 246, 315, 111, 188, \\\n 323, 107, 205, 283, 56, 29, 21, \\\n 259, 145, 182, 17, 122, 163, 151, \\\n 357, 85, 16, 274, 152, 249, 186, \\\n 129, 98, 114, 50, 186, 127, 38, \\\n 156, 90, 24, 242, 223, 187, 340, \\\n 354, 337, 58, 220, 70, 191])\n # fmt: on\n RAD = 0.0174532925199433\n tempb = (B * time + C) * RAD\n amp = A * np.cos(tempb)\n moonlon = np.sum(amp)\n moonlon = (moonlon + B0 * time + C0) * RAD\n return moonlon", "def ReadTinker():\n # Total Potential Energy : {f} Kcal/mole\n total_line = \" Total Potential Energy :\"\n with open('LICHM_TINKEREnergy_0.log') as f:\n for line in f:\n if line.startswith(total_line):\n # print(line)\n TinkE = re.findall(r'\\-\\d+\\.*\\d*', line)\n TinkE = float(TinkE[0])\n # if AMOEBA == True:\n # if line.startswith(\"Polarization\"):\n # Epol = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Epol = float(Epol[0])\n # elif line.startswith(\"Implicit Solvation\")\n # Esolv = re.findall(r'\\-\\d+\\.*\\d*', line)\n # Esolv = float(Esolv[0])\n f.close()\n # if AMOEBA == True:\n # TINKERPolForces = EPol + ESolv\n # TinkE += TINKERPolForces\n #\n TinkE *= kcal2ev\n return TinkE", "def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 
1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide", "def derive_Hosek18b(wavelength):\n # Extinction law definition\n wave = np.array([0.8059, 0.962, 1.25, 1.53, 2.14, 3.545])\n A_AKs = np.array([7.943, 5.715, 3.142, 2.04, 1.0, 0.50])\n \n # Following Hosek+18, Interpolate over the curve with cubic spline interpolation\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # This curve already assumes A_Ks = 1.0, so we can go straight to\n # output \n return A_AKs_at_wave", "def halflife(self, humidity: _VectorisedFloat, inside_temp: _VectorisedFloat) -> _VectorisedFloat:\n # Updated to use the formula from Dabish et al. with correction https://doi.org/10.1080/02786826.2020.1829536\n # with a maximum at hl = 6.43 (compensate for the negative decay values in the paper). 
\n # Note that humidity is in percentage and inside_temp in °C.\n # factor np.log(2) -> decay rate to half-life; factor 60 -> minutes to hours\n hl_calc = ((np.log(2)/((0.16030 + 0.04018*(((inside_temp-273.15)-20.615)/10.585)\n +0.02176*(((humidity*100)-45.235)/28.665)\n -0.14369\n -0.02636*((inside_temp-273.15)-20.615)/10.585)))/60)\n \n return np.where(hl_calc <= 0, 6.43, np.minimum(6.43, hl_calc))", "def kth_func(Th, ThS, lbd, ksat):\n if Th < 0.0:\n # rwarn(\"water content < 0 IN kth_func\")\n Th = 0.0\n kth = ksat * (Th / ThS) ** (3 + (2 / lbd))\n\n return kth", "def calc_h_sen(dry_bulb_C):\n\n h_kJ_kg = dry_bulb_C * CPA_kJ_kgC\n\n return h_kJ_kg", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def stockdon2006(H,L,B):\n \n # Make sure parameters are double\n H = np.double(H)\n L = np.double(L)\n B = np.double(B)\n \n # Compute incident swash (equation 11) \n incSwash = 1.1 / 2 * 0.75 * B * (H*L)**0.5\n \n # Infragravity swash (Equation 12)\n igSwash = 1.1 / 2 * 0.06 * (H*L)**0.5\n \n # Compute R2% (Equation 19)\n setup = 1.1 * 0.35 * B * ((H * L)**0.5)\n swash = 1.1 / 2.0 * (H*L * (0.563 * B**2 + 0.004))**0.5 \n r2 = setup + swash\n \n return r2,setup,incSwash,igSwash", "def planck_w(lam, T):\n return ((2*h*c**2)/(lam**5))*(1./(np.exp((h*c)/(lam*k*T))-1))", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 
19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def Tloken(self, x):\n return 11.2 * 
(self.r500*0.7)**2. * ( 1. + 0.75*x)**(-1.6)", "def B_func(Th33, Th1500):\n\n D = ln(Th33) - ln(Th1500)\n B = (ln(1500) - ln(33)) / D\n\n def lbd_func(C):\n \"\"\"return the slope of logarithmic tension-moisture curve\"\"\"\n if C == 0:\n return 0.0\n lbd = 1 / C\n return lbd\n\n return lbd_func(B)", "def planckian(temp, wavelength):\n if wavelength==560: return 100.0\n if temp<60: temp=60 # For simplicity, in very low temperature\n num = wavelength**(-5)\n try:\n v=num / (math.exp(0.0143877687750393/(wavelength*(10**(-9))*temp)) - 1)\n except:\n print(temp)\n print(wavelength)\n raise ValueError\n v2=(560.0**(-5)) / (math.exp(0.0143877687750393/(560.0*(10**(-9))*temp)) - 1)\n return v*100.0/v2", "def kervella(magB=None, magV=None, magK=None):\n if magB is None or np.isnan(magB) or magB > 49:\n magB = np.nan\n if magV is None or np.isnan(magV) or magV > 49:\n magV = np.nan\n if magK is None or np.isnan(magK) or magK > 49:\n magK = np.nan\n const1 = np.array([0.0755, 0.0535])\n const2 = np.array([0.5170, 0.5159])\n mag = np.array([magV, magB])\n vals = 10**(const1*(mag-magK)+const2-0.2*magK)\n diameter = {}\n if not np.isnan(vals[0]):\n diameter['V'] = vals[0]*u.mas\n if not np.isnan(vals[1]):\n diameter['B'] = vals[1]*u.mas\n return diameter", "def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n # g = gbar * n * h\n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n h = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4\n ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10\n\t\t\n hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))\n htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10\n \n # Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms\n if htau < 5.0:\n htau = 5.0\n\n dn = (ninf-n)/ntau\n dh = (hinf-h)/htau\n \n return [dn,dh]", "async def wetbulb(self, temp, humidity, pressure):\n t = float(temp)\n rh = float(humidity)\n p = float(pressure)\n\n # Variables\n edifference = 1\n twguess = 0\n previoussign = 1\n incr = 10\n es = 6.112 * math.exp(17.67 * t / (t + 243.5))\n e2 = es * (rh / 100)\n\n while (abs(edifference) > 0.005):\n ewguess = 6.112 * math.exp((17.67 * twguess) / (twguess + 243.5))\n eguess = ewguess - p * (t - twguess) * 0.00066 * (1 + (0.00115 * twguess))\n edifference = e2 - eguess\n if edifference == 0:\n break\n\n if edifference < 0:\n cursign = -1\n if (cursign != previoussign):\n previoussign = cursign\n incr = incr / 10\n else:\n incr = incr\n else:\n cursign = 1\n if (cursign != previoussign):\n previoussign = cursign\n incr = incr / 10\n else:\n incr = incr\n\n twguess = twguess + incr * previoussign\n\n return await self.temperature(twguess)", "def fahrenheitToKelvin(fahrenheit:float, ndigits = 2)->float:\n return round(((float(fahrenheit) - 32) * 5 / 9) + 273.5, ndigits)", "def compute_kinoshita(s):\n length = int(NBENDS*LAMBDA/DS) + 1\n x = np.zeros(length)\n y = np.zeros(length)\n cur = np.zeros(length+1)\n theta = THETA0*np.sin(2*np.pi*s/LAMBDA) \\\n + THETA0**3*(JS*np.cos(6*np.pi*s/LAMBDA) \\\n - JF*np.sin(6*np.pi*s/LAMBDA))\n theta[np.abs(theta)<ZERO] = 0\n for i in range(length):\n cossum, sinsum = 0, 0\n for j in range(i):\n cossum += DS*np.cos(theta[j])\n sinsum += DS*np.sin(theta[j])\n x[i] = 0 if np.abs(cossum) < ZERO else cossum\n y[i] = 0 if np.abs(sinsum) < ZERO else sinsum\n x = np.concatenate((x, np.array([x[-1]+x[1]-x[0]])))\n y = np.concatenate((y, np.array([y[-1]+y[1]-y[0]])))\n s = np.concatenate((s, 
np.array([s[-1]+DS])))\n theta = np.concatenate((theta, np.array([theta[-1]])))\n if FLIPSTRM:\n x = x[::-1]\n y = y[::-1]\n theta = np.concatenate((theta[::-1][1:], np.array([theta[0]])))\n for i in range(1, length):\n cur[i] = (theta[i]-theta[i-1])/DS\n cur[i] = 0 if np.abs(cur[i]) < ZERO else cur[i]\n cur[0], cur[-1] = cur[-2], cur[1]\n return s, x, y, cur, theta", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n 
#######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def ksat_func(ThS, Th33, lbd):\n\n # assert ThS > Th33, \"sat <= fc IN ksat_func\"\n ksat = 1930 * (ThS - Th33) ** (3 - lbd)\n return ksat", "def test_thermal_relaxation_error_kraus(self):\n t1, t2, time, p1 = (1, 2, 1, 0.3)\n error = thermal_relaxation_error(t1, t2, time, p1)\n circ, p = error.error_term(0)\n self.assertEqual(p, 1)\n self.assertEqual(circ[0]['name'], 'kraus')\n self.assertEqual(circ[0]['qubits'], [0])", "def rad(tx,K,w,e,T0,Vo,P):\r\n\r\n M=2*np.pi*(tx-T0)/P #Mean anomaly\r\n E=np.pi\r\n for j in range(0,25):\r\n E=(M-e*(E*np.cos(E)-np.sin(E)))/(1-e*np.cos(E))\r\n th=2*np.arctan(((1+e)/(1-e))**0.5*np.tan(E/2))\r\n return K*(np.cos(th+w)+e*np.cos(w))+Vo", "def kelvinToRankie(kelvin:float, ndigits = 2)->float:\n return round(float(fahrenheit)+ 459, ndigits)", "def molar_mass_dry_air():\n return 28.9647", "def whc_tot(mukey, layers=''):\n #read appropriate soils.in content to a python list\n mukey = str(mukey)\n soil_path = \"/data/paustian/ernie/SSURGO_master_script/soil_test2/\"\n soil_fpath = soil_path+mukey[:-3]+\"/\"+mukey+\".in\"\n cont = [[]]\n data_input = open(soil_fpath, 'r')\n for line in data_input:\n cont.append(line.split())\n del cont[0]\n\n #convert all entries in the 2D list to float format where possible, or zero in the case\n #of very small numbers recorded in scientific notation\n for k in range(len(cont)):\n for l in range(len(cont[k])):\n cont[k][l] = float(cont[k][l])\n\n #loop through list and compute the water holding capacity increment represented in \n #each line\n min_h2o_evap = 0\n min_h2o = 0\n max_h2o = 0\n whc = 0\n for i 
in range(len(cont)):\n if not layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n else:\n if 1+i <= layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n if layers:\n if layers > len(cont):\n print \"NOTE: specified layer limit exceeds number of layers found in soils.in file\"\n\n return whc, min_h2o, max_h2o", "def flattop_risefall(t, params):\n risefall = tf.cast(params['risefall'].get_value(), dtype=tf.float64)\n t_final = tf.cast(params['t_final'].get_value(), dtype=tf.float64)\n t_up = risefall\n t_down = t_final - risefall\n return (1 + tf.math.erf((t - t_up) / risefall)) / 2 * \\\n (1 + tf.math.erf((-t + t_down) / risefall)) / 2", "def hz2mel(hz):\r\n return 2595 * np.log10(1+hz/700.0)", "def derive_Hosek18(wavelength):\n # Extinction law definition\n wave = np.array([0.8059, 0.962, 1.25, 1.53, 2.14, 3.545])\n A_AKs = np.array([9.66, 6.29, 3.56, 2.33, 1.0, 0.50])\n \n\n # Following Hosek+18, Interpolate over the curve with cubic spline interpolation\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # This curve already assumes A_Ks = 1.0, so we can go straight to\n # output \n return A_AKs_at_wave", "def compute_windchill(t,v):\n a = 35.74\n b = 0.6215\n c = 35.75\n d = 0.4275\n v16 = v**0.16\n wci = a+(b*t)-(c*v16)+(d*t*v16)\n return wci", "def KendallTau_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n return (2 * (TP + TN - FP - FN)) / (n * (n - 1))\n except Exception:\n return \"None\"", "def T_naught(z, h, OM, OB):\n\n T0 = 28.5 * ((1.0+z)/10.0)**(0.5) * OB/0.042 * h/0.73 * (0.24/OM)**(0.5)\n return T0", "def uL( wavelen, **kwargs ):\n Cc = C.c * 1e6 # speed of light um s^-1\n\n Gamma = kwargs.get('Gamma', 2*np.pi *5.9e6 ) # linewidth s^-1\n lambda0 = kwargs.get('lambda0', 0.671 ) # transition wavelength in microns \n \n omega0 = 2*np.pi*Cc / lambda0\n omegaL = 2*np.pi*Cc / wavelen\n intensity = 1.0 \n depthJ = (intensity)* -3*np.pi* Cc**2*Gamma / ( 2*omega0**3) * \\\n ( 1/(omega0 - omegaL ) + 1/(omega0 + omegaL ) ) # Joule\n depthuK = depthJ / C.k *1e6 # C.k is Boltzmann's constant\n return depthuK", "def water_uptake_campbell(self, soil):\r\n daily_ref_evap_transp = soil.daily_ref_evap_transp\r\n root_hydr_cond = np.zeros(soil.total_layers)\r\n shoot_hydr_cond = np.zeros(soil.total_layers)\r\n plant_hydr_cond = np.zeros(soil.total_layers)\r\n root_activity = np.zeros(soil.total_layers)\r\n root_cond_adj = np.zeros(soil.total_layers)\r\n tot_root_cond_adj = 0\r\n salinity_factor = np.zeros(soil.total_layers)\r\n soil_water_pot_avg = 0\r\n WAT_POT_FIELD_CAP = -33\r\n\r\n # Transpiration\r\n self.pot_transp = daily_ref_evap_transp * self.light_intercpt\r\n self.max_pot_transp = (self.campbell_max_daily_transp *\r\n self.light_intercpt)\r\n self.expect_transp = min(self.pot_transp, self.max_pot_transp) # mm/day\r\n\r\n # Plant hydraulic conductance (kg s m-4)\r\n tot_plant_hydr_cond = (self.max_pot_transp /\r\n (WAT_POT_FIELD_CAP -\r\n self.leaf_water_pot_stress_onset))\r\n # assumption of 2/3 of plant hydraulic conductance is from roots\r\n tot_root_hydr_cond = tot_plant_hydr_cond / 0.65\r\n # assumption of 1/3 of plant hydraulic conductivity is from shoots\r\n tot_shoot_hydr_cond = tot_plant_hydr_cond / 
0.35\r\n\r\n for lyr in soil.layers:\r\n root_activity[lyr] = 1\r\n salinity_factor[lyr] = 1\r\n root_cond_adj[lyr] = (root_activity[lyr] * self.root_fraction[lyr]\r\n * salinity_factor[lyr])\r\n root_hydr_cond[lyr] = tot_root_hydr_cond * root_cond_adj[lyr]\r\n tot_root_cond_adj += root_cond_adj[lyr]\r\n\r\n # Root, shoot and plant hydraulic conductance(kg s m-4)\r\n for lyr in soil.layers:\r\n if root_cond_adj[lyr] > 0:\r\n shoot_hydr_cond[lyr] = (tot_shoot_hydr_cond *\r\n root_cond_adj[lyr] / tot_root_cond_adj)\r\n plant_hydr_cond[lyr] = (root_hydr_cond[lyr] *\r\n shoot_hydr_cond[lyr] /\r\n (root_hydr_cond[lyr] +\r\n shoot_hydr_cond[lyr]))\r\n else:\r\n plant_hydr_cond[lyr] = 0\r\n\r\n tot_root_hydr_cond *= tot_root_cond_adj\r\n tot_plant_hydr_cond = ((tot_root_hydr_cond * tot_shoot_hydr_cond) /\r\n (tot_root_hydr_cond + tot_shoot_hydr_cond))\r\n\r\n if tot_plant_hydr_cond > 0:\r\n for lyr in soil.layers:\r\n soil_water_pot_avg += (soil.water_potential[lyr] *\r\n root_cond_adj[lyr])\r\n leaf_water_pot = (soil_water_pot_avg - self.expect_transp /\r\n tot_plant_hydr_cond)\r\n if leaf_water_pot < self.leaf_water_pot_stress_onset:\r\n leaf_water_pot = ((tot_plant_hydr_cond * soil_water_pot_avg *\r\n (self.leaf_water_pot_stress_onset -\r\n self.leaf_water_pot_wilt_point) +\r\n self.leaf_water_pot_wilt_point *\r\n self.expect_transp)\r\n / (tot_plant_hydr_cond *\r\n (self.leaf_water_pot_stress_onset -\r\n self.leaf_water_pot_wilt_point) +\r\n self.expect_transp))\r\n if leaf_water_pot < self.leaf_water_pot_wilt_point:\r\n leaf_water_pot = self.leaf_water_pot_wilt_point\r\n self.att_transp = 0\r\n transp_ratio = self.att_transp / self.expect_transp\r\n\r\n elif leaf_water_pot < self.leaf_water_pot_stress_onset:\r\n self.att_transp = (self.expect_transp * (leaf_water_pot -\r\n self.leaf_water_pot_wilt_point) / (\r\n self.leaf_water_pot_stress_onset -\r\n self.leaf_water_pot_wilt_point))\r\n transp_ratio = self.att_transp / self.expect_transp\r\n\r\n else:\r\n self.att_transp = self.expect_transp\r\n transp_ratio = 1\r\n # crop water uptake (kg/m2/d = mm/d)\r\n for lyr in soil.layers:\r\n self.water_uptake[lyr] = (plant_hydr_cond[lyr] *\r\n (soil.water_potential[lyr] -\r\n leaf_water_pot) * transp_ratio)\r\n if self.water_uptake[lyr] < 0:\r\n self.water_uptake[lyr] = 0\r\n self.crop_transp = self.water_uptake.sum() # mm/day\r\n self.cum_transp += self.crop_transp\r\n self.cum_pot_transp += self.expect_transp\r\n self.transp_ratio = self.crop_transp / self.expect_transp", "def k_Ni00(wind_ms, temp_C):\n\n U = wind_ms\n\n Sc = schmidt_number(temp_C)\n k = (0.333 * U + 0.222 * U ** 2) * (600 / Sc) ** 0.5\n\n return k", "def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # 
Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array", "def rt60_eyring(S, V, a, m, c):\n\n return -(24 * np.log(10) / c) * V / (S * np.log(1 - a) + 4 * m * V)", "def bet(P_1,V0_1,Vi_1,Vflap_1,J_a,Twist,Pitch,Chord,PLE,Polar,rho,dt):\n Phi = np.zeros_like(Twist)\n V_4 = np.zeros_like(P_1)\n dV_4 = np.zeros_like(P_1)\n F_5 = np.zeros_like(P_1)\n F_4 = np.zeros_like(P_1)\n F_1 = np.zeros_like(P_1)\n l = np.zeros_like(Twist)\n d = np.zeros_like(Twist)\n m = np.zeros_like(Twist)\n aoa = np.zeros_like(Twist)\n daoa = np.zeros_like(Twist)\n cl = np.zeros_like(Twist)\n cd = np.zeros_like(Twist)\n cm = np.zeros_like(Twist)\n w = np.zeros_like(Chord)\n wy = np.zeros_like(Twist)\n S = np.zeros_like(Twist)\n \n Vflap_1[np.abs(Vflap_1) < 1e-8] = 0\n \n for i in range(len(P_1)):\n \n for j in range(len(P_1[i])):\n \n # Calculate wing element elevation angle\n \n if j <= J_a:\n \n dz = P_1[i,J_a,2]\n dy = P_1[i,J_a,1]\n\n Phi[i,j] = np.arctan(dz/dy)\n \n else:\n \n dz = P_1[i,-1,2]-P_1[i,J_a+1,2]\n dy = P_1[i,-1,1]-P_1[i,J_a+1,1]\n \n Phi[i,j] = np.arctan(dz/dy)\n \n # Calculate local flow velocity\n V_4[i,j] = roty((Twist[i,j]+Pitch[j]), rotx(-Phi[i,j],(V0_1 + Vi_1 + Vflap_1[i,j]))) \n V_4[np.abs(V_4) < 1e-12] = 0\n\n # Calculate angle of attack\n aoa[i,j] = np.arctan(-V_4[i,j,2]/V_4[i,j,0])\n\n # Find cl,cd,cm from polars with linear interpolation\n cl[i,j] = np.interp(np.degrees(aoa[i,j]),Polar[j,0],Polar[j,1])\n cd[i,j] = np.interp(np.degrees(aoa[i,j]),Polar[j,0],Polar[j,2])\n cm[i,j] = np.interp(np.degrees(aoa[i,j]),Polar[j,0],Polar[j,3])\n\n # Calculate element width\n if j < len(P_1[i])-1:\n wy[i,j] = P_1[i,j+1,1]-P_1[i,j,1]\n else:\n wy[i,j] = P_1[i,j,1]-P_1[i,j-1,1]\n \n # Calculate element surface area\n S[i,j] = Chord[j] * wy[i,j]\n \n # Calculate aerodynamic forces\n l[i,j] = 0.5 * rho * np.linalg.norm(V_4[i,j])**2 * S[i,j] * cl[i,j]\n d[i,j] = 0.5 * rho * np.linalg.norm(V_4[i,j])**2 * S[i,j] * cd[i,j]\n m[i,j] = 0.5 * rho * np.linalg.norm(V_4[i,j])**2 * S[i,j] * cm[i,j]\n \n # Force vector in Blade Element local axes\n F_5[i,j] = np.array([-d[i,j], 0, l[i,j]])\n \n # Force vector in Blade local axes\n F_4[i,j] = roty(aoa[i,j],F_5[i,j])\n \n \n \n \"\"\"\n Add mass effect and rotate to stroke plane axes\n \"\"\"\n for i in range(len(P_1)):\n \n for j in range(len(P_1[i])):\n # Add mass effects\n if i==0:\n daoa[i,j] = (aoa[1,j]-aoa[0,j])/dt\n dV_4[i,j] = (V_4[1,j]-V_4[0,j])/dt\n \n elif i < len(P_1)-1:\n daoa[i,j] = (aoa[i+1,j]-aoa[i-1,j])/(2*dt)\n dV_4[i,j] = (V_4[i+1,j]-V_4[i-1,j])/(2*dt)\n \n else:\n daoa[i,j] = (aoa[i,j]-aoa[i-1,j])/dt\n dV_4[i,j] = (V_4[i,j]-V_4[i-1,j])/dt\n \n \n F_4[i,j,0] = F_4[i,j,0] - 0.25 * rho * np.pi * Chord[j] * S[i,j] * V_4[i,j,2] * daoa[i,j]\n F_4[i,j,2] = F_4[i,j,2] + 0.25 * rho * np.pi * Chord[j] * S[i,j] * dV_4[i,j,2]\n \n # Tranform to Stroke Plane Axes\n F_1[i,j] = rotx(Phi[i,j],roty(-(Twist[i,j]+Pitch[j]),F_4[i,j]))\n\n return F_1", "def eq_br_mo(k, K, kr, theta_eq, bist_ang_az, eq_azi, wind_dir, u_10, fetch, div, polarization):\r\n\r\n nk = k.shape[2]\r\n\r\n # Set azimuth to compute transfer function\r\n azimuth = np.linspace(-np.pi, np.pi, 37)\r\n\r\n # # Sea surface slope in the direction of incidence angle\r\n ni2 = GoM_ni2_func(kr, k, K, u_10, fetch, azimuth, div, wind_dir)\r\n\r\n nn = 
89 * 2 * np.pi / 180\r\n ni = (np.arange(nk) * nn / nk).reshape(1, nk) - nn / 2\r\n ni = ni.reshape(nk, 1)\r\n ni = np.tan(ni)\r\n\r\n Br = np.zeros([div.shape[0], div.shape[1]])\r\n\r\n for ii in np.arange(ni2.shape[0]):\r\n for jj in np.arange(ni2.shape[1]):\r\n P = np.exp(-0.5 * (ni - np.mean(ni)) ** 2 / ni2[ii, jj]) / np.sqrt(2 * np.pi * ni2[ii, jj])\r\n # the range of the sea surface slope\r\n angle_index = np.logical_and(-3 * 180 * np.arctan(np.sqrt(ni2[ii,jj])) / np.pi < np.arctan(ni) * 180 / np.pi, np.arctan(ni) * 180 / np.pi < 3 * 180 * np.arctan(np.sqrt(ni2[ii,jj])) / np.pi)\r\n P = P[angle_index]\r\n nini = ni[angle_index]\r\n nnk = nini.shape[0]\r\n nini = nini.reshape(nnk, 1)\r\n # local incidence angle\r\n theta_l = np.abs(theta_eq[jj] - np.arctan(nini).reshape(nnk, 1))\r\n # geometric scattering coefficients [Plant 1997] equation 5,6\r\n eps_sin = np.sqrt(const.epsilon_sw-np.sin(theta_l)**2)\r\n # compute the wave number for bistatic geometry\r\n kbr = 2 * kr * np.sin(theta_l) * np.cos(bist_ang_az[jj]/2)\r\n kkbr = np.sort(kbr[:,0])\r\n T = Trans_func(kkbr, K[ii, jj], u_10[ii, jj], fetch, azimuth, div[ii, jj])[np.argsort(kbr[:,0])].reshape(nnk,1)\r\n Skb = B_int_single(kkbr.reshape(nnk, 1), u_10[ii, jj], fetch, eq_azi[jj])[np.argsort(kbr[:,0]), :] * (1+abs(T)) / kbr.reshape(nnk,1) ** 4\r\n Skb_pi = B_int_single(kkbr.reshape(nnk, 1), u_10[ii, jj], fetch, np.pi + eq_azi[jj])[np.argsort(kbr[:,0]), :] * (1+abs(T)) / kbr.reshape(nnk, 1) ** 4\r\n Skb_r = (Skb + Skb_pi) / 2 # Kudryavtsev 2003a equation 2\r\n if polarization == 'VV':\r\n G = np.cos(theta_l) ** 2 * (const.epsilon_sw - 1) * (\r\n const.epsilon_sw * (1 + np.sin(theta_l) ** 2) - np.sin(theta_l) ** 2) / (\r\n const.epsilon_sw * np.cos(theta_l) + eps_sin) ** 2\r\n G = np.abs(G) ** 2\r\n else:\r\n G = np.cos(theta_l) ** 2 * (const.epsilon_sw - 1) / (np.cos(theta_l) + eps_sin) ** 2\r\n G = np.abs(G) ** 2\r\n # pure Bragg scattering NRCS\r\n br0 = 16 * np.pi * kr ** 4 * G * Skb_r\r\n # Bragg scattering composite model\r\n BR = br0 * P.reshape(nnk, 1)\r\n # integral over kbr >= kd\r\n a = np.tan(theta_eq[jj] - const.d / (2 * np.cos(bist_ang_az[jj]/2)))\r\n b = np.tan(theta_eq[jj] + const.d / (2 * np.cos(bist_ang_az[jj]/2)))\r\n Br[ii, jj] = np.trapz(BR[nini <= a], nini[nini <= a]) + np.trapz(BR[nini >= b], nini[nini >= b])\r\n return Br", "def calc_lhv(self):\n hf = {}\n hf['hydrogen'] = 0\n hf['methane'] = -74.85\n hf['ethane'] = -84.68\n hf['propane'] = -103.8\n hf['butane'] = -124.51\n hf['O2'] = 0\n hf['CO2'] = -393.5\n # water (gaseous)\n hf['H2O'] = -241.8\n\n lhv = 0\n\n for f, x in self.fuel.val.items():\n molar_masses[f] = CP.PropsSI('M', f)\n fl = set(list(hf.keys())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n if len(fl) == 0:\n continue\n\n if list(fl)[0] in self.fuels():\n structure = fluid_structure(f)\n\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure:\n n[el] = structure[el]\n else:\n n[el] = 0\n\n lhv += (-(n['H'] / 2 * hf['H2O'] + n['C'] * hf['CO2'] -\n ((n['C'] + n['H'] / 4) * hf['O2'] +\n hf[list(fl)[0]])) / molar_masses[f] * 1000) * x\n\n return lhv", "def ts_bt_func(thermal_rad, k1, k2):\n ts_bt = np.copy(thermal_rad).astype(np.float64)\n ts_bt[ts_bt <= 0] = np.nan\n np.reciprocal(ts_bt, out=ts_bt)\n ts_bt *= k1\n ts_bt += 1.0\n np.log(ts_bt, out=ts_bt)\n np.reciprocal(ts_bt, out=ts_bt)\n ts_bt *= k2\n return ts_bt.astype(np.float32)", "def valley(self, h=2, b1=3, b2=3, b3=9):\n mua = 2 * h / self.sk\n mub = 2 * b3 / (b1+b2)\n muc = 5\n self.mu_valley = mu = min(mua, 
mub, muc)\n self.s_valley = self.mu_valley * self.Ce * self.Ct * self.sk\n print(f'The valley snow shape coefficient = {self.mu_valley :.2f}')\n print(f'The peak valley snow load = {self.s_valley :.2f}kPa')", "def flattop_risefall_1ns(t, params):\n params['risefall'] = 1e-9\n return flattop_risefall(t, params)", "def hz2mel(hz):\n return 2595 * np.log10(1+hz/700.)", "def jarrow_rudd(s, k, t, v, rf, cp, am=False, n=100):\n\n\t# Basic Calculations\n\th = t / n \n\tu = math.exp((rf - 0.5 * math.pow(v, 2)) * h + v * math.sqrt(h))\n\td = math.exp((rf - 0.5 * math.pow(v, 2)) * h - v * math.sqrt(h))\n\tdrift = math.exp(rf * h)\n\tq = (drift - d) / (u - d)\n\n\t#Process the terminal stock price\n\tstkval = np.zeros((n+1, n+1))\n\toptval = np.zeros((n+1, n+1))\n\tstkval[0, 0] = s\n\n\tfor i in range(1, n+1):\n\t\tstkval[i, 0] = stkval[i - 1, 0] * u\n\t\tfor j in range(1, i + 1):\n\t\t\tstkval[i, j] = stkval[i - 1, j - 1] * d\n\n\t# Backwards recursion for option price\n\tfor j in range(n + 1):\n\t\toptval[n, j] = max(0, cp * (stkval[n, j] - k))\n\tfor i in range(n - 1, -1, -1):\n\t\tfor j in range(i + 1):\n\t\t\toptval[i, j] = (q * optval[i + 1, j] + (1 - q) * optval[i + 1, j + 1]) / drift\n\t\t\tif am:\n\t\t\t\toptval[i, j] = max(optval[i, j], cp * (stkval[i, j] - k))\n\treturn optval[0, 0]", "def compute_thermo(E,dos,TT):\n if (len(dos)<3):\n print (\"Not enough points in the phonon DOS!\")\n return None\n \n ZPE = 0.5*dos_integral(E,dos,1)\n modes = dos_integral(E,dos)\n \n EvibT = np.zeros(len(TT))\n SvibT = np.zeros(len(TT))\n CvibT = np.zeros(len(TT))\n FvibT = np.zeros(len(TT))\n for i in range(0,len(TT)):\n h = 0.5*(E[2]-E[0])\n arg = K_BOLTZMANN_RY*TT[i]\n arg2 = 2.0 * arg\n Evib = 0.0\n Svib = 0.0\n Cvib = 0.0\n for j in range(0,len(dos)-3,3):\n\n Evib += 3.0*E[j]/tanh(E[j]/(arg2))*dos[j]+\\\n 3.0*E[j+1]/tanh(E[j+1]/(arg2))*dos[j+1]+\\\n 2.0*E[j+2]/tanh(E[j+2]/(arg2))*dos[j+2]\n \n Svib += 3.0*(E[j]/arg2/tanh(E[j]/arg2)-log(2.0*sinh(E[j]/arg2)))*dos[j]+\\\n 3.0*(E[j+1]/arg2/tanh(E[j+1]/arg2)-log(2.0*sinh(E[j+1]/arg2)))*dos[j+1]+\\\n 2.0*(E[j+2]/arg2/tanh(E[j+2]/arg2)-log(2.0*sinh(E[j+2]/arg2)))*dos[j+2]\n\n try: # avoid overflow error for arg very small\n Cvib += 3.0*pow(E[j]/arg,2)/( 4.0*pow(sinh(E[j]/(arg2)),2) )*dos[j]+\\\n 3.0*pow(E[j+1]/arg,2)/( 4.0*pow(sinh(E[j+1]/(arg2)),2) )*dos[j+1]+\\\n 2.0*pow(E[j+2]/arg,2)/( 4.0*pow(sinh(E[j+2]/(arg2)),2) )*dos[j+2]\n except:\n Cvib += 0.0\n\n EvibT[i] = h*0.5*Evib*3.0/8.0 # h is the integration step, 0.5 comes from the equation for E,\n # the factor 3.0/8.0 comes from the Simpson 3/8 rule\n SvibT[i] = h*K_BOLTZMANN_RY*Svib*3.0/8.0\n CvibT[i] = h*K_BOLTZMANN_RY*Cvib*3.0/8.0\n FvibT = EvibT - SvibT * TT\n\n print ()\n return TT, EvibT, SvibT, CvibT, FvibT, ZPE, modes", "def overheads(NPT, DIT, NDIT):\n ov = 360. + 120. + NPT*NDIT*(DIT + 80. 
+ 15.)\n print 'Telescope time in h = ', ov/3600.", "def test_calculate_enthalpy(self):\n expected = [454734.6, 807677.3, 1171053.1]\n result = WetBulbTemperature()._calculate_enthalpy(\n self.mixing_ratio, self.specific_heat, self.latent_heat, self.temperature\n )\n self.assertArrayAlmostEqual(result, expected, decimal=1)", "def lunarperigee(time):\n dtor = np.pi / 180\n t1 = 1 + time\n t2 = t1 * t1\n t3 = t2 * t1\n perigee = (\n 334.329653 * dtor\n + 4069.0340329575 * dtor * t1\n - 0.010325 * dtor * t2\n - 1.2e-5 * dtor * t3\n )\n return perigee", "def hz2mel(hz):\n return 1127 * np.log(1 + hz / 700)", "def slewEstimate(source1,source2=None,ant=1,returnUnit='time') :\n azel1 = azel(source1)\n azel1[0] = _wrapFix(azel1[0], ant)\n if source2 == None: \n azel2 = currentAzel(ant)\n else:\n azel2 = azel(source2)\n azel2[0] = _wrapFix(azel2[0], ant)\n az1 = [azel1[0], _wrapChoice(azel1[0], ant)]\n az2 = [azel2[0], _wrapChoice(azel2[0], ant)]\n print az1, az2\n deltaAz = 360\n deltaAz = min(deltaAz, abs(az1[0]-az2[0]))\n deltaAz = min(deltaAz, abs(az1[0]-az2[1]))\n deltaAz = min(deltaAz, abs(az1[1]-az2[0]))\n deltaAz = min(deltaAz, abs(az1[1]-az2[1]))\n delta = [deltaAz, abs(azel1[1]-azel2[1])]\n print delta\n if ant < 7:\n azRate = 60.0\n elRate = 30.0\n settleTime = 5.0\n elif ant < 16: \n azRate = 120.0\n elRate = 60.0\n settleTime = 10.0\n else: \n azRate = 60.0\n elRate = 30.0\n settleTime = 8.0\n if returnUnit == 'time': \n return max(delta[0]/azRate,delta[1]/elRate) + settleTime/60.0\n return delta", "def hz2mel(hz):\n\treturn 2595 * numpy.log10(1 + hz / 700.0)", "def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave", "def internal_heat_gain(dwelling):\n losses_gain = -40 * dwelling.Nocc\n water_heating_gains = (1000. / 24.) * dwelling.heat_gains_from_hw / DAYS_PER_MONTH\n\n mean_appliance_energy = 207.8 * (dwelling.GFA * dwelling.Nocc) ** 0.4714\n appliance_consumption_per_day = (mean_appliance_energy / 365.) * (\n 1 + 0.157 * numpy.cos((2. * math.pi / 12.) * (numpy.arange(12) - .78)))\n\n appliance_consumption = appliance_consumption_per_day * DAYS_PER_MONTH\n\n if dwelling.reduced_gains:\n met_gain = 50 * dwelling.Nocc\n cooking_gain = 23 + 5 * dwelling.Nocc\n appliance_gain = (0.67 * 1000. / 24) * appliance_consumption_per_day\n light_gain = 0.4 * dwelling.full_light_gain\n else:\n met_gain = 60 * dwelling.Nocc\n cooking_gain = 35 + 7 * dwelling.Nocc\n appliance_gain = (1000. / 24) * appliance_consumption_per_day\n light_gain = dwelling.full_light_gain\n\n total_internal_gains = (met_gain\n + light_gain\n + appliance_gain\n + cooking_gain\n + water_heating_gains\n + dwelling.pump_gain\n + losses_gain)\n\n if dwelling.reduced_gains:\n summer_met_gain = 60 * dwelling.Nocc\n summer_cooking_gain = 35 + 7 * dwelling.Nocc\n summer_appliance_gain = (1000. 
/ 24) * appliance_consumption_per_day\n summer_light_gain = dwelling.full_light_gain\n total_internal_gains_summer = (summer_met_gain +\n water_heating_gains +\n summer_light_gain +\n summer_appliance_gain +\n summer_cooking_gain +\n dwelling.pump_gain +\n losses_gain\n - dwelling.heating_system_pump_gain)\n else:\n total_internal_gains_summer = total_internal_gains - dwelling.heating_system_pump_gain\n\n # Apply results to dwelling\n return dict(appliance_consumption=appliance_consumption,\n met_gain=met_gain,\n cooking_gain=cooking_gain,\n appliance_gain=appliance_gain,\n light_gain=light_gain,\n water_heating_gains=water_heating_gains,\n losses_gain=losses_gain,\n total_internal_gains=total_internal_gains,\n total_internal_gains_summer=total_internal_gains_summer)", "def twostr_func(wavelength, F_s, solarzenithangle,albedo_dif, \n\t\t\talbedo_dir, temp_ground, w_0, g, tau_n, temp_c):\n\t\n\t########################\n\t###Import useful libraries\n\t########################\n\timport numpy as np\n\timport pdb\n\timport scipy.linalg\n\n\n\n\n\t########################\n\t###Define model parameters\n\t########################\n\t#Properties of the ground\n\temissivity_ground=1.-albedo_dif #emissivity of ground. 1=perfect BB emitter.\n\n\t#Optical depth structure\n\tNlayer=len(tau_n) #number of layers in the atmospheric model.\n\t\n\ttau_c=np.zeros(Nlayer+1)# tau_c[n] is the cumulative optical depth at the upper edge of layer n. So tau_c[0]=0, and tau_c[N] is the maximum possible.\n\tfor n in range(0, Nlayer):\n\t\ttau_c[n+1]=tau_c[n]+tau_n[n] \n\n\t#In the Toon formalism, j=0 corresponds to space, and j=N+1 corresponds to the planet surface.\n\t#These points in wavelength space define the edges of the bins in tau space. \n\t#Other terminology:\n\t#\ttau_c=cumulative optical depth of layers *above* layer n. \n\t#\ttau_n=total optical depth of the layer n\n\t#\ttau=total optical depth at any point within a layer n, hence satisfying 0<tau<tau_n\n\n\tmu_0=np.cos(solarzenithangle) #\"incident direction of solar beam\"\n\n\n\t########################\n\t###Determine the two-stream approximation coefficients.\n\t########################\n\t#Eddington and quadrature are good at solar wavelengths (i.e., not thermal blackbody dominated). delta scalings of Joseph et al (1976) recommended to replace w_0, g, tau in this case. However, when dominated by internal isotropic sources like the Planck function, hemispheric mean approximation is preferable. When w_0=0, quadrature case has problems. This happens esp at thermal wavelengths. Again this favors using hemispheric mean at these wavelengths\n\t\n\t#We use quadrature because 1) we are at solar wavelengths for this UV work and 2) that's what twostr.f does (which is our comparison case)\n\tgamma_1= np.sqrt(3.)*(2.-w_0*(1.+g))/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_1\n\tgamma_2=np.sqrt(3.)*w_0*(1.-g)/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_2\n\tgamma_3=(1.-np.sqrt(3.)*g*mu_0)/2. #consistent with Toon et al; equal to the Pierrehumbert gamma_plus/w_0\n\tgamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\tmu_1=1./np.sqrt(3.)+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). 
Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\t##Eddington\n\t#gamma_1= (7.-w_0*(4.+3.*g))/4.\n\t#gamma_2=-1.*(1.-w_0*(4.-3.*g))/4.\n\t#gamma_3=(2.-3.*g*mu_0)/4.\n\t#gamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\t#mu_1=1./2.+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\talambda=np.sqrt(np.abs(gamma_1*gamma_1-gamma_2*gamma_2)) #this is the lower-case lambda, from eqn 21 of Toon et al\n\t\t\t\t\t\t\t\t #The absolute value was added based on the code Toon just sent us. This corresponds to his AK(L,J) parameter. But it should not matter since gamma_1>gamma_2 for w_0<1.\n\tclambda=(gamma_1-alambda)/(gamma_2) #this is the upper-case lambda, from eqn 22 of Toon et al\n\n\tEMLT=np.exp(-alambda*tau_n) #this appears to be a prefactor used to facilitate computation of eqn 44 of Toon et al\n\te1=1.+clambda*EMLT\n\te2=1.-clambda*EMLT\n\te3=clambda+EMLT\n\te4=clambda-EMLT\n\n\t########################\n\t###Set up calculation\n\t########################\n\t\"\"\"\n\tThe fundamental equation we are solving is of form:\n\tA_{l}*Y_{l-1}+B_{l}*Y_{l}+D{l+1}=E_{l} (equation 39 of Toon et al)\n\tHere, A_l, B_l, D_l, E_l are quantities we determine, and the Y_l is what we solve for.\n\tHence, we can summarize that we are solving a matrix equation that takes form:\n\tPY=E\n\twhere Y[l]=Y_l\n\t E[l]=E_l\n\t P[l, l-1]=A_l [row, column]\n\t P[l, l]=B_l\n\t P[l, l+1]=D_l\n\t P[i,j]=0 else\n\tToon et al use 1-indexing. Hence n runs from 1 to N, l runs from 1 to 2N, where N is the number of layers, and they have:\n\tY_l=Y_{1n} for l=1,3,5,...2n-1...2N-1\n\tY_l=Y_{2n} for l=2,4,6,...2n...2N\n\n\tHowever, we use Python, which has 0-indexing. Hence *we* choose that n runs from 0 to N-1, l runs from 0 to 2N-1, and:\n\tY_l=Y_{1n} for l=0,2,4...2n...2N-2\n\tY_l=Y_{2n} for l=1,3,5...2n+1...2N-1\n\n\tThe Y_{1n} and Y_{2n} are related to F^+_n and F^-_n via equations 31 and 32 of Toon et al.\n\tThis parametrization has been done to remove exponentials with positive operands (ie ones that could grow large and lead to numerical instabilities) from the matrix.\n\n\tNote: The mapping of this PQ=R to the F+ and F- is unclear because of 1) this parametrization in terms of Y_l (done to eliminate numerical instabilities) and 2)further linear combinations done to convert a pentagiagonal matrix to an even simpler tridiagonal matrix. Hence intuitive checks are hard.\n\t\"\"\"\n\n\t########################\n\t###Set up surface flux\n\t########################\n\tS_sfc=albedo_dir*mu_0*np.exp(-tau_c[-1]/mu_0)*np.pi*F_s+emissivity_ground*np.pi*Planck(temp_ground, wavelength)\n\t#Surface emission. Formed by adding blackbody emission from the ground to the reflected energy from the direct beam. The direct beam's reflected energy is assumed to be purely diffuse. This corresponds to equations 37 and 38 of Toon et al. Note that this does NOT match equation 5.31 of Pierrehumbert because it does not include the reflected diffuse radiation. So, this implicitly assumes the diffuse albedo to be 0. \n\n\t########################\n\t###Set up C-values\n\t########################\n\t#In the reshuffled set of parameters used in this formalism, these seem analagous to the forcing term in Pierrehumbert. 
All the added radiation is contained in here.\n\n\tdef C_plus(n, tau): #implementation of superposition of eqns 23 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]-1./mu_0)*gamma_3[n]+gamma_4[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau+1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\tdef C_minus(n, tau): #implementation of superposition of eqns 24 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]+1./mu_0)*gamma_4[n]+gamma_3[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau-1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\t########################\n\t###Calculate matrix coefficients\n\t#########################\n\t#initialize the A, B, D, and E.\n\tA=np.zeros(Nlayer*2)\n\tB=np.zeros(np.shape(A))\n\tD=np.zeros(np.shape(A))\n\tE=np.zeros(np.shape(A))\n\n\n\t#For l=0 (n=0) we have the boundary condition that the downward diffuse flux at the top of the first layer is equal to any incident diffuse downward flux. We set this to be zero.\n\tA[0]=0.\n\tB[0]=e1[0]\n\tD[0]=-1.*e2[0]\n\tE[0]=0.-1*C_minus(0,0) #This is really F_minus[0,0], i.e. we are assuming there is no downward diffuse flux from the top of the atmosphere.\n\n\t#for l=2N-1 (n=N-1), we have the boundary condition that the upward flux at the surface is the sume of the reflected downward diffuse flux and energy from any other sources (e.g. reflected direct beam, BB emission of the ground)/np.sqrt(3.)\n\tA[2*Nlayer-1]=e1[Nlayer-1]-albedo_dif*e3[Nlayer-1]\n\tB[2*Nlayer-1]=e2[Nlayer-1]-albedo_dif*e4[Nlayer-1]\n\tD[2*Nlayer-1]=0.\n\tE[2*Nlayer-1]=S_sfc-C_plus(Nlayer-1, tau_n[Nlayer-1])+albedo_dif*C_minus(Nlayer-1, tau_n[Nlayer-1])\n\n\t#There is a problem in the Toon paper. As written, the l=2n depends on e_n+1, running over the array edge. twostr.f resolves this by adopting a different mapping: their definition reduces to defining l=2(n+1) and running n from 0 to N-1. In this case, l=2 (The third value in the list of ls) depends on n=0 and n=1. This eliminates the overflow problem. 
We have implemented this below.\n\t\n\t##For n=1,2,3...N-1, l=2,4,6,...2N-2:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*(n+1)\n\t\tA[l]=e2[n]*e3[n]-e4[n]*e1[n]\n\t\tB[l]=e1[n]*e1[n+1]-e3[n]*e3[n+1]\n\t\tD[l]=e3[n]*e4[n+1]-e1[n]*e2[n+1]\n\t\t\n\t\tE[l]=e3[n]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))+e1[n]*(C_minus(n,tau_n[n])-C_minus(n+1,0.))\n\n\n\t#For n=0...N-2, l=1,3...2N-3:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*n+1\n\t\tA[l]=e2[n+1]*e1[n]-e3[n]*e4[n+1]\n\t\tB[l]=e2[n]*e2[n+1]-e4[n]*e4[n+1]\n\t\tD[l]=e1[n+1]*e4[n+1]-e2[n+1]*e3[n+1]\n\t\t\n\t\tE[l]=e2[n+1]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))-e4[n+1]*(C_minus(n+1, 0)-C_minus(n, tau_n[n])) #twostr.f has a -1*e_{4,n+1}. We have applied the same even though this is NOT what is written in the Toon et al paper. We have done this because Toon told us (6/26/2015) that there are some sign errors in the coefficients, and we currently trust the validated CLIMA code over the paper we know has errors in it. EDIT: Looking at the code Toon shared with us, he does the same. \n\n\n\t########################\n\t###Assemble matrix equation components\n\t#########################\n\tP=np.zeros([Nlayer*2,Nlayer*2])\n\n\t#l=0: no \"A\" coefficient b/c l-1 has no meaning\n\tP[0,0]=B[0]\n\tP[0,1]=D[0]\n\n\t#l=2N-1: no \"D\" coefficient b/c l+1 has no meaning\n\tP[2*Nlayer-1,2*Nlayer-1-1]=A[2*Nlayer-1]\n\tP[2*Nlayer-1,2*Nlayer-1]=B[2*Nlayer-1]\n\n\tfor l in range(1, Nlayer*2-1): #This populates the matrix P in PY=E. \n\t\tP[l, l-1]=A[l]\n\t\tP[l,l]=B[l]\n\t\tP[l,l+1]=D[l]\n\n\t########################\n\t###Invert matrix\n\t#########################\n\t#Y=np.linalg.solve(P, E) #this is the Y_l\n\t\n\t#try using a specialized solver\n\tab=np.zeros([3,2*Nlayer])\n\tab[0,:]=np.append(0.0, np.diag(P, k=1))\n\tab[1,:]=np.diag(P, k=0)\n\tab[2,:]=np.append(np.diag(P, k=-1),0.0)\n\t#pdb.set_trace()\n\tY=scipy.linalg.solve_banded((1,1), ab, E) #this is the Y_l\n\n\n\t########################\n\t###Convert from Y_l to Y_1n, Y_2n\n\t#########################\n\t#The Y_1n as defined in Toon et al correspond to l=1,3, 5...2N-1. Adjusting for the zero-indexing of Python as we have done, they instead correspond to l=0,2,...2N-2\n\t#The Y_2n as defined in Toon et al correspond to l=2,4,6...2N. Adjusting for Python zero-indexing as we have done, they instead correspond to l=1,3,5...2N-1.\n\t#For detail, see eq. 
40.\n\tY_1=np.zeros(Nlayer)\n\tY_2=np.zeros(Nlayer)\n\tfor n in range(0, Nlayer):\n\t\tY_1[n]=Y[2*n]\n\t\tY_2[n]=Y[2*n+1] \n\t\t#last number called is Nlayer-1=N-1, so is consistent.\n\t\n\t########################\n\t###Convert from Y_1n, Y_2n to F_plus, F_minus\n\t#########################\n\tdef F_plus(n,tau): #defined from Eqn 31 of Toon et al.\n\t\tterm1=Y_1[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))+clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))-clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm3=C_plus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\n\tdef F_minus(n, tau): #defined from Eqn 32 of Toon et al.\n\t\tterm1=Y_1[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))+np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))-np.exp(-alambda[n]*tau))\n\t\tterm3=C_minus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\t\n\t########################\n\t###Evaluate F_plus, F_minus at boundary edges\n\t#########################\n\tF_plus_tau0=np.zeros(np.shape(tau_n))\n\tF_plus_taumax=np.zeros(np.shape(tau_n))\n\tF_minus_tau0=np.zeros(np.shape(tau_n))\n\tF_minus_taumax=np.zeros(np.shape(tau_n))\n\n\tfor n in range(0, Nlayer):\n\t\tF_plus_tau0[n]=F_plus(n, 0.)\n\t\tF_plus_taumax[n]=F_plus(n, tau_n[n])\n\t\tF_minus_tau0[n]=F_minus(n, 0.)\n\t\tF_minus_taumax[n]=F_minus(n, tau_n[n])\n\n\n\t########################\n\t###Convert from Y_1n, Y_2n to F_net, mean intensity.\n\t#########################\n\t#test if diffuse flux dominates over direct flux. If direct flux dominant, instead set mu_1=mu_0\n\t\n\t#if F_minus_taumax[-1]<mu_0*np.pi*F_s*np.exp(-tau_c[-1]/mu_0):\n\t\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t\n\tF_net=np.zeros(np.shape(tau_n)) #defined from Eqn 48 of Toon et al. This quantity is the net flux at the BASE of layer n.\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\n\t\tterm1=Y_1[n]*(e1[n]-e3[n])\n\t\tterm2=Y_2[n]*(e2[n]-e4[n])\n\t\tterm3=C_plus(n, tau_n[n])-C_minus(n, tau_n[n])\n\t\t\n\t\tF_net[n]=term1+term2+term3 -direct\n\n\tAMEAN=np.zeros(np.shape(tau_n)) #defined from Eqn 49 of Toon et al. This is the equivalent of the quantity AMEAN in the twostr.f code. It is equal to 4*np.pi*J_n, where J_n is the mean intensity at the base of layer n. Hence this quantity AMEAN should be equal to the total intensity received by a point at the base of layer n. \n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\t\n\t\tterm1=Y_1[n]*(e1[n]+e3[n])\n\t\tterm2=Y_2[n]*(e2[n]+e4[n])\n\t\tterm3=C_plus(n, tau_n[n])+C_minus(n, tau_n[n])\n\t\t\n\t\t#AMEAN[n]=(1./mu_1[n])*(term1+term2+term3)+direct/mu_0\t\n\t\tAMEAN[n]=(1./mu_1[n])*(F_plus_taumax[n]+F_minus_taumax[n])+direct/mu_0\t\n\t\n\t########################\n\t###Compute \"surface intensity\"\n\t#########################\t\n\t#\"Surface intensity\" refers to the total intensity that would be intercepted by a particle at the surface of the planet. Whereas the total intensity is equal to (F_plus[-1]+F_minus[-1])/mu_1+direct[-1]/mu_0, the surface intensity is instead equal to (F_minus[-1])/mu_1+direct[-1]/mu_0, i.e. 
the downwelling diffuse intensity (since the bottom intensity is cut out due to there being a planet there) plus the direct intensity\n\t\n\tsurface_intensity=(F_minus_taumax[-1]/mu_1[-1])+(np.pi*F_s)*np.exp(-(tau_c[-1])/mu_0)\n\t\n\t########################\n\t###Return Result\n\t#########################\n\t#F_minus_tau0\n\t#np.max(np.abs((F_minus_taumax[:-1]-F_minus_tau0[1:]))/F_minus_tau0[1:])\n\t#np.max(np.abs((F_plus_taumax[:-1]-F_plus_tau0[1:]))/F_plus_tau0[1:])\n\t\n\treturn (F_plus_tau0, F_plus_taumax, F_minus_tau0, F_minus_taumax, F_net, AMEAN, surface_intensity)", "def hz2mel(hz):\n return 1127 * numpy.log(1+hz/700.0)", "def DeltaT_EspenakMeeus(ut):\n # Fred Espenak writes about Delta-T generically here:\n # https://eclipse.gsfc.nasa.gov/SEhelp/deltaT.html\n # https://eclipse.gsfc.nasa.gov/SEhelp/deltat2004.html\n # He provides polynomial approximations for distant years here:\n # https://eclipse.gsfc.nasa.gov/SEhelp/deltatpoly2004.html\n # They start with a year value 'y' such that y=2000 corresponds\n # to the UTC Date 15-January-2000. Convert difference in days\n # to mean tropical years.\n\n y = 2000 + ((ut - 14) / _DAYS_PER_TROPICAL_YEAR)\n\n if y < -500:\n u = (y - 1820) / 100\n return -20 + (32 * u*u)\n\n if y < 500:\n u = y / 100\n u2 = u*u; u3 = u*u2; u4 = u2*u2; u5 = u2*u3; u6 = u3*u3\n return 10583.6 - 1014.41*u + 33.78311*u2 - 5.952053*u3 - 0.1798452*u4 + 0.022174192*u5 + 0.0090316521*u6\n\n if y < 1600:\n u = (y - 1000) / 100\n u2 = u*u; u3 = u*u2; u4 = u2*u2; u5 = u2*u3; u6 = u3*u3\n return 1574.2 - 556.01*u + 71.23472*u2 + 0.319781*u3 - 0.8503463*u4 - 0.005050998*u5 + 0.0083572073*u6\n\n if y < 1700:\n u = y - 1600\n u2 = u*u; u3 = u*u2\n return 120 - 0.9808*u - 0.01532*u2 + u3/7129.0\n\n if y < 1800:\n u = y - 1700\n u2 = u*u; u3 = u*u2; u4 = u2*u2\n return 8.83 + 0.1603*u - 0.0059285*u2 + 0.00013336*u3 - u4/1174000\n\n if y < 1860:\n u = y - 1800\n u2 = u*u; u3 = u*u2; u4 = u2*u2; u5 = u2*u3; u6 = u3*u3; u7 = u3*u4\n return 13.72 - 0.332447*u + 0.0068612*u2 + 0.0041116*u3 - 0.00037436*u4 + 0.0000121272*u5 - 0.0000001699*u6 + 0.000000000875*u7\n\n if y < 1900:\n u = y - 1860\n u2 = u*u; u3 = u*u2; u4 = u2*u2; u5 = u2*u3\n return 7.62 + 0.5737*u - 0.251754*u2 + 0.01680668*u3 - 0.0004473624*u4 + u5/233174\n\n if y < 1920:\n u = y - 1900\n u2 = u*u; u3 = u*u2; u4 = u2*u2\n return -2.79 + 1.494119*u - 0.0598939*u2 + 0.0061966*u3 - 0.000197*u4\n\n if y < 1941:\n u = y - 1920\n u2 = u*u; u3 = u*u2\n return 21.20 + 0.84493*u - 0.076100*u2 + 0.0020936*u3\n\n if y < 1961:\n u = y - 1950\n u2 = u*u; u3 = u*u2\n return 29.07 + 0.407*u - u2/233 + u3/2547\n\n if y < 1986:\n u = y - 1975\n u2 = u*u; u3 = u*u2\n return 45.45 + 1.067*u - u2/260 - u3/718\n\n if y < 2005:\n u = y - 2000\n u2 = u*u; u3 = u*u2; u4 = u2*u2; u5 = u2*u3\n return 63.86 + 0.3345*u - 0.060374*u2 + 0.0017275*u3 + 0.000651814*u4 + 0.00002373599*u5\n\n if y < 2050:\n u = y - 2000\n return 62.92 + 0.32217*u + 0.005589*u*u\n\n if y < 2150:\n u = (y-1820)/100\n return -20 + 32*u*u - 0.5628*(2150 - y)\n\n # all years after 2150\n u = (y - 1820) / 100\n return -20 + (32 * u*u)", "def _microstrip_v_with_Lk(wire_width, dielectric_thickness, eps_r, Lk_per_sq):\n L_m, C_m = _microstrip_LC_per_meter(wire_width,\n dielectric_thickness,\n eps_r)\n Lk_m = Lk_per_sq * (1.0/wire_width)\n v = 1 / sqrt((L_m+Lk_m) * C_m)\n return v", "def isDeboutHandCoded( sk, bOnlyTorso = False, bVerbose = False ):\n \n neck = sk.listPoints[Skeleton.getNeckIndex()]\n \n if bVerbose: print(\"neck: %s\" % str(neck))\n \n legsInfo = 
sk.getLegs()\n if bVerbose: print(\"legs: %s\" % str(legsInfo))\n \n bb = sk.getBB_Size()\n sto = sk.getStomach()\n \n\n \n rh,rk,ra = legsInfo[0] # hip, knee, ankle\n lh,lk,la = legsInfo[1]\n \n avgFeets = [ ra[0]+la[0],ra[1]+la[1],ra[2]+la[2] ]\n div2(avgFeets)\n \n lal = sk.getArms()\n if bVerbose: print(\"arms: %s\" % str(lal))\n rs,re,rw = lal[0] #shoulder, elbow, wrist\n ls,le,lw = lal[1]\n \n rThreshold = 0.2\n \n #~ # si les pieds sont plus bas que les hanches\n # a essayer: orientation cou/(estomac ou moyenne des hanches): vertical => debout; sinon couche\n # a essayer: quand les fesses sont sur le sol\n \n # NB: on n'arrivera jamais a voir que quelqu'un qui est assis ou couche' oriente' vers la camera est tombe'\n \n # si les mains ou a defaut les coudes sont plus hautes que les pieds ou a defaut les hanches\n bDeboutFromArmsLegsHeight = None\n \n if rw[2] > rThreshold:\n rHi = rw[:2]\n elif re[2] > rThreshold:\n rHi = re[:2]\n else:\n rHi = None\n\n if lw[2] > rThreshold:\n lHi = lw[:2]\n elif le[2] > rThreshold:\n lHi = le[:2]\n else:\n # check le neck\n if neck[2] > rThreshold:\n lHi = neck[:2]\n else:\n lHi = None\n \n if lHi != None or rHi != None:\n \n if lHi == None:\n hi = rHi\n elif rHi == None:\n hi = lHi\n else:\n hi = avg2(rHi,lHi)\n \n \n \n if ra[2] > rThreshold:\n rLo = ra[:2]\n elif rk[2] > rThreshold:\n rLo = rk[:2]\n else:\n rLo = None\n\n if la[2] > rThreshold:\n lLo = la[:2]\n elif lk[2] > rThreshold:\n lLo = lk[:2]\n else:\n lLo = None\n \n if lLo != None or rLo != None:\n\n if lLo == None:\n lo = rLo\n elif rLo == None:\n lo = lLo\n else:\n lo = avg2(rLo,lLo)\n \n if bVerbose: print(\"rLo:%s,lLo:%s\" % (rLo,lLo) )\n \n if bVerbose: print(\"hi:%s,lo:%s\" % (hi,lo) )\n \n #~ return hi[1]<lo[1] # add a margin ?\n \n bb = sk.getBB_Size()\n rMargin = bb[1]/4\n\n \n bDeboutFromArmsLegsHeight = hi[1]+rMargin<lo[1] # WRN: pixel Y are inverted (high pixel are smaller than lower)\n \n if bVerbose: print(\"rMargin:%5.2f, bDeboutFromArmsLegsHeight: %s\"% (rMargin,bDeboutFromArmsLegsHeight) )\n\n bDeboutFromTorsoAngle = None\n if (rh[2] > rThreshold or lh[2] > rThreshold) and neck[2] > rThreshold:\n if (rh[2] > rThreshold and lh[2] > rThreshold):\n avg_hip = avg2(rh,lh)\n elif rh[2] > rThreshold:\n avg_hip = rh\n else:\n avg_hip = lh\n dx = avg_hip[0]-neck[0]\n dy = avg_hip[1]-neck[1]\n if abs(dx) < 0.1:\n coef = dy*10\n else:\n coef = dy/dx\n bDeboutFromTorsoAngle = abs(coef) > 1. 
# 1: diagonal\n if bVerbose: print(\"coef: %5.1f (dy:%3.1f,dx:%3.1f), bDeboutFromTorsoAngle: %s\" % (coef,dy, dx, bDeboutFromTorsoAngle) )\n #~ else:\n #~ return None\n \n # fesses sur le sol\n bNotBumOnGround = None\n if rh[2] > rThreshold and lh[2] > rThreshold:\n avg_hip = avg2(rh,lh)\n elif rh[2] > rThreshold:\n avg_hip = rh\n elif lh[2] > rThreshold:\n avg_hip = lh\n else:\n avg_hip = None\n if avg_hip != None:\n # look for lower point in legs, but not hip:\n rLowest = -10000\n #~ for i in range(cv2_openpose.Skeleton.NBR_POINTS):\n for i in [cv2_openpose.Skeleton.RKNEE,cv2_openpose.Skeleton.LKNEE,cv2_openpose.Skeleton.RANKLE,cv2_openpose.Skeleton.LANKLE]:\n if i == cv2_openpose.Skeleton.RHIP or i == cv2_openpose.Skeleton.LHIP:\n continue\n if sk.listPoints[i][2] < rThreshold:\n continue\n if sk.listPoints[i][1] > rLowest:\n rLowest = sk.listPoints[i][1]\n if rLowest >= 0:\n #~ lenLimbs = sk.getLenLimbs()\n #~ if bVerbose: print(\"lenLimbs: %s\" % str(lenLimbs) )\n #~ rLenLegs = (lenLimbs[0][0] +lenLimbs[1][0]) / 2\n rLenLegs = sk.getAvgLenLeg()\n if rLenLegs != None:\n bNotBumOnGround = (avg_hip[1] + (rLenLegs*0.75)) < rLowest\n if bVerbose: print(\"avg hip: %5.1f, lowest: %5.1f, rLenLegs: %5.1f, bNotBum: %s\" % (avg_hip[1],rLowest,rLenLegs, bNotBumOnGround) )\n if legsInfo[0][2][2] < rThreshold and legsInfo[1][2][2] < rThreshold:\n if bVerbose: print(\"INF: no foot seen, reseting bNotBumOnGround\" )\n # on ne voit aucun pied, soit ils ne sont pas a l'ecran soit il sont derriere, dans le doute, on prefere dire None\n bNotBumOnGround = None\n #~ return bNotBumOnGround\n \n # on veut etre sur => si hesitation, ne se prononces pas\n if 0:\n if not bOnlyTorso:\n if bDeboutFromArmsLegsHeight != bDeboutFromTorsoAngle:\n return None\n\n \n #~ if bDeboutFromArmsLegsHeight:\n if bDeboutFromTorsoAngle == None and bNotBumOnGround:\n return 1\n \n if bDeboutFromTorsoAngle and bNotBumOnGround:\n return 1\n\n \n if bDeboutFromTorsoAngle == None and bNotBumOnGround == None and bDeboutFromArmsLegsHeight != None:\n return bDeboutFromArmsLegsHeight\n \n if bDeboutFromTorsoAngle != None and bNotBumOnGround == None:\n return bDeboutFromTorsoAngle\n \n return 0\n \n \n else:\n if bDeboutFromTorsoAngle == bNotBumOnGround and bDeboutFromTorsoAngle == bDeboutFromArmsLegsHeight:\n return bDeboutFromTorsoAngle\n if bNotBumOnGround != None:\n return bNotBumOnGround\n if bDeboutFromTorsoAngle != None:\n return bDeboutFromTorsoAngle \n if bDeboutFromArmsLegsHeight != None:\n return bDeboutFromArmsLegsHeight\n return None", "def BraggEnergy(ID,hkl,twotheta):\n ID=goodID(ID)\n d=dSpace(ID,hkl)\n l=2*d*sind(twotheta/2.0)\n E=lam2E(l)\n return E", "def getWNSA1(ChargeSA):\n temp = 0.0\n for i in ChargeSA:\n temp = temp+i[2]\n if temp == 0.0:\n return 0.0\n\n return getPNSA1(ChargeSA)*temp/1000", "def le_calibration_func(etr, kc, ts):\n return etr * kc * (2.501 - 2.361E-3 * (ts - 273)) * 2500 / 9", "def RiekeLebofsky85(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to 
wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def leff(self):\n with Vega() as v:\n s = self.reinterp(v.wavelength)\n w = s._wavelength\n if s.transmit.max() > 0:\n leff = np.trapz(w * s.transmit * v.flux.value, w, axis=-1)\n leff /= np.trapz(s.transmit * v.flux.value, w, axis=-1)\n else:\n leff = float('nan')\n if s.wavelength_unit is not None:\n leff = leff * Unit(s.wavelength_unit)\n if self.wavelength_unit is not None:\n return leff.to(self.wavelength_unit)\n return leff\n else:\n return leff", "def test_double_ended_ols_wls_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 50\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n # OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse')\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5)\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=5)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=6)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n 
np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)", "def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)", "def _get_tb(I, nu, beam):\n from astropy import units as u\n return (1222.0*I/(nu**2*(beam.minor/1.0).to(u.arcsecond)*(beam.major/1.0).to(u.arcsecond))).value", "def Frischknecht16_net(self):\n import numpy.lib.recfunctions as rcfuncs\n import os\n \n # Define metallicites \n self.metallicities = [0.0134,1e-3,1e-5] # First is solar value\n \n # Define masses\n self.masses= np.array((15,20,25,40))\n \n # Define isotope indexing. For radioactive isotopes with half-lives << Chempy time_step they are assigned to their daughter element\n # NB: we only use elements up to Ge here, as in the paper\n indexing={}\n indexing['H']=['p','d']\n indexing['He'] = ['he3','he4']\n indexing['Li'] = ['li6','li7']\n indexing['Be'] = ['be9']\n indexing['B'] = ['b10','b11']\n indexing['C'] = ['c12','c13']\n indexing['N'] = ['n14','n15']\n indexing['O'] = ['o16','o17','o18']\n indexing['F'] = ['f19']\n indexing['Ne'] = ['ne20','ne21','ne22']\n indexing['Na'] = ['na23']\n indexing['Mg'] = ['mg24','mg25','mg26','al26']\n indexing['Al'] = ['al27']\n indexing['Si'] = ['si28','si29','si30']\n indexing['P'] = ['p31']\n indexing['S'] = ['s32','s33','s34','s36']\n indexing['Cl'] = ['cl35','cl37']\n indexing['Ar'] = ['ar36','ar38','ar40']\n indexing['K'] = ['k39','k41']\n indexing['Ca'] = ['ca40','ca42','ca43','ca44','ca46','ca48']\n indexing['Sc'] = ['sc45']\n indexing['Ti'] = ['ti46','ti47','ti48','ti49','ti50']\n indexing['V'] = ['v50','v51']\n indexing['Cr'] = ['cr50','cr52','cr53','cr54']\n indexing['Mn'] = ['mn55']\n indexing['Fe'] = ['fe54', 'fe56','fe57','fe58']\n indexing['Co'] = ['fe60', 'co59']\n indexing['Ni'] = ['ni58','ni60','ni61','ni62','ni64']\n indexing['Cu'] = ['cu63','cu65']\n indexing['Zn'] = ['zn64','zn66','zn67','zn68','zn70']\n indexing['Ga'] = ['ga69','ga71']\n indexing['Ge'] = ['ge70','ge72','ge73','ge74','ge76']\n\n # Define indexed elements \n self.elements = list(indexing.keys())\n \n \n # Define data types\n dt = np.dtype('U8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')\n \n # Initialise yield table\n yield_table = {}\n \n \n # Import full table with correct rows and data-types\n z = np.genfromtxt(localpath+'input/yields/Frischknecht16/yields_total.txt',skip_header=62,dtype=dt)\n \n \n \n # Create model dictionary indexed by metallicity, giving relevant model number for each choice of mass\n # See Frischknecht info_yields.txt file for model information\n model_dict = {}\n model_dict[0.0134] = [2,8,14,27]\n model_dict[1e-3]=[4,10,16,28]\n model_dict[1e-5]=[6,12,18,29]\n \n # Import list of remnant masses for each model (from row 32-60, column 6 of .txt file) \n # NB: these are in solar masses\n rem_mass_table = np.loadtxt(localpath+'input/yields/Frischknecht16/yields_total.txt',skiprows=31,usecols=6)[:29]\n\n # Create one subtable for each metallicity \n for metallicity in self.metallicities:\n additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds'] # List of keys for table\n names = additional_keys + self.elements\n \n # Initialise table and arrays \n base = np.zeros(len(self.masses))\n list_of_arrays = []\n for i in range(len(names)):\n list_of_arrays.append(base)\n yield_subtable = np.core.records.fromarrays(list_of_arrays,names=names)\n mass_in_remnants = 
np.zeros(len(self.masses))\n total_mass_fraction = np.zeros(len(self.masses))\n element_mass = np.zeros(len(self.masses))\n \n # Add masses to table\n yield_subtable['Mass'] = self.masses\n \n \n # Extract remnant masses (in solar masses) for each model:\t\t\t\n for mass_index,model_index in enumerate(model_dict[metallicity]):\n mass_in_remnants[mass_index] = rem_mass_table[model_index-1] \n \n # Iterate over all elements \n for element in self.elements:\n element_mass = np.zeros(len(self.masses))\n for isotope in indexing[element]: # Iterate over isotopes of each element\n for mass_index,model_index in enumerate(model_dict[metallicity]): # Iterate over masses \n for row in z: # Find required row in table \n if row[0] == isotope:\n element_mass[mass_index]+=row[model_index] # Compute cumulative mass for all isotopes\n yield_subtable[element]=np.divide(element_mass,self.masses) # Add entry to subtable\n \n all_fractions = [row[model_index] for row in z] # This lists all elements (not just up to Ge)\n total_mass_fraction[mass_index] = np.sum(all_fractions) # Compute total net mass fraction (sums to approximately 0)\n \n # Add fields for remnant mass (now as a mass fraction) and unprocessed mass fraction\t\t\t\n yield_subtable['mass_in_remnants']=np.divide(mass_in_remnants,self.masses) \n yield_subtable['unprocessed_mass_in_winds'] = 1.-(yield_subtable['mass_in_remnants']+total_mass_fraction) # This is all mass not from yields/remnants\n \n # Add subtable to full table\n yield_table[metallicity]=yield_subtable\n\n # Define final yield table for output\n self.table = yield_table", "def b_Kjeldsen2008(self):\n return 10.0**(-3.16*self.string_to_param(\"log_Teff\") + 0.184*self.string_to_param(\"log_g\")+11.7)", "def T_L(Td, taue):\n return np.sqrt(np.pi)/2.0 * taue * np.exp(-(np.pi*taue/(4*Td))*(np.pi*taue/(4*Td)))", "def blackbody( wave, T, waveunit='Angstrom' ):\n \n if waveunit=='Angstrom':\n # convert wavelength from angstroms to cm\n wave = wave / 1e10 * 100.\n elif waveunit=='nm':\n # convert wavelength from angstroms to cm\n wave = wave / 1e9 * 100.\n\n return( ((2 * h * c* c)/wave**5 ) / (exp(h*c/(wave*k*T))-1) )", "def calculate_wrist_ee(self):\n\n data = [i * (30 / 75) for i in self.df_epoch[\"LWrist\"]]\n\n # Modified equation from Powell et al. 2017. 
Removed resting component (constant = 1.15451)\n mets = [.022261 * i for i in data]\n\n # Converts METs to relative VO2 (mL O2/kg/min)\n r_vo2 = [3.5 * m for m in mets]\n\n # Converts relative VO2 to absolute VO2 (L O2/kg/min)\n a_vo2 = [i * self.weight / 1000 for i in r_vo2]\n\n # Converts absolute VO2 to kcal/min (assumes 1 L O2 -> 4.825 kcal)\n kcal_min = [a * 4.825 for a in a_vo2]\n\n # Calculates kcal/epoch\n kcal_epoch = [k * (15 / 60) for k in kcal_min]\n\n total_ee = sum([i for i in kcal_epoch if not np.isnan(i)])\n print(\"-Total energy expenditure estimated from Wrist is {} kcal.\".format(int(total_ee)))\n\n self.df_epoch[\"Wrist_EE\"] = kcal_min", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def adjust_for_speed_of_light_in_water(df, tide_level):\n speed_of_light_air = 300000\n speed_of_light_water = 225000\n coef = speed_of_light_water / speed_of_light_air\n df['Height'] = (df['Height']- tide_level) * coef\n return df", "def convert_ha_to_km2(nb):\n return nb / 100", "def calc_relhum(dewpt,t):\n\n relhum=100.*(np.exp((const.es_Abolton*dewpt)/(const.es_Bbolton+dewpt))/np.exp((const.es_Abolton*t)/(const.es_Bbolton+t)))\n return relhum", "def getWNSA2(ChargeSA):\n temp = 0.0\n for i in ChargeSA:\n temp = temp+i[2]\n if temp == 0.0:\n return 0.0\n\n return getPNSA2(ChargeSA)*temp/1000.0", "def get_D_C3H8_air_Kn(self, T):\n\n Kn = self.get_Kn(T)\n D_C3H8_air = self.get_D_C3H8_air(T)\n\n self.D_C3H8_air_Kn = D_C3H8_air / Kn\n\n return self.D_C3H8_air_Kn", "def stiffenerBuckle(dim):\n bst = dim[0]\n tst = dim[1]\n tsk = dim[2]\n\n epsilont = kt * ((tst / bst)) ** 2\n Et = (Esk * tsk) + (Est * ((bst * tst) / bsk))\n Nst = Et*epsilont # Critical Load\n rsf = Nst/Nx\n return rsf - 1 # Using a target Reserve Factor of 1", "def bjerrum_length_water(temperature=298.15):\n bjerrum = np.power(ELECTRON_CHARGE, 2.0) / \\\n (4.0 * np.pi *\n ELECTRIC_CONSTANT *\n dielectric_constant_water(temperature) *\n BOLTZMANN_CONSTANT *\n temperature\n )\n return bjerrum", "def UTMtoLL(northing, easting, zone, isSouthernHemisphere=True,\r\n ReferenceEllipsoid=23):\r\n k0 = 0.9996\r\n a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]\r\n eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]\r\n e1 = (1-sqrt(1-eccSquared))/(1+sqrt(1-eccSquared))\r\n\r\n x = easting - 500000.0 #remove 500,000 meter offset for longitude\r\n y = northing\r\n\r\n ZoneNumber = int(zone)\r\n if isSouthernHemisphere:\r\n y -= 10000000.0 # remove 10,000,000 meter offset used\r\n # for southern hemisphere\r\n\r\n LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 # +3 puts origin in middle of zone\r\n\r\n eccPrimeSquared = (eccSquared)/(1-eccSquared)\r\n\r\n M = y / k0\r\n mu = M/(a*(1-eccSquared/4-3*eccSquared*eccSquared/64-5*eccSquared*eccSquared*eccSquared/256))\r\n\r\n phi1Rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu)\r\n + (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu)\r\n +(151*e1*e1*e1/96)*sin(6*mu))\r\n phi1 = phi1Rad*_rad2deg;\r\n\r\n N1 = a/sqrt(1-eccSquared*sin(phi1Rad)*sin(phi1Rad))\r\n T1 = tan(phi1Rad)*tan(phi1Rad)\r\n C1 = eccPrimeSquared*cos(phi1Rad)*cos(phi1Rad)\r\n R1 = a*(1-eccSquared)/pow(1-eccSquared*sin(phi1Rad)*sin(phi1Rad), 1.5)\r\n D = x/(N1*k0)\r\n\r\n Lat = phi1Rad - (N1*tan(phi1Rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*eccPrimeSquared)*D*D*D*D/24\r\n +(61+90*T1+298*C1+45*T1*T1-252*eccPrimeSquared-3*C1*C1)*D*D*D*D*D*D/720)\r\n Lat = Lat * _rad2deg\r\n\r\n Long = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*eccPrimeSquared+24*T1*T1)\r\n *D*D*D*D*D/120)/cos(phi1Rad)\r\n Long = 
LongOrigin + Long * _rad2deg\r\n return (Lat, Long)", "def calc_ti(self):\n m = 0\n for i in self.inl:\n m += i.m.val_SI * i.fluid.val[self.fuel_alias.val]\n\n for o in self.outl:\n m -= o.m.val_SI * o.fluid.val[self.fuel_alias.val]\n\n return m * self.lhv", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def fuel_cond(T):\n\n kc = 1.841e-19*math.pow(T,6) - 2.097e-15*math.pow(T,5) +\\\n 9.721e-12*math.pow(T,4) - 2.369e-8*math.pow(T,3) +\\\n 3.283e-5*math.pow(T,2) - 0.0267*T + 63.18\n \n return kc", "def SSt_theo_old(D, k):\n\ta1b = k[\"A1B1\"]\n\tba1 = k[\"B1A1\"]\n\tca1 = k[\"C1A1\"]\n\tcb = k[\"B1C1\"]\n\tnum = a1b*ba1*ca1*ca1 + ba1*ba1*ca1*ca1 + 3*a1b*ba1*ca1*cb + 2*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 2*ba1*ca1*ca1*cb + 2*a1b*ba1*cb*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*a1b*ca1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t3*ba1*ca1*ca1*ca1 + 2*a1b*ba1*ba1*cb + ba1*ba1*ba1*cb + 2*a1b*ba1*ca1*cb + \\\n\t\t\t3*ba1*ba1*ca1*cb + 4*a1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + \\\n\t\t\t2*a1b*ba1*cb*cb + 2*ba1*ba1*cb*cb + 2*a1b*ca1*cb*cb + 4*ba1*ca1*cb*cb + \\\n\t\t\t2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\t(a1b*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 4*ba1*ba1*ca1*ca1 + a1b*ca1*ca1*ca1 + \\\n\t\t\t2*ca1*ca1*ca1*ca1 + ba1*ba1*ba1*cb + 3*a1b*ba1*ca1*cb + 3*ba1*ba1*ca1*cb + \\\n\t\t\ta1b*ca1*ca1*cb + 5*ba1*ca1*ca1*cb + 3*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + \\\n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\t(ba1*ba1*ba1*ca1 + a1b*ba1*ca1*ca1 + 3*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + \\\n\t\t\t2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\tba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\tden = a1b*(ba1*ba1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb + ba1*ba1*cb*cb + \n\t\t\t2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + 4*ca1*ca1*ca1*cb + \n\t\t\t2*ba1*ba1*cb*cb + 4*ba1*ca1*cb*cb + 2*ca1*ca1*cb*cb) * D + \\\n\t\t\t\\\n\t\t\ta1b*(2*ba1*ba1*ca1*ca1 + 4*ca1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 6*ba1*ca1*ca1*cb + \n\t\t\t4*ca1*ca1*ca1*cb + ba1*ba1*cb*cb + 2*ba1*ca1*cb*cb + ca1*ca1*cb*cb) * D*D + \\\n\t\t\t\\\n\t\t\ta1b*(4*ba1*ca1*ca1*ca1 + 2*ba1*ba1*ca1*cb + 2*ba1*ca1*ca1*cb) * D*D*D + \\\n\t\t\t\\\n\t\t\ta1b*ba1*ba1*ca1*ca1 * D*D*D*D\n\t##\n\ttau = num/den\n\t##\n\treturn tau*np.log(20)", "def celciusToKelvin(celcius: float, ndigits: int = 2)->float:\n return round(float(celcius) + 273.15, ndigits)", "def bern_metric(pipe_diameter, delta_p, pipe_length):\n fr_c = 0.003 # assuming Reynolds number is 10**5 and pipe material is smooth copper\n fr_reyn = 0.046 / (reynolds_num(pipe_diameter, delta_p, pipe_length) ** 0.2) # Taitel and Dukler approximation\n rho = 1000 # density of water @ 4 deg celsius (kg/m**3)\n\n v = math.sqrt((2 * delta_p) / (rho * (4 * fr_reyn * (pipe_length / pipe_diameter) - 1)))\n flow_rate_turb = v * ((math.pi / 4) * (pipe_diameter ** 2))\n\n return flow_rate_turb, v" ]
[ "0.6074756", "0.60489684", "0.6021683", "0.58620036", "0.5819184", "0.5817391", "0.5701168", "0.56542647", "0.5622918", "0.5620226", "0.5618999", "0.55917037", "0.55595964", "0.5550017", "0.5537136", "0.5530169", "0.5526478", "0.5487843", "0.5477624", "0.5460396", "0.54503703", "0.54500014", "0.54438895", "0.54244804", "0.54152185", "0.5406566", "0.54061073", "0.53903854", "0.5387455", "0.5368951", "0.5346727", "0.53384906", "0.5325162", "0.53191954", "0.5314231", "0.53136086", "0.5313077", "0.5311422", "0.53073394", "0.530346", "0.53003144", "0.52962947", "0.52958554", "0.52827436", "0.5282719", "0.52772045", "0.52708966", "0.5266472", "0.5264123", "0.5263611", "0.5249507", "0.5246597", "0.52375174", "0.5234523", "0.5220995", "0.52207047", "0.5215768", "0.5214564", "0.5214163", "0.52097803", "0.5203122", "0.5202069", "0.5200833", "0.51967096", "0.5193933", "0.5193548", "0.519249", "0.519136", "0.51818526", "0.5181353", "0.517518", "0.51728415", "0.5172196", "0.5167219", "0.51639974", "0.51602536", "0.5159262", "0.51495284", "0.5149191", "0.51454246", "0.51407903", "0.51385975", "0.5137425", "0.51349795", "0.51336193", "0.5132459", "0.51317227", "0.513127", "0.5126381", "0.5125836", "0.5123143", "0.5117155", "0.5116209", "0.51135945", "0.5110454", "0.51063466", "0.51031625", "0.50993836", "0.50989574", "0.50924534" ]
0.64853966
0
r""" Get AOD at specified wavelength using Angstrom turbidity model.
def angstrom_aod_at_lambda(aod0, lambda0, alpha=1.14, lambda1=700.0): return aod0 * ((lambda1 / lambda0) ** (-alpha))
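The document row above implements the Angstrom turbidity relation tau(lambda1) = tau(lambda0) * (lambda1 / lambda0) ** (-alpha). A minimal usage sketch of that same function, assuming an illustrative AOD of 0.10 measured at 500 nm and the default alpha = 1.14 (both numbers are assumptions for the example, not values taken from this dataset):

def angstrom_aod_at_lambda(aod0, lambda0, alpha=1.14, lambda1=700.0):
    # Scale AOD measured at lambda0 (nm) to lambda1 (nm) via tau ~ lambda ** (-alpha).
    return aod0 * ((lambda1 / lambda0) ** (-alpha))

# Illustrative values only: AOD of 0.10 at 500 nm, extrapolated to the default 700 nm.
aod_700 = angstrom_aod_at_lambda(aod0=0.10, lambda0=500.0)
print(round(aod_700, 4))  # ~0.0681, since (700 / 500) ** -1.14 is roughly 0.681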
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_experimental_spectra(mol):\n\n data = pd.read_csv(mol, sep=',')\n wavelength = data.values[:, 0]\n\n absorption = data.values[:, 1]\n\n func = interp1d(wavelength, absorption, kind='quadratic')\n wavelength_new = 1. / np.linspace(1. / wavelength.max(), 1. / wavelength.min(), 100)\n absorption_new = func(wavelength_new)\n absorption_new *= 100. / absorption_new.max()\n\n return wavelength_new, absorption_new", "def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def airydisk(unit_r, fno, wavelength):\n u_eff = unit_r * np.pi / wavelength / fno\n return abs(2 * jinc(u_eff)) ** 2", "def return_obs_RA_DEC():\n return SkyCoord('03h 32m 30s', '10d 00m 24s')", "def get(self, wave, flux, **kwargs):\n if hasUnit(wave):\n _w = wave.to('AA').magnitude\n else:\n print(\"Warning: assuming units are in Angstroms\")\n _w = _drop_units(wave)\n _f = _drop_units(flux)\n\n blue = self._get_wavelength_attrs_with_units('blue').magnitude\n red = self._get_wavelength_attrs_with_units('red').magnitude\n band = self._get_wavelength_attrs_with_units('band').magnitude\n \n nocheck = kwargs.pop('nocheck', False)\n not_covered = (blue[0] < _w[0]) | (red[-1] > _w[-1])\n if (not_covered):\n if (not nocheck):\n raise ValueError(\"Spectrum does not cover this index.\")\n else:\n return np.zeros(_f.shape[0]) * float('nan') \n else:\n return self._get_indice(_w, _f, blue, red, band, self.index_unit, **kwargs)", "def aIllum(wavelength):\n return planckian(2856, wavelength)", "def RiekeLebofsky85(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def _get(self, wave, flux, **kwargs):\n if hasUnit(wave):\n _w = wave.to('AA').value\n else:\n print(\"Warning: assuming units are in Angstroms\")\n _w = _drop_units(wave)\n _f = _drop_units(flux)\n\n blue = self._get_wavelength_attrs_with_units('blue').value\n red = self._get_wavelength_attrs_with_units('red').value\n band = self._get_wavelength_attrs_with_units('band').value\n\n nocheck = kwargs.pop('nocheck', False)\n not_covered = (blue[0] < _w[0]) | (red[-1] > _w[-1])\n if (not_covered):\n if (not nocheck):\n raise ValueError(\"Spectrum does not cover this index.\")\n else:\n return np.zeros(_f.shape[0]) * float('nan')\n else:\n return self._get_indice(_w, _f, blue, red, band, self.index_unit,\n **kwargs)", "def search(self, wavelength, format=None):\n q = self.create_query(wavelength, format)\n return q.execute()", "def wavelength(energy):\r\n 
return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def Damineli16(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def read_aeronet_data_main(station_name, month, year, plot_results):\n # Load AERONET file of month-year\n station = gs.Station(station_name)\n\n monthdays = (date(year, month + 1, 1) - date(year, month, 1)).days\n start_day = datetime(year, month, 1, 0, 0)\n end_day = datetime(year, month, monthdays, 0, 0)\n wavelengths = [355, 532, 1064]\n\n base_name = f\"{start_day.strftime('%Y%m%d')}_{end_day.strftime('%Y%m%d')}_{station.aeronet_name}\"\n file_name = os.path.join(station.aeronet_folder, base_name, base_name + '.lev20')\n # TODO : add automatic download of `.lev20' file from AERONET in case a file is missing.\n aeronet_data = pd.read_csv(file_name, skiprows=6).dropna()\n\n # Parse data and rename columns for easier extrapolation of AOD values\n df_dt = pd.to_datetime(aeronet_data['Date(dd:mm:yyyy)'] + aeronet_data['Time(hh:mm:ss)'], format=\"%d:%m:%Y%H:%M:%S\")\n columns = ['AOD_1640nm', 'AOD_1020nm', 'AOD_675nm', 'AOD_500nm', 'AOD_380nm', 'AOD_340nm']\n df_AOD_ANGSTROM = aeronet_data[columns].copy(deep=True)\n df_AOD_ANGSTROM.index = df_dt\n for col in sorted(columns):\n col_new = int(col.split('_')[1].replace('nm', ''))\n df_AOD_ANGSTROM.rename(columns={col: col_new}, inplace=True)\n\n cols = df_AOD_ANGSTROM.columns.values.tolist()\n cols.extend(wavelengths)\n df_AOD_ANGSTROM = df_AOD_ANGSTROM.reindex(cols, axis='columns').sort_index(axis=1)\n\n # Calculate AOD for missing wavelengths as $355,532,1064$\n # by interpolation values from the nearest existing measured wavelengths.\n cols = df_AOD_ANGSTROM.columns.values.tolist()\n for wavelength in wavelengths:\n col_ind = df_AOD_ANGSTROM.columns.get_loc(wavelength)\n ratio = (cols[col_ind + 1] - cols[col_ind]) / (cols[col_ind + 1] - cols[col_ind - 1])\n df_AOD_ANGSTROM[wavelength] = df_AOD_ANGSTROM.iloc[:, col_ind - 1] * \\\n ratio + (1 - ratio) * \\\n df_AOD_ANGSTROM.iloc[:, col_ind + 1]\n\n # Create dataset of AOD per wavelength\n ds_chans = []\n for wavelength in wavelengths:\n aeronet_ds_chan = xr.Dataset(\n data_vars={'aod': ('Time', df_AOD_ANGSTROM[wavelength]),\n 'lambda_nm': ('Wavelength', [wavelength])\n },\n coords={'Time': df_AOD_ANGSTROM.index.tolist(),\n 'Wavelength': [wavelength]\n })\n ds_chans.append(aeronet_ds_chan)\n ds_aod = xr.concat(ds_chans, dim='Wavelength')\n\n ds_aod.aod.attrs['long_name'] = r'$\\tau$'\n ds_aod = ds_aod.aod.where(ds_aod >= 0, drop=True)\n ds_aod.attrs = {'info': 'Aerosol Optical Depth - generated from AERONET - level 2.0',\n 'location': station.name, 'source_file': file_name,\n 'start_time': start_day.strftime(\"%Y-%d-%m\"), 'end_time': 
end_day.strftime(\"%Y-%d-%m\")}\n\n # Calculate Angstrom Exponent\n couples = [(355, 532), (355, 1064), (532, 1064)]\n angstrom_daily = []\n for lambda_1, lambda_2 in couples:\n angstrom_couple = xr.apply_ufunc(lambda x, y: misc_lidar.angstrom(ds_aod.sel(Wavelength=x).aod,\n ds_aod.sel(Wavelength=y).aod, x, y), lambda_1, lambda_2,\n keep_attrs=True).rename('angstrom')\n angstrom_ds_chan = xr.Dataset(\n data_vars={'angstrom': ('Time', angstrom_couple.values),\n 'lambda_nm': ('Wavelengths', [f\"{lambda_1}-{lambda_2}\"])\n },\n coords={'Time': df_AOD_ANGSTROM.index.tolist(),\n 'Wavelengths': [f\"{lambda_1}-{lambda_2}\"]\n })\n\n angstrom_daily.append(angstrom_ds_chan)\n ds_ang = xr.concat(angstrom_daily, dim='Wavelengths')\n ds_ang.angstrom.attrs['long_name'] = r'$\\AA$'\n ds_ang.attrs = {'info': 'Angstrom Exponent - generated from AERONET AOD',\n 'location': station.name, 'source_file': file_name,\n 'start_time': start_day.strftime(\"%Y-%d-%m\"), 'end_time': end_day.strftime(\"%Y-%d-%m\")}\n\n # Show AOD and Angstrom Exponent for a period\n if plot_results:\n t_slice = slice(start_day, start_day + timedelta(days=30) - timedelta(seconds=30))\n\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 8))\n ax = axes.ravel()\n for wavelength in wavelengths:\n aod_mean = ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).mean().item()\n aod_std = ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).std().item()\n textstr = ' '.join((\n r'$\\mu=%.2f$, ' % (aod_mean,),\n r'$\\sigma=%.2f$' % (aod_std,)))\n ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).plot(label=fr\"{wavelength}, \" + textstr, ax=ax[0])\n ax[0].set_title(ds_aod.attrs['info'])\n ax[0].legend()\n ax[0].set_ylabel(r'$\\tau$')\n\n for lambda_1, lambda_2 in couples:\n angstrom_mean = ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).mean().item()\n angstrom_std = ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).std().item()\n textstr = ' '.join((\n r'$\\mu=%.2f$, ' % (angstrom_mean,),\n r'$\\sigma=%.2f$' % (angstrom_std,)))\n ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).plot(x='Time',\n label=fr\"$ \\AA \\, {lambda_1},{lambda_2}$, \" + textstr\n , ax=ax[1])\n ax[1].legend()\n ax[1].set_title('Angstrom Exponent')\n plt.tight_layout()\n plt.show()\n\n # Angstrom Exponent distribution of a month\n couple_0 = f\"{355}-{532}\"\n couple_1 = f\"{532}-{1064}\"\n\n x = ds_ang.angstrom.sel(Time=t_slice, Wavelengths=couple_0).values\n y = ds_ang.angstrom.sel(Time=t_slice, Wavelengths=couple_1).values\n\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.scatter(x=x, y=y)\n ax.set_ylabel(couple_0)\n ax.set_xlabel(couple_1)\n ax.set_title(f\"Angstrom Exponent distribution {t_slice.start.strftime('%Y-%m')}\")\n plt.tight_layout()\n plt.show()\n\n # Save AOD and Angstrom Exponent datasets\n nc_base_name = f\"{start_day.strftime('%Y%m%d')}_{end_day.strftime('%Y%m%d')}_{station.name}\"\n\n xr_utils.save_dataset(ds_aod, folder_name=station.aeronet_folder, nc_name=nc_base_name+\"_aod.nc\")\n xr_utils.save_dataset(ds_ang, folder_name=station.aeronet_folder, nc_name=nc_base_name+\"_ang.nc\")", "def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave", "def Hosek18b(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < 
(self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def Nishiyama09(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def find_nearest_wav(self, wavelength):\n\n idx = np.searchsorted(self.wavelengths, wavelength, side=\"left\")\n if idx > 0 and (idx == len(self.wavelengths) or math.fabs(wavelength - self.wavelengths[idx-1]) < math.fabs(wavelength - self.wavelengths[idx])):\n return self.wavelengths[idx-1]\n else:\n return self.wavelengths[idx]", "def get_wavelength_location(headers, wavelength):\n start = headers['CRVAL1']\n step = headers['CDELT1']\n distance = wavelength - start\n number = round(distance / step)\n return number", "def search(self, wavelength, format=None, **keywords):\n q = self.create_query(wavelength, format)\n return q.execute()", "def wvac2air(w):\n scalar = False\n if isinstance(w, (int, float)):\n w = [w]\n scalar = True\n w = np.array([w])\n wair = w.copy()\n\n mask = w > 2000. 
# Modify only wavelength above 2000 A\n\n s2 = (1e4/w[mask])**2\n f = 1.+0.05792105/(238.0185-s2)+0.00167917/(57.362-s2)\n wair[mask] = w[mask]/f\n return wair[0][0] if scalar else wair[0]", "def Hosek18(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def NoguerasLara18(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self)) \n\n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def get_wave(q):\n\n approximant = 'SEOBNRv4'\n chi1 = [0,0,0]\n chi2 = [0,0,0]\n deltaTOverM = 0.1\n omega0 = 2e-2\n\n t, h = LALPy.generate_LAL_waveform(approximant, q, chi1, chi2, deltaTOverM, omega0)\n\n Amp = np.abs(h)\n peakIdx = np.argmax(Amp)\n\n t -= t[peakIdx]\n\n tmin = -500\n if min(t) > tmin:\n raise Exception('Data not long enough, decrease omega0.')\n keepIdx = t - tmin > -1e-3 # simple hack to ensure t_vec is always nearly the same\n t = t[keepIdx]\n h = h[keepIdx]\n\n tmax = 100\n keepIdx = t - tmax < 1e-3\n t = t[keepIdx]\n h = h[keepIdx]\n\n return t, h", "def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")", "def Fritz11(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def Cardelli89(self, wavelength, 
AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def OxygenTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/770e-9,lp.c/768e-9]),sim_nu=np.array([]),spec_file=''):\n # fraction of O2 by number density\n fO2 = (32*0.2320+28.02*0.7547+44.01*0.00046+39.94*0.0128+20.18*0.000012+4.0*0.0000007+83.8*0.000003+131.29*0.00004)*0.2320/32.0\n \n if len(spec_file) == 0:\n spec_file = '/Users/mhayman/Documents/DIAL/O2_HITRAN2012_760_781.txt'\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n# inu0 = np.argmin(np.abs(sim_nu)) # index to center of frequency array\n \n n_o2=fO2*(P/(lp.kB*T)-n_wv) # to convert atm to Pa use *101325\n ext_o2 = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(mO2*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True,filename=spec_file).T\n T_o2 = np.exp(-np.cumsum(n_o2[np.newaxis,:]*ext_o2,axis=1)*dr)\n \n return T_o2,sim_nu", "def add_wavelength(filename, model, std_tol, overwrite=False, plot_path=None):\n hdulist = fits.open(filename)\n\n # read both hdu's\n logger.debug(\"\\tObject: {}\".format(hdulist[0].header['OBJECT']))\n\n # extract just the middle part of the CCD (we only really care about Halpha)\n tbl = Table(hdulist[1].data)\n\n if 'wavelength' in tbl.colnames and not overwrite:\n logger.debug(\"\\tTable already contains wavelength values!\")\n return\n\n # compute wavelength array for the pixels\n wavelength, var = model.gp.predict(model.y, tbl['pix']-model.x_shift,\n return_var=True)\n bad_idx = np.sqrt(var) > std_tol.to(u.angstrom).value\n wavelength[bad_idx] = np.nan\n\n tbl['wavelength'] = wavelength\n tbl['wavelength_err'] = np.sqrt(var)\n\n new_hdu1 = fits.table_to_hdu(tbl)\n new_hdulist = fits.HDUList([hdulist[0], new_hdu1])\n\n logger.debug(\"\\tWriting out file with wavelength array.\")\n new_hdulist.writeto(filename, overwrite=True)\n\n if plot_path is not None:\n # plot the spectrum vs. 
wavelength\n fig,axes = plt.subplots(2, 1, figsize=(12,8), sharex=True)\n\n axes[0].plot(tbl['wavelength'], tbl['source_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[0].errorbar(tbl['wavelength'], tbl['source_flux'], 1/np.sqrt(tbl['source_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[0].set_ylim(tbl['source_flux'][200]/4, np.nanmax(tbl['source_flux']))\n axes[0].set_yscale('log')\n\n axes[1].plot(tbl['wavelength'], tbl['background_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[1].errorbar(tbl['wavelength'], tbl['background_flux'], 1/np.sqrt(tbl['background_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[1].set_ylim(1e-1, np.nanmax(tbl['background_flux']))\n axes[1].set_yscale('log')\n\n fig.tight_layout()\n _filename_base = path.splitext(path.basename(filename))[0]\n fig.savefig(path.join(plot_path, '{0}_1d_wvln.png'\n .format(_filename_base)))\n\n plt.close(fig)", "def calc_impedance(self) -> (Ohm, None):\n power: ComType = complex(0)\n power_unit: str = ''\n amp: NumType = 0\n amp_unit: str = ''\n volt: NumType = 0\n volt_unit: str = ''\n\n if self._volt_exists and self._volt_exists:\n if hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n elif hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n if hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n elif hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n z = volt / amp\n z_unit: str = f'Ohms ({volt_unit}/{amp_unit})'\n\n elif self._amp_exists and self._power_exists:\n if hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n elif hasattr(self._obj1, 'power'):\n power, power_unit = self._obj1.power, self._obj1.power_unit\n if hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n elif hasattr(self._obj2, 'power'):\n power, power_unit = self._obj2.power, self._obj2.power_unit\n z = power / amp**2\n z_unit: str = f'Ohms ({power_unit}/{amp_unit}^2)'\n\n else:\n return None\n\n return Ohm(z, z_unit, self._obj1.frequency, self._obj1.freq_unit)", "def getOrmsby(f,t):\n assert len(f) == 4, 'Ormsby wavelet needs 4 frequencies as input'\n f = np.sort(f) #Ormsby wavelet frequencies must be in increasing order\n pif = pi*f\n den1 = pif[3] - pif[2]\n den2 = pif[1] - pif[0]\n term1 = (pif[3]*np.sinc(pif[3]*t))**2 - (pif[2]*np.sinc(pif[2]))**2\n term2 = (pif[1]*np.sinc(pif[1]*t))**2 - (pif[0]*np.sinc(pif[0]))**2\n\n wav = term1/den1 - term2/den2;\n return wav", "def wavelength(self):\n return wavelength(energy)", "def bird_hulstrom80_aod_bb(aod380, aod500):\n # approximate broadband AOD using (Bird-Hulstrom 1980)\n return 0.27583 * aod380 + 0.35 * aod500", "def get(self):\n try:\n print('jwlee-test-observation-get, self.level:', self.level)\n if self.level is not None:\n data_obs = self._obs_file.get(\n self.var, level=self.level, region=self.region\n )\n else:\n data_obs = self._obs_file.get(self.var, region=self.region)\n return data_obs\n except Exception as e:\n if self.level is not None:\n logging.getLogger(\"pcmdi_metrics\").error(\n \"{} {} {} {}\".format(\n \"Failed opening 4D OBS\", self.var, self.obs_or_model, e\n )\n )\n else:\n logging.getLogger(\"pcmdi_metrics\").error(\n \"{} {} {} {}\".format(\n \"Failed opening 3D OBS\", self.var, self.obs_or_model, e\n )\n )", "def DeMarchi16(self, wavelength, AK):\n # If input 
entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AK (since law assumes AK = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AK\n\n return A_at_wave", "def air_to_vac(self, wave):\n #Convert to um\n wave_um = wave*.001\n ohm2 = (1./wave_um)**(2)\n\n #Calculate index at every wavelength\n nn = []\n for x in ohm2:\n n = 1+10**(-8)*(8342.13 + (2406030/float(130.-x)) + (15997/float(389-x)))\n nn.append(n)\n \n #Get new wavelength by multiplying by index of refraction\n vac_wave = nn*wave\n return vac_wave", "def _get_omega(self, vehicle_id):\n pos = self.positions[vehicle_id]\n omega = self.frenets[vehicle_id].get_omega(\n pos[0], pos[1], pos[2], pos[3])\n\n return omega", "def derive_Damineli16(wavelength):\n # From their eq 19\n x = np.log10(2.159 / wavelength)\n log_A_AKs = -0.015 + 2.33*x + 0.522*x**2. - 3.001*x**3. + 2.034*x**4.\n\n # Now to convert this back to linear space\n A_AKs_at_wave = 10**log_A_AKs \n\n return A_AKs_at_wave", "def bragg_law(self, d_list, wavelength):\r\n new_twotheta = []\r\n for d in d_list:\r\n new_twotheta.append(2*math.degrees(np.arcsin(wavelength/(2*d))))\r\n return new_twotheta", "def efftau_madau(rwl, z):\n\n from numpy import array, where, exp\n\n la = 1215.67 ## Lyman alpha. Angstroms. \n lb = 1026. ## Lyman beta. Angstroms. \n lg = 973.\n ld = 950.\n le = 938.\n ll = 912. ## Lyman limit.\n\n ## Redshifted from restframe to (definitely) observed. \n wl = rwl*(1. + z)\n \n n = len(wl)\n c = array([3.6e-3, 1.7e-3, 1.2e-3, 9.3e-4])\n l = array([ la, lb, lg, ld])\n\n tau = np.zeros_like(wl)\n xe = 1. + z\n\n ## Lyman series\n for i in range(len(l) ):\n indices = where(wl <= l[i]*xe) ## Note: no lower wavelength limit is correct as 'broadcasted' corrections. 
\n tau[indices] += c[i]*(wl[indices]/l[i])**3.46\n\n ## Photoelectric absorption\n xc = wl/ll\n xc3 = xc**3\n \n tau = where(wl <= ll*xe, tau + 0.25*xc3*(xe**.46 - xc**0.46) \\\n + 9.4*xc**1.5*(xe**0.18 - xc**0.18) \\\n - 0.7*xc3*(xc**(-1.32) - xe**(-1.32)) \\\n - 0.023*(xe**1.68-xc**1.68), tau)\n \n '''\n min_tau = tau.min()\n index = np.where(tau == min_tau)\n tau[:index] = min_tau \n \n tau = where(tau < 0.0, 0.0, tau)\n '''\n\n return where(tau > 700., 0., exp(-tau))", "def powerlaw(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def get_signal(self, audio, gain, phase):\n max_delay_ms = self.center_ms + self.depth_ms\n max_length_samples = int(self.sample_rate / 1000.0 * max_delay_ms)\n\n depth_phase = self.depth_ms / max_delay_ms\n center_phase = self.center_ms / max_delay_ms\n phase = phase * depth_phase + center_phase\n wet_audio = core.variable_length_delay(\n audio=audio, phase=phase, max_length=max_length_samples\n )\n # Remove channel dimension.\n if gain.dim() == 3:\n gain = gain[:, 0, :]\n\n wet_audio *= gain\n return (wet_audio + audio) if self.add_dry else wet_audio", "def Schlafly16(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def get_amesdusty_atmosphere(metallicity=0, temperature=5000, gravity=4):\n sp = pysynphot.Icat('AMESdusty', temperature, metallicity, gravity)\n\n # Do some error checking\n idx = np.where(sp.flux != 0)[0]\n if len(idx) == 0:\n print( 'Could not find AMESdusty Allard+ 2000 atmosphere model for')\n print( ' temperature = %d' % temperature)\n print( ' metallicity = %.1f' % metallicity)\n print( ' log gravity = %.1f' % gravity)\n\n return sp", "def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * Unit(self.wavelength_unit)\n else:\n return (attr * Unit(self.wavelength_unit)).to(units)\n else:\n return attr", 
"def antennaResponse(peak, xoff, yoff, fwhm):\n return peak * np.exp(CNST / (fwhm*fwhm) * (xoff*xoff + yoff*yoff))", "def convertToOpd(self, wavelength):\n\n if wavelength is None:\n scale = 1.\n else:\n scale = 2. * math.pi / wavelength\n self.opd = self.pupil * np.exp(1.j * self.phase * scale)", "def Fitzpactrick09(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def absorbance( self, lmin=0, lmax=0 ):\n A = self.prop[\"SPEC\"][:,1]\n if lmax>0:\n m = np.vstack( (self.wavelength(), A) ).T # matrix w lambda and absorbance\n m = m[ m[:,0] >= lmin ] # slice by wavelength...\n m = m[ m[:,0] <= lmax ]\n return np.average( m[:,1] ) # scalar\n return A # array", "def search(url, wavelength, **keywords):\n service = SLAService(url)\n return service.search(wavelength, **keywords)", "def derive_RiekeLebofsky(wavelength):\n filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K', 'L', 'M', \n '[8.0]', '[8.5]', '[9.0]', '[9.5]', '[10.0]', '[10.5]', \n '[11.0]', '[11.5]', '[12.0]', '[12.5]', '[13.0]']\n #wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.25, 1.635, 2.2, \n # 3.77, 4.68, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n # 11.5, 12.0, 12.5, 13.0])\n \n # Wavelengths from Nishiyama+09 plot of RL+85 law...slightly different than standard, \n # drop N filter\n wave = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.17, 1.57, 2.12, \n 3.40, 4.75, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,\n 11.5, 12.0, 12.5, 13.0])\n A_Av = np.array([1.531, 1.324, 1.00, 0.748, 0.482, 0.282, 0.175, 0.112,\n 0.058, 0.023, 0.02, 0.043, 0.074, 0.087, 0.083,\n 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])\n # Want to change this from A/Av to A/AK\n k_ind = np.where(np.array(filters) == 'K')\n Ak_Av = A_Av[k_ind]\n Av_Ak = 1.0 / Ak_Av\n\n A_Ak = A_Av * Av_Ak\n \n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_Ak, k=3, s=0)\n A_Ak_at_wave = interpolate.splev(wavelength, spline_interp)\n\n return A_Ak_at_wave", "def get_wave_unit(tag, hdulist, idx=None):\n from astropy.units import Unit\n if idx is None:\n idx = 1\n # Use Table\n if isinstance(hdulist[idx],BinTableHDU):\n tab = Table(hdulist[idx].data)\n header = hdulist[idx].header\n else:\n # NEED HEADER INFO\n return None\n # Try table header (following VLT/X-Shooter here)\n keys = list(header) # Python 3\n values = list(itervalues(header)) # Python 3\n hidx = values.index(tag)\n if keys[hidx][0:5] == 'TTYPE':\n try:\n tunit = header[keys[hidx].replace('TYPE','UNIT')]\n except KeyError:\n return None\n else:\n if tunit in ['Angstroem', 'Angstroms', 'ANGSTROMS']:\n tunit = 'Angstrom'\n unit = Unit(tunit)\n return unit\n else:\n return None", "def wavelength(self,freq):\n return self.phase_velocity()/freq", "def simulate_UVSPEC(file, 
config):\n\n wavelength = config['wavelength']\n\n # Coordenates from position of the station Hannover\n latitude = 52.39 # positive in the northern hemisphere\n longitud = 9.7 # negative reckoning west from prime meridian in Greenwich,\n\n # Read name of the file (correct time)\n name = os.path.split(file)\n time_n = name[1][0:15]\n print(\"Time name\", time_n)\n # convert time to datetime format\n time = datetime.datetime.strptime(time_n,\n '%Y%m%d_%H%M%S')\n # Calculate the azimuth and zenith angles in function of the date\n elev = ps.GetAltitude(latitude, longitud, time)\n azi = ps.GetAzimuth(latitude, longitud, time)\n zenith = 90 - elev\n\n # Correction between the sign and real azimuth for plot of radiance\n if -180 <= azi < 0:\n azi = 180 - azi\n elif -360 <= azi < -180:\n azi = -azi - 180\n else:\n pass\n\n print(\"Azimuth: {:5.1f}\".format(azi),\n \"\\nZenith: {:5.1f}\".format(zenith))\n\n # Change the value of zenith and azimuth angles in function of the time and\n # position in the UVSPEC file\n\n with open(config['personal_libraries'] + 'MUDIS_HDF5/MUDIS_radiance_Input.txt', 'r') as file:\n data = file.readlines()\n\n data[14] = \"day_of_year \" + str(time.timetuple().tm_yday) + \" \" + \"\\n\"\n data[15] = \"wavelength \" + str(\"{}\".format(wavelength)) + \" \" + \\\n str(\"{}\".format(wavelength)) + \\\n \" # wavelength to calcule [nm] \\n\"\n data[17] = \"sza \" + str(\"{:2.3f}\".format(zenith)) + \\\n \" # Solar zenith angle \\n\"\n data[18] = \"phi0 \" + str(\"{:2.3f}\".format(azi)) + \\\n \" #Azimuth angle with zenith position \\n\"\n\n with open(config['personal_libraries'] + 'MUDIS_HDF5/MUDIS_radiance_Input.txt', 'w') as file:\n file.writelines(data)\n\n # Create the directory to save the results\n os.makedirs(os.path.dirname(config['str_dir'] + '/simulation/' + '{}/{}nm/txt_files/'.format(time_n[0:8],\n wavelength)),\n exist_ok=True)\n\n # Run the program UVSPEC in the terminal\n os.system(config['UVSPEC_path'] + 'uvspec < ' + config['personal_libraries'] +\n 'MUDIS_HDF5/MUDIS_radiance_Input.txt> ' + config['str_dir'] + '/simulation/' +\n '{}/{}nm/txt_files/'.format(time_n[0:8], wavelength) + time_n +\n '.txt')", "def get_evwma(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.EVWMA(data)\n if result is None:\n raise IndicatorException\n return result", "def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * unit[self.wavelength_unit]\n else:\n return (attr * unit[self.wavelength_unit]).to(units)\n else:\n return attr", "def convert_nasa_unit_to_astropy(unit_str):\n\n print('Unit str: ' + repr(unit_str))\n if unit_str in ['days', 'hrs']:\n unit_str = unit_str[:-1]\n elif unit_str == 'decimal degrees':\n unit_str = 'degree'\n elif (\n unit_str in ['dex', 'Earth flux', 'sexagesimal']\n or\n unit_str.startswith('log10(')\n or\n unit_str.startswith('log(')\n ):\n return None\n elif unit_str == 'Solar mass':\n unit_str = 'solMass'\n elif unit_str == 'Solar radii':\n unit_str = 'solRad'\n elif unit_str.endswith(' mass'):\n unit_str = unit_str.split()[0].lower() + 'Mass'\n elif unit_str.endswith(' radii'):\n unit_str = unit_str.split()[0].lower() + 'Rad'\n elif unit_str.startswith('percent'):\n return 0.01\n\n print('Converted to: ' + repr(unit_str))\n\n return Unit(unit_str)", "def readAmesDustySpectrum(fname=''):\n print('Reading : ', fname)\n\n # Get the effective temperature, logg and metallicity from the file name\n ind = fname.find('lte')\n fname_tags = fname[ind+3:ind+13].split('-')\n teff = np.float(fname_tags[0]) * 100.\n logg = np.float(fname_tags[1]) * 100.\n mph = np.float(fname_tags[2]) * 100.\n\n wav = []\n inu = []\n bnu = []\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n while dum != '':\n dum = str(dum).replace('D', 'E')\n sdum = dum.split()\n wav.append(np.float(sdum[0]))\n inu.append(np.float(sdum[1]))\n bnu.append(np.float(sdum[2]))\n dum = rfile.readline()\n\n wav = np.array(wav)\n inu = np.array(inu)\n bnu = np.array(bnu)\n ii = wav.argsort()\n\n wav = wav[ii]\n inu = inu[ii]\n bnu = bnu[ii]\n\n # \"Decode\" the intensity arrays\n inu = 10.**(inu - 8.0) * wav\n bnu = 10.**(bnu - 8.0) * wav\n\n # Convert the wavelength to micron from Angstrom\n wav /= 1e4\n nwav = wav.shape[0]\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def dseries(temp, wavelength):\n if wavelength < 300 or wavelength > 830:\n return 0\n mm=wavelength%10\n s=_dseriesd(temp, wavelength-mm)\n if mm==0:\n return s\n m=mm*0.1\n e=_dseriesd(temp, (wavelength-mm)+10)\n return s+(e-s)*m", "def wavelength(self):\n return self.get(self._names[\"wavelength\"])", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def wavelength_to_wavenumber(wavelength):\n return 1. 
/ wavelength", "def speed_of_sound(altitude):\n t = temperature(altitude) # R\n a = sqrt(gamma*gas_constant*t) # [ft/s]\n return a", "def get(self, wave, flux, **kwargs):\n return self._get(wave, flux.to('flam').value, **kwargs)", "def AcquiredData (self, arguments=None) :\n\t\tself.OODriver.Wrapper_getSpectrum(self.wrapperHandle,self.spectrometerIndex,self.bufferHandle)\n\t\t\n\t\tif self.OODriver.Wrapper_isSaturated(self.wrapperHandle,self.spectrometerIndex) :\n\t\t\tprint \"Warning: OcenOptics spectrometer is saturated!\"\n\t\t\t\n\t\ttry : return self.buffer[self.spectral_interval]\n\t\texcept AttributeError : return self.buffer", "def blackbody( wave, T, waveunit='Angstrom' ):\n \n if waveunit=='Angstrom':\n # convert wavelength from angstroms to cm\n wave = wave / 1e10 * 100.\n elif waveunit=='nm':\n # convert wavelength from angstroms to cm\n wave = wave / 1e9 * 100.\n\n return( ((2 * h * c* c)/wave**5 ) / (exp(h*c/(wave*k*T))-1) )", "def get_pulse_value(object = pulse_value_req):\n try:\n response = urllib2.urlopen(object).read()\n pulse_value = json.loads(response)\n return pulse_value['GetSensorValue'][0]\n except URLError, e:\n print 'Error: No Heartrate Value.'", "def measure_wavelength_avg(self, from_time, to_time, print_n=False):\n from_idx = self._find_index_for_time(from_time)\n to_idx = self._find_index_for_time(to_time)\n \n # print number of measurements\n if print_n:\n n_measurements = to_idx - from_idx + 1\n print 'n measurements:', n_measurements\n \n # calculate overlap\n overlap = self.overlap()[from_idx:to_idx+1,:]\n \n # intitialize wavelength storage\n wavelengths = np.zeros(overlap.shape[0])\n \n for i in range(overlap.shape[0]):\n this_overlap = overlap[i,:]\n \n # set all overlap entries below 0.1% of the maximum overlap to zero\n this_overlap[this_overlap<0.001*np.max(this_overlap)] = 0.0\n \n nonzero = np.nonzero(this_overlap)\n \n # find connected section of chain with nonzero overlap\n consecutives = np.split(nonzero, np.where(np.diff(nonzero) != 1)[0]+1)\n \n if len(consecutives) != 1:\n warnings.warn('Wavelength could not be determined unambiguously.')\n return np.nan\n else:\n # add 1 since overlap involves two beads each\n wavelengths[i] = float( len(consecutives[0][0]) ) + 1\n \n return np.mean( wavelengths )", "def calculate_flux(self, band):\n\n if (self.wavelength[0] > band.wavelength[0] or\n self.wavelength[-1] < band.wavelength[-1]):\n\n warn('Spectrum does not cover the whole bandpass, '\n 'extrapolating...')\n dw = np.median(np.diff(self.wavelength.value))\n spec_wavelength = np.arange(\n band.wavelength.value[0],\n band.wavelength.value[-1] + dw, dw) * angstrom\n spec_flux = np.interp(spec_wavelength, self.wavelength,\n self.flux.value)\n\n else:\n spec_wavelength = self.wavelength\n spec_flux = self.flux.value\n\n i, j = spec_wavelength.searchsorted(\n Quantity([band.wavelength[0], band.wavelength[-1]]))\n wavelength = spec_wavelength[i:j]\n flux = spec_flux[i:j]\n\n dw_band = np.median(np.diff(band.wavelength))\n dw_spec = np.median(np.diff(wavelength))\n\n if dw_spec.value > dw_band.value > 20:\n\n warn('Spectrum wavelength sampling interval {0:.2f}, but bandpass'\n 'sampling interval {1:.2f}'.format(dw_spec, dw_band))\n\n # Interpolate the spectrum to the passband wavelengths:\n flux = np.interp(band.wavelength, wavelength, flux)\n band_transmission = band.transmission\n wavelength = band.wavelength\n\n else:\n # Interpolate the band transmission to the spectrum wavelengths:\n band_transmission = np.interp(\n wavelength, band.wavelength, 
band.transmission)\n\n # Weight by the response and wavelength, appropriate when we're\n # counting the number of photons within the band:\n flux = (np.trapz(band_transmission * flux * wavelength, wavelength) /\n np.trapz(band_transmission * wavelength, wavelength))\n flux *= erg / s / cm ** 2 / angstrom\n\n return flux", "def wair2vac(w):\n scalar = False\n if isinstance(w, (int, float)):\n w = [w]\n scalar = True\n w = np.array([w])\n wvac = w.copy()\n\n mask = w > 2000. # Modify only wavelength above 2000 A\n\n s2 = (1e4/w[mask])**2\n f = 1.+0.05792105/(238.0185-s2)+0.00167917/(57.362-s2)\n wvac[mask] = w[mask]*f\n return wvac[0][0] if scalar else wvac[0]", "def _get_url_for_timerange(self, timerange, **kwargs):\n base_url = \"https://data.ngdc.noaa.gov/platforms/solar-space-observing-satellites/goes/goes{goes_number}/\"\n supported_waves = [94, 131, 171, 195, 284, 304]\n supported_levels = (\"2\", \"1b\")\n\n # these are optional requirements so if not provided assume defaults\n # if wavelength is not provided assuming all of them\n if \"wavelength\" in kwargs.keys():\n wavelength_input = kwargs.get(\"wavelength\")\n if isinstance(wavelength_input, u.Quantity): # not a range\n if int(wavelength_input.to_value('Angstrom')) not in supported_waves:\n raise ValueError(f\"Wavelength {kwargs.get('wavelength')} not supported.\")\n else:\n wavelength = [kwargs.get(\"wavelength\")]\n else: # Range was provided\n compress_index = [wavelength_input.wavemin <= this_wave <=\n wavelength_input.wavemax for this_wave in (supported_waves * u.Angstrom)]\n if not any(compress_index):\n raise ValueError(\n f\"Wavelength {wavelength_input} not supported.\")\n else:\n wavelength = list(compress(supported_waves, compress_index)) * u.Angstrom\n else: # no wavelength provided return all of them\n wavelength = supported_waves * u.Angstrom\n # check that the input wavelength can be converted to angstrom\n waves = [int(this_wave.to_value('angstrom', equivalencies=u.spectral()))\n for this_wave in wavelength]\n # use the given satellite number or choose the best one\n satellitenumber = int(kwargs.get(\n \"satellitenumber\", self._get_goes_sat_num(timerange.start)))\n if satellitenumber < 16:\n raise ValueError(f\"Satellite number {satellitenumber} not supported.\")\n # default to the highest level of data\n level = str(kwargs.get(\"level\", \"2\")) # make string in case the input is a number\n\n if level not in supported_levels:\n raise ValueError(f\"Level {level} is not supported.\")\n\n results = []\n for this_wave in waves:\n if level == \"2\":\n search_pattern = base_url + \\\n r'l{level}/data/suvi-l{level}-ci{wave:03}/%Y/%m/%d/dr_suvi-l{level}-ci{wave:03}_g{goes_number}_s%Y%m%dT%H%M%SZ_.*\\.fits'\n elif level == \"1b\":\n if this_wave in [131, 171, 195, 284]:\n search_pattern = base_url + \\\n r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave:03}_G{goes_number}_s%Y%j%H%M%S.*\\.fits.gz'\n elif this_wave == 304:\n search_pattern = base_url + \\\n r'l{level}/suvi-l{level}-he{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-He{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\\.fits.gz'\n elif this_wave == 94:\n search_pattern = base_url + \\\n r'l{level}/suvi-l{level}-fe{wave:03}/%Y/%m/%d/OR_SUVI-L{level}-Fe{wave_minus1:03}_G{goes_number}_s%Y%j%H%M%S.*\\.fits.gz'\n\n if search_pattern.count('wave_minus1'):\n scraper = Scraper(search_pattern, level=level, wave=this_wave,\n goes_number=satellitenumber, wave_minus1=this_wave-1)\n else:\n scraper = Scraper(search_pattern, level=level, wave=this_wave,\n 
goes_number=satellitenumber)\n results.extend(scraper.filelist(timerange))\n return results", "def d65Illum(wavelength):\n return _d65Illum.calc(wavelength)", "def read_data(self, uv, tave=False):\n self.freqs = uv.freq_array[0]\n a1 = uv.ant_1_array[:uv.Nbls]\n a2 = uv.ant_2_array[:uv.Nbls]\n for a in uv.antenna_numbers:\n if not a in a1 and not a in a2:\n if not a in self.dead: self.dead.append(a)\n pid = np.where(uv.polarization_array == pol_lookup[self.pol])[0][0]\n data = uv.data_array[:,0,:,pid].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)\n flag = uv.flag_array[:,0,:,pid].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)\n ind = np.where(a1!=a2)[0]\n self.mask = output_mask_array(flag[:,ind])\n self.shape_waterfall = (uv.Ntimes, uv.Nfreqs)\n def creat_dict(ii):\n if a1[ii] < 57 or a2[ii] < 57 or a1[ii] == a2[ii]: return # hard coded for MWA Phase II\n if a1[ii] in self.dead or a2[ii] in self.dead: return\n bl = (a1[ii],a2[ii])\n md = np.ma.masked_array(data[:,ii],flag[:,ii])\n diff = md[1:] - md[:-1]\n self.noise[bl] = np.var(diff,axis=0).data/2\n zerofq = np.where(np.sum(np.logical_not(diff.mask),axis=0) < 3)[0]\n md.mask[:,zerofq] = True\n self.data_backup[bl] = {self.pol: np.complex64(md.data)}\n self.flag_backup[bl] = {self.pol: np.copy(md.mask)}\n if tave:\n md = np.mean(md,axis=0,keepdims=True)\n self.data[bl] = {self.pol: np.complex64(md.data)}\n self.flag[bl] = {self.pol: md.mask}\n map(creat_dict, np.arange(uv.Nbls))\n if tave: self.mask= np.product(self.mask, axis=0, keepdims=True).astype(bool)\n mask = np.copy(self.mask)\n if mask.ndim == 2: mask = np.product(self.mask, axis=0).astype(bool)\n self.gains = RedGain(freqs=self.freqs, mask=mask)\n self.gains.get_auto(uv)", "def RomanZuniga07(self, wavelength, AKs):\n # If input entry is a single float, turn it into an array\n try:\n len(wavelength)\n except:\n wavelength = [wavelength]\n\n # Return error if any wavelength is beyond interpolation range of\n # extinction law\n if ((min(wavelength) < (self.low_lim*10**-4)) | (max(wavelength) > (self.high_lim*10**-4))):\n return ValueError('{0}: wavelength values beyond interpolation range'.format(self))\n \n # Extract wave and A/AKs from law, turning wave into micron units\n wave = self.wave * (10**-4)\n law = self.obscuration\n\n # Find the value of the law at the closest points\n # to wavelength\n A_AKs_at_wave = []\n for ii in wavelength:\n idx = np.where( abs(wave - ii) == min(abs(wave - ii)) )\n A_AKs_at_wave.append(law[idx][0])\n\n # Now multiply by AKs (since law assumes AKs = 1)\n A_at_wave = np.array(A_AKs_at_wave) * AKs\n\n return A_at_wave", "def wind(\n da_model: Union[xr.DataArray, xr.Dataset],\n wind: xr.DataArray = None,\n wind_u: xr.DataArray = None,\n wind_v: xr.DataArray = None,\n altitude: float = 10,\n altitude_correction: bool = False,\n freq: pd.Timedelta = None,\n reproj_method: str = \"nearest_index\",\n resample_kwargs: dict = {},\n logger=logger,\n):\n if wind_u is not None and wind_v is not None:\n wind = np.sqrt(np.power(wind_u, 2) + np.power(wind_v, 2))\n elif wind is None:\n raise ValueError(\"Either wind or wind_u and wind_v varibales must be supplied.\")\n\n if wind.raster.dim0 != \"time\":\n raise ValueError(f'First wind dim should be \"time\", not {wind.raster.dim0}')\n\n # compute wind at 2 meters altitude\n if altitude_correction:\n wind = wind * (4.87 / np.log((67.8 * altitude) - 5.42))\n # downscale wind (lazy)\n wind_out = wind.raster.reproject_like(da_model, method=reproj_method)\n # resample time\n wind_out.name = \"wind\"\n wind_out.attrs.update(unit=\"m 
s-1\")\n if freq is not None:\n resample_kwargs.update(upsampling=\"bfill\", downsampling=\"mean\", logger=logger)\n wind_out = resample_time(wind_out, freq, conserve_mass=False, **resample_kwargs)\n return wind_out", "def calc(self, wavelength):\n if wavelength < self.minWavelength or wavelength > self.maxWavelength:\n return 0\n mm=wavelength%self.interval\n s=self._calcd(wavelength-mm)\n if mm==0:\n return s\n m=mm*1.0/self.interval\n e=self._calcd((wavelength-mm)+self.interval)\n return s+(e-s)*m", "def test_filt_abmag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=u.ABmag)\n assert np.isclose(fluxd.value, -26.77, atol=0.007)", "def bommer_stafford_alarcon_ds_2009(magnitude=7.0, distance=10.0, vs30=760.0, ztor=0.0, duration_type='DS575H'):\n\n # duration type map\n dur_map = {'DS575H':0, 'DS595H': 1}\n dur_tag = dur_map.get(duration_type.upper(), None)\n if dur_tag is None:\n print(\"SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?\")\n return None, None, None, None\n \n # modeling coefficients\n c0 = [-5.6298, -2.2393]\n m1 = [1.2619, 0.9368]\n r1 = [2.0063, 1.5686]\n r2 = [-0.2520, -0.1953]\n h1 = [-2.3316, 2.5000]\n v1 = [-0.2900, -0.3478]\n z1 = [-0.0522, -0.0365]\n tauCoeff = [0.3527, 0.3252]\n phiCoeff = [0.4304, 0.3460]\n sigma_c = [0.1729, 0.1114]\n sigma_Tgm = [0.5289, 0.4616]\n\n # median\n ds_median = np.exp(c0[dur_tag]+m1[dur_tag]*magnitude+(r1[dur_tag]+ \\\n r2[dur_tag]*magnitude)*np.log(np.sqrt(distance**2+h1[dur_tag]**2))+ \\\n v1[dur_tag]*np.log(vs30)+z1[dur_tag]*ztor)\n # standard deviations\n ds_sigma = sigma_Tgm[dur_tag]\n ds_tau = tauCoeff[dur_tag]\n ds_phi = phiCoeff[dur_tag]\n\n # return\n return np.log(ds_median), ds_sigma, ds_tau, ds_phi", "def read300yrh(period):\n directory300 = '/seley/ypeings/simu/PAMIP-1.1-QBO-300yr/monthly/'\n file300 = 'U10_1700-2000.nc'\n filename = directory300 + file300\n \n data = Dataset(filename)\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n u10q = data.variables['U10'][:]\n data.close()\n \n ### Reshape in year/month\n u10n = np.reshape(u10q,(u10q.shape[0]//12,12,lat.shape[0],lon.shape[0]))\n \n ### Calculate over particular months\n u10 = UT.calcDecJanFeb(u10n,lat,lon,'surface',1)\n \n ### Slice U10 at 65N\n latq = np.where((lat >= 64.5) & (lat <= 65.5))[0]\n lat = lat[latq].squeeze()\n u10 = u10[:,latq,:].squeeze()\n \n ### Take zonal mean \n u10z = np.nanmean(u10,axis=1)\n \n ### Remove missing data\n mask = np.where(u10z > -1e5)[0]\n \n ### Detrend\n u10zdt = sss.detrend(u10z[mask],type='linear')\n \n return lat,lon,u10zdt", "def derive_Fritz11(wavelength):\n # Extinction law definition\n wave = np.array([1.282, 1.736, 2.166, 2.625, 2.758, 2.873, 3.039, 3.297, 3.74, 3.819, 3.907, 4.052,\n 4.376, 5.128, 5.908, 6.772, 7.459, 7.502, 8.76, 12.371, 19.062])\n A_AKs = np.array([7.91, 4.30, 2.49, 1.83, 1.51, 1.84, 2.07, 1.66, 1.19, 1.19, 1.09, 1.01, 1.09, 0.99,\n 1.04, 0.84, 0.81, 0.79, 2.04, 1.34, 1.34])\n\n\n # Interpolate over the curve\n spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)\n A_at_wave = interpolate.splev(wavelength, spline_interp)\n\n # We'll call 2.14 microns the K-band\n idx = np.where( abs(wavelength - 2.14) == min(abs(wavelength - 2.14)) )\n A_AKs_at_wave = A_at_wave / A_at_wave[idx] \n\n return A_AKs_at_wave", "def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / 
diameter\n return fwhm", "def __init__(self, downsampling=False, wl_range = False, spectra_path = '/Users/kyleturner/Dropbox/My_Box/Code/M_Files/process_svc_v2p1/rrs_model_3C-master/spectra/'):\n\n\t\t\ta_ph = pd.read_csv(spectra_path + 'phyto.A', skiprows=11, sep='\\t', index_col=0).iloc[:,0]\n\t\t\n\t\t\ta_w = pd.read_csv(spectra_path + 'water.A', skiprows=10, sep='\\t', index_col=0).iloc[:,0]\n\t\t\tdaw_dT = pd.read_csv(spectra_path + 'daWdT.txt', skiprows=10, sep='\\t', index_col=0).iloc[:,0]\n\t\t\tastar_y = pd.read_csv(spectra_path + 'Y.A', skiprows=11, delimiter=' ', index_col=0).loc[350:].iloc[:,0]\n\n\t\t\td = pd.DataFrame(index = a_ph.index)\n\t\t\td.index.name = 'Wavelength, [nm]'\n\t\t\td['astar_ph'] = a_ph\n\t\t\td['astar_y'] = astar_y \n\t\t\td['a_w'] = a_w \n\t\t\td['daw_dT'] = daw_dT\n\n\t\t\tif downsampling == False:\n\t\t\t\tself.spectra = d\n\t\t\telse: \n\t\t\t\tself.spectra = d.apply(lambda x: pd.rolling_mean(x, downsampling, center=True))\n\n\t\t\tif wl_range != False:\n\t\t\t\tself.spectra = self.spectra.loc[wl_range[0]:wl_range[1]].dropna()\n\n\t\t\tself.wl = np.array(self.spectra.index)\n\t\t\t\t\n\t\t\tself.model = self._compile_model()", "def turbulence(*args, attenuation: Union[float, bool]=0.0, frequency: Union[float, bool]=0.0,\n magnitude: Union[float, bool]=0.0, maxDistance: Union[float, bool]=0.0, name:\n Union[AnyStr, bool]=\"\", noiseLevel: Union[int, bool]=0, noiseRatio: Union[float,\n bool]=0.0, perVertex: bool=True, phase: Union[float, bool]=0.0, phaseX:\n Union[float, bool]=0.0, phaseY: Union[float, bool]=0.0, phaseZ: Union[float,\n bool]=0.0, position: Union[List[float, float, float], List[List[float, float,\n float]], bool]=None, torusSectionRadius: Union[float, bool]=0.0,\n volumeExclusion: bool=True, volumeOffset: Union[List[float, float, float],\n bool]=None, volumeShape: Union[AnyStr, bool]=\"\", volumeSweep: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def twostr_func(wavelength, F_s, solarzenithangle,albedo_dif, \n\t\t\talbedo_dir, temp_ground, w_0, g, tau_n, temp_c):\n\t\n\t########################\n\t###Import useful libraries\n\t########################\n\timport numpy as np\n\timport pdb\n\timport scipy.linalg\n\n\n\n\n\t########################\n\t###Define model parameters\n\t########################\n\t#Properties of the ground\n\temissivity_ground=1.-albedo_dif #emissivity of ground. 1=perfect BB emitter.\n\n\t#Optical depth structure\n\tNlayer=len(tau_n) #number of layers in the atmospheric model.\n\t\n\ttau_c=np.zeros(Nlayer+1)# tau_c[n] is the cumulative optical depth at the upper edge of layer n. So tau_c[0]=0, and tau_c[N] is the maximum possible.\n\tfor n in range(0, Nlayer):\n\t\ttau_c[n+1]=tau_c[n]+tau_n[n] \n\n\t#In the Toon formalism, j=0 corresponds to space, and j=N+1 corresponds to the planet surface.\n\t#These points in wavelength space define the edges of the bins in tau space. \n\t#Other terminology:\n\t#\ttau_c=cumulative optical depth of layers *above* layer n. \n\t#\ttau_n=total optical depth of the layer n\n\t#\ttau=total optical depth at any point within a layer n, hence satisfying 0<tau<tau_n\n\n\tmu_0=np.cos(solarzenithangle) #\"incident direction of solar beam\"\n\n\n\t########################\n\t###Determine the two-stream approximation coefficients.\n\t########################\n\t#Eddington and quadrature are good at solar wavelengths (i.e., not thermal blackbody dominated). 
delta scalings of Joseph et al (1976) recommended to replace w_0, g, tau in this case. However, when dominated by internal isotropic sources like the Planck function, hemispheric mean approximation is preferable. When w_0=0, quadrature case has problems. This happens esp at thermal wavelengths. Again this favors using hemispheric mean at these wavelengths\n\t\n\t#We use quadrature because 1) we are at solar wavelengths for this UV work and 2) that's what twostr.f does (which is our comparison case)\n\tgamma_1= np.sqrt(3.)*(2.-w_0*(1.+g))/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_1\n\tgamma_2=np.sqrt(3.)*w_0*(1.-g)/2. #consistent with Toon et al; consistent with Pierrehumbert gamma_2\n\tgamma_3=(1.-np.sqrt(3.)*g*mu_0)/2. #consistent with Toon et al; equal to the Pierrehumbert gamma_plus/w_0\n\tgamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\tmu_1=1./np.sqrt(3.)+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\t##Eddington\n\t#gamma_1= (7.-w_0*(4.+3.*g))/4.\n\t#gamma_2=-1.*(1.-w_0*(4.-3.*g))/4.\n\t#gamma_3=(2.-3.*g*mu_0)/4.\n\t#gamma_4=1.-gamma_3 #consistent with Toon et al; equal to the Pierrehumbert gamma_minus/w_0\n\t#mu_1=1./2.+np.zeros(np.shape(gamma_1))#In Toon paper (eqn 18), this is given by: (1.-w_0)/(gamma_1-gamma_2). For the quadrature approximation, it is 1./np.sqrt(3.). Given its use, it seems to relate most closely to gamma_B from Pierrehumbert (see eqs 5.27, 5.30)\n\n\talambda=np.sqrt(np.abs(gamma_1*gamma_1-gamma_2*gamma_2)) #this is the lower-case lambda, from eqn 21 of Toon et al\n\t\t\t\t\t\t\t\t #The absolute value was added based on the code Toon just sent us. This corresponds to his AK(L,J) parameter. But it should not matter since gamma_1>gamma_2 for w_0<1.\n\tclambda=(gamma_1-alambda)/(gamma_2) #this is the upper-case lambda, from eqn 22 of Toon et al\n\n\tEMLT=np.exp(-alambda*tau_n) #this appears to be a prefactor used to facilitate computation of eqn 44 of Toon et al\n\te1=1.+clambda*EMLT\n\te2=1.-clambda*EMLT\n\te3=clambda+EMLT\n\te4=clambda-EMLT\n\n\t########################\n\t###Set up calculation\n\t########################\n\t\"\"\"\n\tThe fundamental equation we are solving is of form:\n\tA_{l}*Y_{l-1}+B_{l}*Y_{l}+D{l+1}=E_{l} (equation 39 of Toon et al)\n\tHere, A_l, B_l, D_l, E_l are quantities we determine, and the Y_l is what we solve for.\n\tHence, we can summarize that we are solving a matrix equation that takes form:\n\tPY=E\n\twhere Y[l]=Y_l\n\t E[l]=E_l\n\t P[l, l-1]=A_l [row, column]\n\t P[l, l]=B_l\n\t P[l, l+1]=D_l\n\t P[i,j]=0 else\n\tToon et al use 1-indexing. Hence n runs from 1 to N, l runs from 1 to 2N, where N is the number of layers, and they have:\n\tY_l=Y_{1n} for l=1,3,5,...2n-1...2N-1\n\tY_l=Y_{2n} for l=2,4,6,...2n...2N\n\n\tHowever, we use Python, which has 0-indexing. 
Hence *we* choose that n runs from 0 to N-1, l runs from 0 to 2N-1, and:\n\tY_l=Y_{1n} for l=0,2,4...2n...2N-2\n\tY_l=Y_{2n} for l=1,3,5...2n+1...2N-1\n\n\tThe Y_{1n} and Y_{2n} are related to F^+_n and F^-_n via equations 31 and 32 of Toon et al.\n\tThis parametrization has been done to remove exponentials with positive operands (ie ones that could grow large and lead to numerical instabilities) from the matrix.\n\n\tNote: The mapping of this PQ=R to the F+ and F- is unclear because of 1) this parametrization in terms of Y_l (done to eliminate numerical instabilities) and 2)further linear combinations done to convert a pentagiagonal matrix to an even simpler tridiagonal matrix. Hence intuitive checks are hard.\n\t\"\"\"\n\n\t########################\n\t###Set up surface flux\n\t########################\n\tS_sfc=albedo_dir*mu_0*np.exp(-tau_c[-1]/mu_0)*np.pi*F_s+emissivity_ground*np.pi*Planck(temp_ground, wavelength)\n\t#Surface emission. Formed by adding blackbody emission from the ground to the reflected energy from the direct beam. The direct beam's reflected energy is assumed to be purely diffuse. This corresponds to equations 37 and 38 of Toon et al. Note that this does NOT match equation 5.31 of Pierrehumbert because it does not include the reflected diffuse radiation. So, this implicitly assumes the diffuse albedo to be 0. \n\n\t########################\n\t###Set up C-values\n\t########################\n\t#In the reshuffled set of parameters used in this formalism, these seem analagous to the forcing term in Pierrehumbert. All the added radiation is contained in here.\n\n\tdef C_plus(n, tau): #implementation of superposition of eqns 23 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]-1./mu_0)*gamma_3[n]+gamma_4[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau+1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. \n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\tdef C_minus(n, tau): #implementation of superposition of eqns 24 and 27 from Toon et al\n\t\tsolarrad_denominator=alambda[n]**2.-1./mu_0**2.\n\t\tsolarrad_prefactor=w_0[n]*F_s*np.pi\n\t\tsolarrad_exponential=np.exp(-1.*(tau_c[n]+tau)/mu_0)\n\t\tsolarrad_factor=((gamma_1[n]+1./mu_0)*gamma_4[n]+gamma_3[n]*gamma_2[n])\n\t\tsolarrad=solarrad_prefactor*solarrad_factor*solarrad_exponential/solarrad_denominator #units of flux: erg/s/cm2/nm\n\t\t\n\t\tblackbody_prefactor=2*np.pi*mu_1[n]\n\t\tB0n=Planck(temp_c[n], wavelength)\n\t\tB1n=(Planck(temp_c[n+1], wavelength)-B0n)/tau_n[n] #this is effectively a slope\n\t\tblackbody_factor=B0n+B1n*(tau-1./(gamma_1[n]+gamma_2[n]))\n\t\tblackbody=blackbody_prefactor*blackbody_factor #start with units of the Planck function, which are: erg/s/cm2/nm/sr. But multiplying by 2pi sr restores the units of flux. So can safely add them. 
\n\t\t\n\t\tresult=solarrad+blackbody\n\t\treturn result\n\n\t########################\n\t###Calculate matrix coefficients\n\t#########################\n\t#initialize the A, B, D, and E.\n\tA=np.zeros(Nlayer*2)\n\tB=np.zeros(np.shape(A))\n\tD=np.zeros(np.shape(A))\n\tE=np.zeros(np.shape(A))\n\n\n\t#For l=0 (n=0) we have the boundary condition that the downward diffuse flux at the top of the first layer is equal to any incident diffuse downward flux. We set this to be zero.\n\tA[0]=0.\n\tB[0]=e1[0]\n\tD[0]=-1.*e2[0]\n\tE[0]=0.-1*C_minus(0,0) #This is really F_minus[0,0], i.e. we are assuming there is no downward diffuse flux from the top of the atmosphere.\n\n\t#for l=2N-1 (n=N-1), we have the boundary condition that the upward flux at the surface is the sume of the reflected downward diffuse flux and energy from any other sources (e.g. reflected direct beam, BB emission of the ground)/np.sqrt(3.)\n\tA[2*Nlayer-1]=e1[Nlayer-1]-albedo_dif*e3[Nlayer-1]\n\tB[2*Nlayer-1]=e2[Nlayer-1]-albedo_dif*e4[Nlayer-1]\n\tD[2*Nlayer-1]=0.\n\tE[2*Nlayer-1]=S_sfc-C_plus(Nlayer-1, tau_n[Nlayer-1])+albedo_dif*C_minus(Nlayer-1, tau_n[Nlayer-1])\n\n\t#There is a problem in the Toon paper. As written, the l=2n depends on e_n+1, running over the array edge. twostr.f resolves this by adopting a different mapping: their definition reduces to defining l=2(n+1) and running n from 0 to N-1. In this case, l=2 (The third value in the list of ls) depends on n=0 and n=1. This eliminates the overflow problem. We have implemented this below.\n\t\n\t##For n=1,2,3...N-1, l=2,4,6,...2N-2:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*(n+1)\n\t\tA[l]=e2[n]*e3[n]-e4[n]*e1[n]\n\t\tB[l]=e1[n]*e1[n+1]-e3[n]*e3[n+1]\n\t\tD[l]=e3[n]*e4[n+1]-e1[n]*e2[n+1]\n\t\t\n\t\tE[l]=e3[n]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))+e1[n]*(C_minus(n,tau_n[n])-C_minus(n+1,0.))\n\n\n\t#For n=0...N-2, l=1,3...2N-3:\n\tfor n in range(0, Nlayer-1):\n\t\tl=2*n+1\n\t\tA[l]=e2[n+1]*e1[n]-e3[n]*e4[n+1]\n\t\tB[l]=e2[n]*e2[n+1]-e4[n]*e4[n+1]\n\t\tD[l]=e1[n+1]*e4[n+1]-e2[n+1]*e3[n+1]\n\t\t\n\t\tE[l]=e2[n+1]*(C_plus(n+1, 0.)-C_plus(n, tau_n[n]))-e4[n+1]*(C_minus(n+1, 0)-C_minus(n, tau_n[n])) #twostr.f has a -1*e_{4,n+1}. We have applied the same even though this is NOT what is written in the Toon et al paper. We have done this because Toon told us (6/26/2015) that there are some sign errors in the coefficients, and we currently trust the validated CLIMA code over the paper we know has errors in it. EDIT: Looking at the code Toon shared with us, he does the same. \n\n\n\t########################\n\t###Assemble matrix equation components\n\t#########################\n\tP=np.zeros([Nlayer*2,Nlayer*2])\n\n\t#l=0: no \"A\" coefficient b/c l-1 has no meaning\n\tP[0,0]=B[0]\n\tP[0,1]=D[0]\n\n\t#l=2N-1: no \"D\" coefficient b/c l+1 has no meaning\n\tP[2*Nlayer-1,2*Nlayer-1-1]=A[2*Nlayer-1]\n\tP[2*Nlayer-1,2*Nlayer-1]=B[2*Nlayer-1]\n\n\tfor l in range(1, Nlayer*2-1): #This populates the matrix P in PY=E. 
\n\t\tP[l, l-1]=A[l]\n\t\tP[l,l]=B[l]\n\t\tP[l,l+1]=D[l]\n\n\t########################\n\t###Invert matrix\n\t#########################\n\t#Y=np.linalg.solve(P, E) #this is the Y_l\n\t\n\t#try using a specialized solver\n\tab=np.zeros([3,2*Nlayer])\n\tab[0,:]=np.append(0.0, np.diag(P, k=1))\n\tab[1,:]=np.diag(P, k=0)\n\tab[2,:]=np.append(np.diag(P, k=-1),0.0)\n\t#pdb.set_trace()\n\tY=scipy.linalg.solve_banded((1,1), ab, E) #this is the Y_l\n\n\n\t########################\n\t###Convert from Y_l to Y_1n, Y_2n\n\t#########################\n\t#The Y_1n as defined in Toon et al correspond to l=1,3, 5...2N-1. Adjusting for the zero-indexing of Python as we have done, they instead correspond to l=0,2,...2N-2\n\t#The Y_2n as defined in Toon et al correspond to l=2,4,6...2N. Adjusting for Python zero-indexing as we have done, they instead correspond to l=1,3,5...2N-1.\n\t#For detail, see eq. 40.\n\tY_1=np.zeros(Nlayer)\n\tY_2=np.zeros(Nlayer)\n\tfor n in range(0, Nlayer):\n\t\tY_1[n]=Y[2*n]\n\t\tY_2[n]=Y[2*n+1] \n\t\t#last number called is Nlayer-1=N-1, so is consistent.\n\t\n\t########################\n\t###Convert from Y_1n, Y_2n to F_plus, F_minus\n\t#########################\n\tdef F_plus(n,tau): #defined from Eqn 31 of Toon et al.\n\t\tterm1=Y_1[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))+clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(np.exp(-alambda[n]*(tau_n[n]-tau))-clambda[n]*np.exp(-alambda[n]*tau))\n\t\tterm3=C_plus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\n\tdef F_minus(n, tau): #defined from Eqn 32 of Toon et al.\n\t\tterm1=Y_1[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))+np.exp(-alambda[n]*tau))\n\t\tterm2=Y_2[n]*(clambda[n]*np.exp(-alambda[n]*(tau_n[n]-tau))-np.exp(-alambda[n]*tau))\n\t\tterm3=C_minus(n,tau)\n\t\t\n\t\tresult=term1+term2+term3\n\t\treturn result\n\t\n\t########################\n\t###Evaluate F_plus, F_minus at boundary edges\n\t#########################\n\tF_plus_tau0=np.zeros(np.shape(tau_n))\n\tF_plus_taumax=np.zeros(np.shape(tau_n))\n\tF_minus_tau0=np.zeros(np.shape(tau_n))\n\tF_minus_taumax=np.zeros(np.shape(tau_n))\n\n\tfor n in range(0, Nlayer):\n\t\tF_plus_tau0[n]=F_plus(n, 0.)\n\t\tF_plus_taumax[n]=F_plus(n, tau_n[n])\n\t\tF_minus_tau0[n]=F_minus(n, 0.)\n\t\tF_minus_taumax[n]=F_minus(n, tau_n[n])\n\n\n\t########################\n\t###Convert from Y_1n, Y_2n to F_net, mean intensity.\n\t#########################\n\t#test if diffuse flux dominates over direct flux. If direct flux dominant, instead set mu_1=mu_0\n\t\n\t#if F_minus_taumax[-1]<mu_0*np.pi*F_s*np.exp(-tau_c[-1]/mu_0):\n\t\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t#mu_1=np.zeros(np.shape(mu_1))+mu_0\n\t\n\tF_net=np.zeros(np.shape(tau_n)) #defined from Eqn 48 of Toon et al. This quantity is the net flux at the BASE of layer n.\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\n\t\tterm1=Y_1[n]*(e1[n]-e3[n])\n\t\tterm2=Y_2[n]*(e2[n]-e4[n])\n\t\tterm3=C_plus(n, tau_n[n])-C_minus(n, tau_n[n])\n\t\t\n\t\tF_net[n]=term1+term2+term3 -direct\n\n\tAMEAN=np.zeros(np.shape(tau_n)) #defined from Eqn 49 of Toon et al. This is the equivalent of the quantity AMEAN in the twostr.f code. It is equal to 4*np.pi*J_n, where J_n is the mean intensity at the base of layer n. Hence this quantity AMEAN should be equal to the total intensity received by a point at the base of layer n. 
\n\tfor n in range(0, Nlayer):\n\t\tdirect=mu_0*np.pi*F_s*np.exp(-(tau_c[n]+tau_n[n])/mu_0) #eqn 50 of Toon et al\n\t\n\t\tterm1=Y_1[n]*(e1[n]+e3[n])\n\t\tterm2=Y_2[n]*(e2[n]+e4[n])\n\t\tterm3=C_plus(n, tau_n[n])+C_minus(n, tau_n[n])\n\t\t\n\t\t#AMEAN[n]=(1./mu_1[n])*(term1+term2+term3)+direct/mu_0\t\n\t\tAMEAN[n]=(1./mu_1[n])*(F_plus_taumax[n]+F_minus_taumax[n])+direct/mu_0\t\n\t\n\t########################\n\t###Compute \"surface intensity\"\n\t#########################\t\n\t#\"Surface intensity\" refers to the total intensity that would be intercepted by a particle at the surface of the planet. Whereas the total intensity is equal to (F_plus[-1]+F_minus[-1])/mu_1+direct[-1]/mu_0, the surface intensity is instead equal to (F_minus[-1])/mu_1+direct[-1]/mu_0, i.e. the downwelling diffuse intensity (since the bottom intensity is cut out due to there being a planet there) plus the direct intensity\n\t\n\tsurface_intensity=(F_minus_taumax[-1]/mu_1[-1])+(np.pi*F_s)*np.exp(-(tau_c[-1])/mu_0)\n\t\n\t########################\n\t###Return Result\n\t#########################\n\t#F_minus_tau0\n\t#np.max(np.abs((F_minus_taumax[:-1]-F_minus_tau0[1:]))/F_minus_tau0[1:])\n\t#np.max(np.abs((F_plus_taumax[:-1]-F_plus_tau0[1:]))/F_plus_tau0[1:])\n\t\n\treturn (F_plus_tau0, F_plus_taumax, F_minus_tau0, F_minus_taumax, F_net, AMEAN, surface_intensity)", "def vacuum_to_air(wavelength):\n # Following the vacuum to air conversion the formula from Donald Morton (2000, ApJ. Suppl., 130, 403) which is also a IAU standard\n # - More info: http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion\n s_square = np.power(1.e4 / wavelength, 2)\n n = 1 + 0.0000834254 + 0.02406147 / (130 - s_square) + 0.00015998 / (38.9 - s_square)\n return wavelength/n # Angstroms", "def refractive_index_fused_silica(wavelength):\n wavelength_um = wavelength / 1000\n\n A0 = 2.104025406E+00\n A1 = -1.456000330E-04\n A2 = -9.049135390E-03\n A3 = 8.801830992E-03\n A4 = 8.435237228E-05\n A5 = 1.681656789E-06\n A6 = -1.675425449E-08\n A7 = 8.326602461E-10\n\n n = np.sqrt( A0 + A1 * wavelength_um ** 4 + A2 * wavelength_um ** 2 + A3 * wavelength_um ** -2 + \\\n A4 * wavelength_um ** -4 + A5 * wavelength_um ** -6 + A6 * wavelength_um ** -8 + A7 * wavelength_um ** -10 )\n\n return n", "def d50Illum(wavelength):\n return _d50Illum.calc(wavelength)", "def test_ulaw(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.wav\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=8, encoding=\"u-law\", duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 8\n assert info.encoding == \"ULAW\"", "def get_ambient_light(self, ldr_voltage: Optional[int] = None) -> float:\n if ldr_voltage is None:\n ldr_voltage = self.raw_ldr_voltage\n\n # TODO: this conversion algorithm is straight from the manual but it seems odd.\n # It goes \"to infinity\" as ldr_voltage nears 1023 (hence the clamp, I guess)\n # Clarify.\n if ldr_voltage > 1022:\n ldr_voltage = 1022\n if ldr_voltage < 1:\n ldr_voltage = 1\n\n return self.ldr_pull_up_resistance / ((1023 / ldr_voltage) - 1)", "def _frequency_to_wavelength(freq):\n return ifc.SPEED_OF_LIGHT_METRES_PER_SECOND / freq", "def set_wavelength(self, wavelength):\n print('Setting Santec wavelength to %.4f nm' % wavelength)\n\n # We need to select which of the 4 lasers to select 
depending on\n # the desired wavelength\n\n if 1530.0 < wavelength < 1630.000001:\n self.santec1.write(\"SW 4\")\n self.santec4.write(\"WA %.4f\" % wavelength)\n if self.active_module != 4:\n self.active_module = 4\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1440.0 < wavelength < 1530.1:\n self.santec1.write(\"SW 3\")\n self.santec3.write(\"WA %.4f\" % wavelength)\n if self.active_module != 3:\n self.active_module = 3\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1355 < wavelength < 1440.1:\n self.santec1.write(\"SW 2\")\n self.santec2.write(\"WA %.4f\" % wavelength)\n if self.active_module != 2:\n self.active_module = 2\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1259.999999 < wavelength < 1355.1:\n self.santec1.write(\"SW 1\")\n self.santec1.write(\"WA %.4f\" % wavelength)\n if self.active_module != 1:\n self.active_module = 1\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n else:\n print(\"Wavelength out of range. No change will be made\")", "def get_iPTF16asu():\n z = 0.187\n ebv = 0.0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')\n tb = tb.to_pandas()\n tb = tb[tb[\"col4\"].values!=\">\"]\n \n tb = tb.rename(columns={'col1' : 'mjd',\n 'col2': 'tmax_rf',\n 'col3': 'filter',\n \"col4\": 'mag',\n 'col5': 'emag',\n 'col6': 'instrument'})\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb[\"mag\"] = np.array(tb[\"mag\"].values, dtype = np.float)\n #tb[\"emag\"] = np.array(tb[\"emag\"].values, dtype = np.float)\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb[tb.wave!=0]\n return tb", "def gen_OSA(self, time_window_ps, center_wavelength_nm, power, \n power_is_epp = False,\n fileloc = 'O:\\\\OFM\\\\Maser\\\\Dual-Comb 100 MHz System\\\\Pump spectrum-Yb-101614.csv',\n log = True, rows = 30): # Yb spectrum\n \n try:\n self.fileloc = fileloc\n \n self.set_time_window_ps(time_window_ps)\n \n self.center_wl = center_wavelength_nm # reference wavelength (nm) \n \n self.w0 = (2. * np.pi * self.c) / self.center_wl # reference angular frequency \n \n self.setup_grids()\n \n if not power_is_epp: \n power = power / self.frep\n \n # Read in OSA data\n osa_data = np.genfromtxt(self.fileloc, delimiter = ',', skiprows = rows) \n \n wavelengths = osa_data[:,0]# (nm)\n wavelengths = self.internal_wl_from_nm(wavelengths)\n \n intensity = osa_data[:,1]# (arb. 
units)\n\n if log:\n intensity = 10.**(intensity / 10.)\n \n freq_abs = self.c/wavelengths\n freq_abs = np.sort(freq_abs)\n \n self.freq_rel = freq_abs - self.c / self.center_wl\n \n pulse_envelope = interp1d(self.freq_rel, intensity, kind='linear',\n bounds_error = False, fill_value=0)\n \n self.gridded_intensity = pulse_envelope(self.V / (2*np.pi))\n \n # Calculate time domain complex electric field A\n self.A = IFFT_t(self.gridded_intensity)\n # Calculate normalization factor to achieve requested \n # pulse energy\n e_scale = np.sqrt(power / self.calc_epp() )\n self.A = self.A * e_scale\n \n except IOError:\n print ('File not found.')", "def read300yr(period):\n directory300 = '/seley/ypeings/simu/PAMIP-1.1-QBO-300yr/monthly/'\n file300 = 'U10_1700-2000.nc'\n filename = directory300 + file300\n \n data = Dataset(filename)\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n u10q = data.variables['U10'][:]\n data.close()\n \n ### Reshape in year/month\n u10n = np.reshape(u10q,(u10q.shape[0]//12,12,lat.shape[0],lon.shape[0]))\n \n ### Calculate over particular months\n u10 = UT.calcDecJanFeb(u10n,lat,lon,'surface',1)\n \n ### Slice U10 at 65N\n latq = np.where((lat >= 64.5) & (lat <= 65.5))[0]\n lat = lat[latq].squeeze()\n u10 = u10[:,latq,:].squeeze()\n \n ### Take zonal mean \n u10z = np.nanmean(u10,axis=1)\n \n ### Remove missing data\n mask = np.where(u10z > -1e5)[0]\n \n ### Detrend\n u10zdt = sss.detrend(u10z[mask],type='linear')\n \n return lat,lon,u10zdt", "def get_tod(self, unit=None):\n tod = Tod.empty((self.get_ndetectors(), np.sum(self.get_nsamples())))\n sizeofpmatrix = self.info.npixels_per_sample * tod.size\n pmatrix = np.zeros(sizeofpmatrix, dtype=int)\n status = tmf.madmap1_read_tod(self.info.todfile, self.info.invnttfile,\n self.info.convert, self.info.npixels_per_sample, 0, tod.T, pmatrix)\n if status != 0: raise RuntimeError()\n if unit is not None:\n tod.unit = unit\n return tod", "def getWavelength(self, inAngle, outAngle, wavelengths = [0.25,1.0]):\n\n # Get prism point and angle of input at Unit3d\n #\n pt = self.getInputPoint()\n u = Unit3d(Angle(inAngle))\n\n # Guess at initial wavelngth\n wave = (wavelengths[1] - wavelengths[0])/2\n # Make input ray at guess wavelength\n ray = IntensityRay(pt,u,wave)\n\n # Parameters for seaerch\n delta = 0.1\n forward = True\n na = float(\"inf\") # New angle\n\n while abs(na - outAngle) > 1.0e-9/abs(outAngle) :\n nray = ray*self # New Ray through prism\n na = nray.getAngle()\n na = na.theta*math.cos(na.psi) # In radians\n if na < outAngle: # Less that target\n wave += delta\n forward = True\n else:\n if forward: # Half step\n delta *= 0.5\n forward = False\n wave -= delta\n if wave < wavelengths[0] or wave > wavelengths[1]:\n print(\"Out of wavelength range :\")\n return float(\"nan\")\n\n ray.wavelength = wave # Update the wavelength of ray\n\n return ray.getWavelength() # End of loop, so success, return value", "def getCalibration(self):\n self.a0 = float(self.getParameter(index=1))\n self.a1 = float(self.getParameter(index=2))\n self.a2 = float(self.getParameter(index=3))\n self.a3 = float(self.getParameter(index=4))\n status = self.getStatus()\n self.wavelength = [ self.a0 + self.a1*x + self.a2*x*x + self.a3*x*x*x \n for x in range(status.pixels)]\n if self.discardTrailingSamples > 0:\n self.wavelength = self.wavelength[:-self.discardTrailingSamples]\n if self.discardLeadingSamples > 0:\n self.wavelength = self.wavelength[self.discardLeadingSamples:]", "def get_observation(observation_id: str):\n 
pass", "def spectrum(self, wl: Union[float, ndarray]) -> Union[float, ndarray]:\n wlm = wl * 1e-9 # Wavelength to meters\n return 3.74183e-16 * wlm ** -5. / (np.exp(0.014388 / (wlm * self.temp)) - 1.)", "def test_regression_determine_wavelength_solution(\n ad, params, caplog, change_working_dir, path_to_refs, request):\n caplog.set_level(logging.INFO, logger=\"geminidr\")\n\n with change_working_dir():\n logutils.config(file_name='log_regress_{:s}.txt'.format(ad.data_label()))\n p = GNIRSLongslit([ad])\n p.viewer = geminidr.dormantViewer(p, None)\n\n p.determineWavelengthSolution(**{**determine_wavelength_solution_parameters,\n **params})\n\n wcalibrated_ad = p.streams[\"main\"][0]\n\n for record in caplog.records:\n if record.levelname == \"WARNING\":\n assert \"No acceptable wavelength solution found\" not in record.message\n\n ref_ad = astrodata.open(os.path.join(path_to_refs, wcalibrated_ad.filename))\n model = am.get_named_submodel(wcalibrated_ad[0].wcs.forward_transform, \"WAVE\")\n ref_model = am.get_named_submodel(ref_ad[0].wcs.forward_transform, \"WAVE\")\n\n x = np.arange(wcalibrated_ad[0].shape[1])\n wavelength = model(x)\n ref_wavelength = ref_model(x)\n\n pixel_scale = wcalibrated_ad[0].pixel_scale() # arcsec / px\n slit_size_in_arcsec = float(wcalibrated_ad[0].focal_plane_mask(pretty=True).replace('arcsec', ''))\n slit_size_in_px = slit_size_in_arcsec / pixel_scale\n dispersion = abs(wcalibrated_ad[0].dispersion(asNanometers=True)) # nm / px\n\n # We don't care about what the wavelength solution is doing at\n # wavelengths outside where we've matched lines\n lines = ref_ad[0].WAVECAL[\"wavelengths\"].data\n indices = np.where(np.logical_and(ref_wavelength > lines.min(),\n ref_wavelength < lines.max()))\n tolerance = 0.5 * (slit_size_in_px * dispersion)\n\n write_report = request.config.getoption('--do-report', False)\n failed = False\n try:\n np.testing.assert_allclose(wavelength[indices], ref_wavelength[indices],\n atol=tolerance)\n except AssertionError:\n failed = True\n raise\n finally:\n if write_report:\n do_report(wcalibrated_ad, ref_ad, failed=failed)\n\n if request.config.getoption(\"--do-plots\"):\n do_plots(wcalibrated_ad)" ]
[ "0.5712585", "0.5581919", "0.5486108", "0.53831595", "0.53533727", "0.53258014", "0.5305268", "0.526969", "0.52618587", "0.52508855", "0.52357584", "0.51796144", "0.516192", "0.5157981", "0.5145479", "0.5129892", "0.50970304", "0.50838137", "0.5080797", "0.5045471", "0.5044165", "0.50435984", "0.50404006", "0.5017592", "0.49941468", "0.49887577", "0.49881014", "0.49863347", "0.49751332", "0.49611792", "0.49575964", "0.49547914", "0.4938535", "0.49320897", "0.49053022", "0.48991328", "0.48884866", "0.48695815", "0.48341852", "0.4829966", "0.48170704", "0.48146194", "0.48130408", "0.48050964", "0.48024154", "0.48015058", "0.4800287", "0.47913882", "0.47849107", "0.47846746", "0.4778462", "0.47762427", "0.476735", "0.4762327", "0.47537616", "0.47494623", "0.47297296", "0.47169808", "0.47160786", "0.47144374", "0.470919", "0.470919", "0.47044158", "0.46978906", "0.46942973", "0.46867839", "0.46839", "0.46835375", "0.46805042", "0.46799946", "0.46629068", "0.4659726", "0.4649702", "0.4648093", "0.46471384", "0.4640122", "0.46289173", "0.46147165", "0.4609321", "0.46053845", "0.4598725", "0.45923424", "0.45877036", "0.45812902", "0.45766076", "0.45701858", "0.45607302", "0.45580047", "0.45323497", "0.4523522", "0.45233053", "0.45223525", "0.45193225", "0.45183045", "0.45174035", "0.4514776", "0.45126286", "0.45121214", "0.4510876", "0.45090827", "0.45070258" ]
0.0
-1
r""" Calculate Angstrom alpha exponent.
def angstrom_alpha(aod1, lambda1, aod2, lambda2): return - np.log(aod1 / aod2) / np.log(lambda1 / lambda2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_exponent():\n pass", "def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha", "def calc_alpha(epsilon): \n return float(0.5 * np.log((1-epsilon)/epsilon))", "def encode_exponent(e: int) -> int:\n assert 0 <= e <= MAX_EXPONENT\n return DECODING_TABLE[e]", "def test_exp_decay(self, alpha: float):\n x = np.linspace(0, 1, 100)\n y = np.exp(alpha * x)\n\n alpha_guess = guess.exp_decay(x, y)\n\n self.assertAlmostEqualAbsolute(alpha_guess, alpha)", "def exp(self, num, zf=2):\n return str(num).zfill(zf)", "def superobl_alpha(k):\n if k%2==0:\n return 2**(-(k - 1))*(1 - 1/(k + 1)**2)**(k/2)\n else:\n return 2**(-(k - 1))*(1 - 1/k**2)**((k - 1)/2)", "def schechter(l,alpha):\n return exp(-l)*(l**alpha)", "def myExp(base,exponent,modulus):\n result = 1\n while exponent > 0:\n if exponent & 1 == 1:\n result = (result * base) % modulus\n exponent = exponent >> 1\n base = (base * base) % modulus\n return result", "def correctalpha(desiredalpha, level):\n \n correctedalpha = 1 - (1 - desiredalpha) ** (1.0 / level)\n \n return correctedalpha", "def _alpha(self):\n return _handle_ab(self.solution, self.use_const)[0]", "def power_normalize(xx, alpha = 0.5):\r\n\treturn np.sign(xx) * np.abs(xx) ** alpha", "def calculateCrypt(asci: int, e: int, n: int) -> int:\n return pow(int(asci),e,n)", "def get_ralpha(self, alpha, xalpha):\n A = self.A\n b = self.b\n ralpha = dot(A, xalpha - b)\n return ralpha", "def _se_alpha(self):\n return _handle_ab(self._se_all, self.use_const)[0]", "def inverse_exponential(x):\n return math.exp(-x)", "def negative_exponent():\n print(\"Problem: Negative exponent\")\n\n a = float(input())\n n = int(input())\n\n result = power(a, n)\n print(result)", "def exponent(num,power=2):\n return num ** power", "def decode_exponent(e: int) -> int:\n assert 0 <= e <= MAX_EXPONENT\n return ENCODING_TABLE[e]", "def rdf_deriv(alpha):\n\n ALPHA_MAX = 0.59999\n\n ALPHA0 = 0.6\n\n numpy.where(alpha > ALPHA_MAX, ALPHA_MAX, alpha)\n\n return -1.0/(3.0*ALPHA0)*(alpha/ALPHA0)**(-2.0/3.0)/(1.0-(alpha/ALPHA0)**(1.0/3.0))**2", "def alpha_number(alpha):\r\n if alpha.isupper() == False:\r\n num = ord(alpha) - 96\r\n return num\r\n elif alpha.isupper() == True:\r\n num = ord(alpha) - 64\r\n return num", "def exponential(value):\n return math.exp(value)", "def reg(letter,number):\n v=ord(letter.lower())-ord('a')\n v=v<<10\n v+=number\n return v", "def exponent(self):\n return self.__exponent", "def nalpha(self) -> int:\n return self._core.nalpha()", "def dd_xpowalpha(cls,grid,alpha,cutoff=False):\n grid.l.info('bc.hom: Setting initial data to (-x)^alpha.')\n grid.l.debug('bc.hom: Parameters to dd_xpowalpha: alpha={},cutoff={}'.format(alpha,cutoff))\n if alpha is 0:\n def tmp(x): return float(x[1]<=0)\n return cls._tpl(grid, tmp) \n\n if cutoff:\n def tmp(x):\n return sum(pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n else:\n def tmp(x):\n return sum(pow(float(x[i]>=0)*x[i],alpha)-pow(-1*float(x[i]<0)*x[i],alpha) for i in range(0,grid.u.ndim))\n return cls._tpl(grid, tmp)", "def alpha_from_R(R):\n return 1-np.abs(R)**2", "def se_alpha(self):\n return self._se_alpha", "def powAlpha( n ):\n return (1-betaval)*Fib(n) + Fib(n-1)\n #return Fib(n+1) - Fib(n) * betaval", "def private_exponent(primes, e): \n return pow(e, -1, math.lcm((primes[0]-1),(primes[1]-1)))", "def getExponent(number):\n exponent = np.floor(np.log10(number))\n return(exponent)", "def exponentiation():\n print(\"Problem: Exponentiation\")\n\n a = float(input())\n n = int(input())\n\n result = power_v2(a, n)\n 
print(result)", "def calc_alpha_init(alpha, decay):\n\tif not decay or decay <= 0:\n\t\treturn alpha\n\telse:\n\t\treturn float(alpha * decay)", "def Exp(num):\n return math.exp(float(num))", "def alpha(self):\n return self._alpha", "def alpha(self):\n return self._alpha", "def alpha(self):\n return self._alpha", "def _get_alpha(cls, cl: float, tail: str):\n alpha = (1 - cl) / 2 if tail == \"two\" else (1 - cl)\n return round(alpha, 3)", "def getExponent(self):\n return _libsbml.ASTNode_getExponent(self)", "def alpha(self) -> float:\n return self._alpha", "def _g(self, x):\n e_x = math.exp(-self._alpha * x)\n return (1.0 / (1 + e_x))", "def get_alpha(self, error_rate, func='default'):\n return 0.5 * np.log((1. - error_rate) / error_rate)", "def num_alpha(self) -> int:\n return self._num_alpha", "def calculate_alphabeta(self, p, prec):\n var('x')\n sqrtdelta = None\n try:\n M = Qp(p, prec).extension(x ** 2 - self.constants.delta, names=\"padicroot\")\n sqrtdelta = M.gen(0)\n except NotImplementedError:\n try:\n M = Qp(p, prec)\n sqrtdelta = M(self.constants.delta).sqrt()\n except NotImplementedError:\n # Exceptional case.\n M = Qp(p, prec).extension(x ** 2 - self.constants.A * x - self.constants.B, names=\"padicroot\")\n alpha = M.gen(0)\n beta = self.constants.A - alpha\n return (alpha, beta)\n\n alpha = (self.constants.A + sqrtdelta) / 2\n beta = self.constants.A - alpha\n return (alpha, beta)", "def test_check_digits_with_wrong_alphabet(self, _, alpha):\n with self.assertRaises(exceptions.WrongArgumentValueError):\n positional.encode(42, 10, alphabet=alpha)", "def exponential(image: np.ndarray) -> np.ndarray:\n return np.power(image, 0.75).astype('uint8')", "def power(base, exponent):\n return base ** exponent", "def getExponent(self):\n return _libsbml.Unit_getExponent(self)", "def erfcinv(a):", "def modExponent(self, base, power):\n result = 1\n power = int(power)\n base = base % self.mod\n while power > 0:\n if power & 1:\n # self.modReduce(result * base)\n result = result * base % self.mod\n base = base * base % self.mod # self.modReduce(base * base)\n power = power >> 1\n return result", "def optimal_alpha():\n\n # When I checked all of alphas, -0.01 was the best\n alpha = -0.01\n # np.random.choice([-0.06, -0.01, 0.04, 0.1])\n return alpha", "def alpha(self):\n if self._alpha is None:\n df = self.fdist\n alpha_ = -self._powerlaw(df.index, df.freq)\n self._alpha = alpha_\n\n # return\n return self._alpha", "def check_alpha(a):\n\n a = check_1d(a, \"alpha\")\n if any(map(lambda d: d <= 0, a)):\n raise Exception('Alpha cannot be 0 or negative')\n\n return a", "def Alpha(self):\r\n return self._alpha", "def alpha(self):\r\n return self.unif[17]", "def power(num, exponent):\n return num ** exponent", "def comp_alpha(self):\n pass", "def alpha(self):\n return tf.clip_by_value(tf.exp(self.log_alpha), *SCALE_ALPHA_MIN_MAX)", "def exp(a: Decimal, b: Decimal) -> Decimal:\n return a ** b", "def calculateDeCrypt(asci: int, d: int, n: int) -> int:\n return pow(int(asci),d,n)", "def exponents(num1, num2):\n product = num1 ** num2\n return product", "def alpha_0(r): # r in um\n r1 = r / (1e6) # meters\n epsilonr = 3.\n return 3. * epsilon0 * ((epsilonr - 1)/(epsilonr + 2)) * (4. * np.pi / 3.) 
* (r1 ** 3)", "def _enc(x: int) -> float:\n return 2 + x + (29 / (x ** 2 + (1 - x) ** 2))", "def idecibel(x):\n return 10.0 ** (x / 10.0)", "def letter2num(letters, zbase=True):\n\n letters = letters.upper()\n res = 0\n weight = len(letters) - 1\n for i, ch in enumerate(letters):\n res += (ord(ch) - 64) * 26 ** (weight - i)\n if not zbase:\n return res\n return res - 1", "def gamma_natural(A):\n return 2.6544188e-12*A", "def letter_code(letter):\n value = ord(letter.lower()) - ord('a') + 10\n return value + value // 11", "def fill_alpha(self) -> Number:\r\n from apysc.type import value_util\r\n self._initialize_fill_alpha_if_not_initialized()\r\n fill_alpha: Number = value_util.get_copy(value=self._fill_alpha)\r\n return fill_alpha", "def expexp(x,y,z,p):\n\n \"\"\"\n Fermat's little theorem can be exploited to handle large values.\n This theorem states that:\n (a^p) is equivalent to (a mod p)\n This is the same as:\n (a^(p - 1)) is equivalent to (1 mod p)\n Thus, modular exponentiation can be done with (p - 1) to get\n (y^z mod (p - 1)), which is stored as b.\n For each test, the b values are:\n Test 1: b = 0\n Test 2: b = 4\n Test 3: b = 72\n Test 4: b = 72\n As shown, these values are much smaller to handle. Now, \n perform modular exponentiation again, this time with (p),\n to get (x^(y^z) mod p), store as a, and return.\n For each test, the a values are:\n Test 1: a = 1\n Test 2: a = 16\n Test 3: a = 1\n Test 4: a = 4\n Each return value matches the expected values in the test,\n therefore the algorithm is correct.\n \"\"\"\n b = pow(y, z, p - 1)\n a = pow(x, b, p)\n return a", "def calculate_alpha(color, F, B):\n return np.dot(color - B, F - B) / np.dot(F - B, F - B)", "def addExponent(self):\n\t\t# if the exponent part is not set and this number is allowed an exponent\n\t\tif(self.exponent == None and self.allowExponent):\n\t\t\t# set the exponent to another number (disallowing exponents since we can't\n\t\t\t# have an exponent with an exponent\n\t\t\tself.exponent = Number(allowExponent = False)", "def _pvalue_alpha(self):\n return _handle_ab(self._pvalues_all, self.use_const)[0]", "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def Es_case_E(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n L = (z + beta*sqrt(x**2*(1-beta2) + z**2))/(1-beta2)\n \n S = sqrt(x**2 + L**2)\n N1 = L - beta * S\n D = S-beta*L\n \n return N1/D**3", "def exp(x):\n raise NotImplementedError", "def power(num, exponent):\n power = num ** exponent\n return power", "def fast_exp(a, x, n):\n x_2 = int2bin(x)\n vprint(\"{} = [{}]_2\".format(str(x), x_2))\n powers = [a % n]\n vprint(\"{}^(2^0) = {}^1 = {} \\\\equiv {}\".format(a, a, a, (a % n)))\n i = 1\n while i < len(x_2):\n # This (hilariously ugly) print statement prints the\n # intermediary operations in a format that can be easily\n # exported to LaTeX. 
TODO: Split it up into sane chunks.\n vprint(\"{}^{{ {}^{} }} = {}^{{ {} }} = {}^{{ {} }} * {}^{{ {} }} = {}*{} = {} \\\\equiv {}\".format(\n a, 2, i,\n a, pow(2, i),\n a, pow(2, i-1),\n a, pow(2, i-1),\n powers[-1], powers[-1],\n powers[-1] * powers[-1],\n (powers[-1] * powers[-1]) % n))\n next_power = (powers[-1] * powers[-1]) % n\n powers.append(next_power)\n i += 1\n\n vprint(\"{}^{{ {} }} = ...\".format(a, x))\n rpowers = list(reversed(powers))\n prod = 1\n i = 0\n while i < len(x_2):\n bit = x_2[i]\n power = rpowers[i]\n if bit == \"1\":\n vprint(\"* {} \\t== {}^{{ 2^{{ {} }} }}\\n\".format(power, a, len(x_2) - i - 1))\n prod *= power\n i += 1\n result = prod % n\n vprint(\"= {} \\\\equiv {}\".format(prod, result))\n return result", "def alphahighz(self, z):\n return self.alphaMe(3.8,self.r_vect[0],self.alpha0_vect[0]) - 0.018*(z-3.8)", "def alpha_evol( redshift, richness):\n\treturn function_eq10( redshift, richness, alpha_z[0], alpha_z[1], alpha_z[2] )", "def Ernst_T1(TR, alpha_e):\n return -TR / np.log(np.cos(alpha_e))", "def truncated_power(self, exponent, degs = None):\n a = exponent\n return self.truncated_fun(\n fun_der = lambda k, t: binom(a, k) * factorial(k) * t**(a-k),\n degs = degs\n )", "def _get_a(b):\n\t\tbval = (b - 6.0)\n\t\t_a = np.exp(-1.6805 - 1.7139*bval + 0.8155*bval**2 - 0.6667*bval**3)*r0\n\t\treturn _a", "def _get_alpha(self, m_t, v_t):\n return max(0, ((-m_t * self._psi \n + math.sqrt((m_t ** 2 * self._phi ** 4) \n / 4 + v_t * self._phi ** 2 * self._xi)) \n / (v_t * self._xi)))", "def pvalue_alpha(self):\n return self._pvalue_alpha", "def power(base, exp):\n\tans = [1]\n\twhile exp > 0:\n\t\tcarry = 0\n\t\tfor i in xrange(len(ans)):\n\t\t\tmult = ans[i] * base + carry\n\t\t\tans[i] = mult % 10\n\t\t\tcarry = mult / 10\n\t\twhile carry > 0:\n\t\t\tans.append(carry % 10)\n\t\t\tcarry /= 10\n\t\texp -= 1\n\treturn ans", "def encode(num, alphabet=BASE62):\n if num == 0:\n return alphabet[0]\n arr = []\n base = len(alphabet)\n while num:\n num, rem = divmod(num, base)\n arr.append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)", "def exponential(gp_link=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Log_ex_1()\r\n\r\n analytical_mean = False\r\n analytical_variance = False\r\n return noise_models.exponential_noise.Exponential(gp_link,analytical_mean,analytical_variance)", "def gamma_function(\n a: ArrayLike,\n exponent: ArrayLike = 1,\n negative_number_handling: Literal[\n \"Clamp\", \"Indeterminate\", \"Mirror\", \"Preserve\"\n ]\n | str = \"Indeterminate\",\n) -> NDArrayFloat:\n\n a = as_float_array(a)\n exponent = as_float_array(exponent)\n negative_number_handling = validate_method(\n negative_number_handling,\n (\"Indeterminate\", \"Mirror\", \"Preserve\", \"Clamp\"),\n '\"{0}\" negative number handling is invalid, it must be one of {1}!',\n )\n\n if negative_number_handling == \"indeterminate\":\n return as_float(a**exponent)\n elif negative_number_handling == \"mirror\":\n return spow(a, exponent)\n elif negative_number_handling == \"preserve\":\n return as_float(np.where(a <= 0, a, a**exponent))\n else: # negative_number_handling == 'clamp':\n return as_float(np.where(a <= 0, 0, a**exponent))", "def phase_Earth(alpha):\n phase = 10.**(-0.4*(- 1.060e-3*alpha + 2.054e-4*alpha**2.))\n return phase", "def modular_exponentiation(x, y, n):\r\n result = 1\r\n while y > 0:\r\n if y & 1 == 1:\r\n result = (result * x) % n\r\n\r\n y = y >> 1\r\n x = (x * x) % n\r\n return result", "def __call__(self, epoch):\n decay = (1 - (epoch 
/ float(self.maxEpochs))) ** self.power\n alpha = self.initAlpha * decay\n \n # return alpha\n return float(alpha)", "def mod_exp(val, exp, modulus):\n return pow(int(val), int(exp), int(modulus))", "def _calculate_norm_alpha(sr: int, hop_size: int, tau: float):\n dt = hop_size / sr\n return math.exp(-dt / tau)", "def elevation_to_a_degree(numb1, numb2):\r\n return f\"Your result: {numb1**numb2}\"", "def power(a, b):\n \n return a**b", "def int_to_alpha(num):\n remainder = num\n text = []\n if num >= 26:\n major = remainder // 26\n text.append(ascii_lowercase[remainder // 26 - 1])\n remainder -= major * 26\n text.append(ascii_lowercase[remainder])\n return \"\".join(text)", "def alpha_exact(z, x, beta):\n f = lambda a: a - beta/2 * sqrt(x**2 + 4*(1+x) * sin(a)**2 ) - z\n \n res = scipy.optimize.root_scalar(f, bracket=(-1,1))\n \n return res.root", "def base_alphabet_to_10(letters):\r\n\r\n return sum(\r\n (ord(letter) - A_UPPERCASE + 1) * ALPHABET_SIZE ** i\r\n for i, letter in enumerate(reversed(letters.upper()))\r\n )", "def __pow__(self, a: float) -> np.ndarray:\n return np.e**(a*self.logarithm)", "def encryptAffine(letter, a, b):\n if(gcd(7, 26) != 1):\n return \"Error, not a bijection\"\n else:\n encrypted_letter = \"\"\n for i in range(len(letter)):\n if(ord(letter[i]) == 32):\n encrypted_letter += chr(ord(letter[i]))\n else:\n let = letter[i].lower()\n let = ord(let) - 97\n new_let = (((let* a) + b) % 26) + 97\n encrypted_letter += chr(new_let)\n return encrypted_letter" ]
[ "0.71915054", "0.6592303", "0.64282143", "0.6347429", "0.63466847", "0.63178813", "0.63117254", "0.6303169", "0.6258958", "0.62512463", "0.6246484", "0.62283045", "0.62211376", "0.62090594", "0.61820316", "0.6104411", "0.61022353", "0.608179", "0.60706884", "0.60041946", "0.59448314", "0.5888297", "0.58719623", "0.5861893", "0.5858459", "0.5854756", "0.5851482", "0.58491886", "0.58405995", "0.5835575", "0.58299726", "0.58090067", "0.5783786", "0.578117", "0.5771549", "0.5771549", "0.5771549", "0.57703805", "0.5764625", "0.57486844", "0.570689", "0.57017004", "0.5681861", "0.5679434", "0.5675041", "0.5666225", "0.5664334", "0.56580395", "0.56492186", "0.56377244", "0.5591701", "0.55829555", "0.55766803", "0.55760914", "0.55748713", "0.5574117", "0.55592847", "0.55543405", "0.5543285", "0.55401224", "0.5538213", "0.55374604", "0.55361956", "0.55235755", "0.5522784", "0.5504261", "0.5486094", "0.5484698", "0.5484379", "0.5480815", "0.54704595", "0.5446941", "0.543953", "0.54390705", "0.54312414", "0.54311013", "0.54097974", "0.5408075", "0.5407888", "0.54076636", "0.54063493", "0.5399118", "0.53974146", "0.53936756", "0.5382479", "0.5373985", "0.5368054", "0.5358558", "0.5357504", "0.5356759", "0.53524786", "0.53478545", "0.53436005", "0.53248584", "0.53226775", "0.53216183", "0.5316805", "0.5316458", "0.53039277", "0.5301133" ]
0.56222945
50
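The `angstrom_alpha` document in the row above encodes the Angstrom exponent relation alpha = -ln(AOD1 / AOD2) / ln(lambda1 / lambda2) for aerosol optical depth measured at two wavelengths. A minimal usage sketch of that same function, with illustrative inputs (the 440 nm / 870 nm optical depth values are assumed for the example and are not taken from this dataset):

import numpy as np

def angstrom_alpha(aod1, lambda1, aod2, lambda2):
    # Angstrom exponent from aerosol optical depth at two wavelengths
    return -np.log(aod1 / aod2) / np.log(lambda1 / lambda2)

# Hypothetical inputs: AOD 0.50 at 440 nm and 0.20 at 870 nm
print(round(angstrom_alpha(0.50, 440.0, 0.20, 870.0), 2))  # ~1.34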
Given a file name for baby.html, returns a list starting with the year string followed by the namerank strings in alphabetical order. ['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
def extract_names(filename): f = open(filename,'rU') name_data = f.read() year_data= re.search(r'Popularity\sin\s(\d\d\d\d)', name_data) if not year_data : print ' no year found ' sys.exit(1) name_year=year_data.group(1) #print 'year :' #print name_year tuples=re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>',name_data) #print 'tuples' #print tuples dict_name = {} for a,b,c in tuples : #print a + ' boy name: ' + b + ' , girl name : ' + c if b not in dict_name : dict_name[b] = a if c not in dict_name : dict_name[c] = a #print dict_name lst_names = sorted(dict_name.keys()) result_names_sorted = [] result_names_sorted.append(name_year) for name in lst_names : #print name + " : " + dict_name[name] result_names_sorted.append(name + ' ' + dict_name[name]) #print result_names_sorted return result_names_sorted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_names(filename):\n\n # Extracting the year\n year_match = re.search(r'\\d\\d\\d\\d', filename)\n if not year_match:\n sys.stderr.write('Could not find a year!\\n')\n sys.exit()\n year = year_match.group()\n\n # Opening the file\n try:\n with open(filename) as file:\n data = file.read()\n except FileNotFoundError:\n sys.stderr.write('There is no such file in the directory!\\n')\n sys.exit()\n\n # Finding patterns\n regex = re.compile(r'<td>\\w+')\n names = regex.findall(data)\n for i in range(len(names)):\n names[i] = names[i].replace('<td>', '')\n\n # Creating a dictionary with names data\n names_dict = {}\n for i in range(0, len(names) - 2, 3):\n key = names[i]\n names_dict[key] = [names[i + 1], names[i + 2]]\n\n # Creating a list with result\n boy_names = []\n girl_names = []\n result = [year]\n for key, value in names_dict.items():\n if value[0] not in boy_names:\n result.append(value[0] + ' ' + key)\n boy_names.append(value[0])\n if value[1] not in girl_names:\n result.append(value[1] + ' ' + key)\n girl_names.append(value[1])\n\n result.sort()\n # result.insert(0, year)\n\n return result", "def name_extractor(file):\n \n import os\n import re\n \n name_list = []\n rank_dict = {}\n \n year = re.search(r'(\\d+)\\.html$', file) \n current = open(file) \n match = re.findall(r'<tr\\salign=\"right\"><td>(\\d+).*?>(\\w+).*?>(\\w+)', current.read())\n current.close\n\n \n for one_touple in match: #Check for existing match, only accept lower rank value into dictionary\n \n for index in range(1,2):\n \n if one_touple[index] in rank_dict:\n if rank_dict[one_touple[index]] < one_touple[0]:\n continue\n rank_dict[one_touple[index]] = one_touple[0]\n \n for one_item in rank_dict:\n \n ranking = rank_dict[one_item] #Build target list from dictionary formatted as \"Name rank\"\n name_list.append(f\"{one_item} {ranking}\") \n \n name_list = sorted(name_list)\n name_list.insert(0,year.group(1))\n \n return name_list", "def extract_names(filename):\n raw_text = read_html(filename) \n \n #searching for the year\n year = re.search('(<h3 align=\"center\">Popularity in )(\\d\\d\\d\\d)',raw_text).group(2)\n \n #searching for the list of names\n list_of_names = re.findall('<td>(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>',raw_text)\n \n #pair each name with it's rank\n name_and_rank = [] \n for line in list_of_names:\n name_and_rank.append((line[1], line[0]))\n name_and_rank.append((line[2], line[0]))\n \n # sort the list alphabetically\n name_and_rank = sorted(name_and_rank, key = lambda x:x[0])\n name_and_rank = dict(name_and_rank)\n\n return year, name_and_rank[:20]", "def getFileNames():\n input_path = \"/Users/tim/OneDrive/Master/Text_Mining/project/texts/glenarvon_html/\"\n temp_list = os.listdir(input_path)\n name_list = [i for i in temp_list if i[-4:] == \"html\"]\n name_list.sort(key=natural_keys) # see http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n return name_list, input_path", "def extract_names(filename):\n # +++your code here+++\n f = open(filename, 'r')\n fl = read_file(filename)\n\n l = []\n lFiltFinal = []\n\n year_match = re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', f.read())\n year = year_match.group(1)\n\n for line in fl:\n #if '<h3 align=\"center\">Popularity in' in line:\n #year = line[-10:-6]\n if '<tr align=\"right\"><td>' in line:\n rank = line[line.find('<td>')+len('<td>'):line.find('</td>')]\n boys = line[line.index('</td><td>')+len('</td><td>'):line.index('</td><td>',line.index('</td><td>')+1)]\n girls = 
line[line.index('</td><td>',line.index('</td><td>')+1)+len('</td><td>'):-6]\n l.append([boys,rank])\n l.append([girls,rank])\n\n lFilt = list(unique_by_first_n(1, l))\n\n lFiltFinal.append(year)\n for key in lFilt:\n lFiltFinal.append( key[0] + ' ' + key[1])\n\n lFiltFinal.sort()\n return lFiltFinal", "def extract_names(filename):\n # +++your code here+++\n # Opening the file\n f = open(filename, 'rU')\n # Reading all of the lines\n lines = f.readlines()\n # Empty list to hold the year, names, and ranks\n ranks_names = []\n for line in lines:\n # search for the year\n year = re.search(r'\\s(\\d\\d\\d\\d)</h3>', line)\n # if the year is found, append it to the list\n if year: \n ranks_names.append(year.group(1))\n # search for the rank, male name, and female name\n rank_male_female = re.search(r'(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>', line)\n # If they are found then append the male name plus its rank, as well as the \n # female name plus its rank\n if rank_male_female:\n ranks_names.append(rank_male_female.group(2) + ' ' + rank_male_female.group(1))\n ranks_names.append(rank_male_female.group(3) + ' ' + rank_male_female.group(1))\n # Sort the list alphabetically\n ranks_names.sort()\n # Return the list\n return ranks_names", "def extract_names(filename):\n f = open(filename, 'rU')\n file_text = f.read()\n f.close()\n \n year = re.search(r'Popularity in \\d\\d\\d\\d', file_text)\n if year:\n year = year.group()[-4:]\n else:\n print 'ERROR: year not found in ' + filename\n \n html_rank_names = re.findall(r'<tr align=\"right\"><td>\\d+</td><td>\\w+</td><td>\\w+</td>', file_text)\n name_ranks = {}\n i = 0\n while i < len(html_rank_names):\n line = html_rank_names[i]\n first_tag = line.find('<td>')\n first_end_tag = line.find('</td>')\n rank = line[first_tag + 4 : first_end_tag]\n \n second_tag = first_end_tag + 9\n second_end_tag = line.find('</td>', second_tag)\n name1 = line[second_tag : second_end_tag]\n \n third_tag = second_end_tag + 9\n third_end_tag = len(line) - 5\n name2 = line[third_tag : third_end_tag]\n \n # if the names already are in the dict, skip them because they have a larger number than what is already in the dict\n if name1 not in name_ranks: name_ranks[name1] = rank\n if name2 not in name_ranks: name_ranks[name2] = rank\n i = i + 1\n \n year_name_ranks = []\n year_name_ranks.append(year)\n for name, rank in name_ranks.iteritems():\n year_name_ranks.append(name + ' ' + rank)\n year_name_ranks.sort()\n return year_name_ranks", "def name_list(file_name):\n \n li = open(file_name)\n list_of_names = []\n\n for name in li:\n (first,last) = str.split(name,' ')\n list_of_names.append(Name(first,last))\n return list_of_names", "def parse_year(txt):\n\n txt = txt.strip()\n if \"-\" in txt:\n res = re.sub('[^0-9]', '', txt)\n return [res[0:4], res[4:8]]\n else:\n return [txt, txt]", "def get_imdb_list():\n list_file = 'imdb.txt'\n name_column = 26\n f = open(list_file, 'r')\n film_list = []\n pos = 0\n\n for line in f:\n pos += 1\n words = line.split()\n name = line[name_column:-1]\n # could be problematic is there are brackets in the film name\n year = name[name.find('(') + 1:name.find(')')]\n name = name.replace('(' + year + ')', '')\n film = {\n 'pos': pos,\n 'score': Decimal(words[2]),\n 'name': name.strip(),\n 'year': year\n }\n film_list.append(film)\n f.close()\n return film_list", "def list_names(text):\n with open(text, 'r') as names:\n list_of_names = list(names)\n name_list = [name.strip() for name in list_of_names]\n return name_list", "def getYears():\n url = 
\"http://www.boxofficemojo.com/weekend/\"\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n year_header = soup.find_all(name = \"b\")[1]\n year_elems = year_header.find_all([\"a\", \"font\"])\n years = [int(year.get_text()) for year in year_elems]\n return years", "def year_tracker(words):\n new_words = []\n for w in words:\n new_word = re.sub(r\"^[1][789][0-9]{2}$\", \"jahreszahl\", w) # for 1700-1999\n new_word = re.sub(r\"^[2][01][0-9]{2}$\", \"jahreszahl\", new_word) # for 2000-2199\n new_words += [new_word]\n return new_words", "def getFile(filename):\n f = open(filename)\n lines = f.readlines()\n f.close()\n names = []\n names.append(\"\")\n for name in lines[0].split(','):\n \n names.append(name[1:-1])\n names = sorted(names)\n return names", "def load_names(file_name: str) -> List[str]:\n full_path_name = os.path.join(os.getcwd(), 'names', file_name)\n with open(full_path_name, 'r') as file:\n names = file.read().rstrip('\\n').split(',')\n return sorted(names)", "def get_names(url):\n\t# get html element tree\n\ttree = get_tree(url)\n\t# Names are text within <a> elements in this list\n\t# xpath returns a list with alternating last and first names as elements\n\t# Concatenate each last name and first name pair and put in new list as full name\n\tnames = tree.xpath('//*[@id=\"research-teachinglist\"]/li//a//text()')\n\tfull_names = []\n\tfor i in range(0, len(names)-1, 2):\n\t\tfull_names.append(names[i] + names[i+1])\n\n\treturn full_names", "def get_filenames(start_year = 1980, end_year = 2009, path = 'ucr_offenses_known_monthly_1960_2016_dta/'):\n filenames = []\n for filename in os.listdir(path):\n #ignore pdf files in folder, filter out dta file names that contain 1980-2009\n if filename.endswith('.dta'):\n for years in range(start_year, end_year + 1):\n if str(years) in filename:\n filenames.append(filename)\n return(filenames)", "def get_surnames(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n surname = line.split('\\t')[1]\n result.append(surname)\n return result", "def filter_years(text):\n months = ['january', 'february', 'march', 'april', 'may', 'june',\n 'july', 'august', 'september', 'october', 'november', 'december']\n prepositions = ['around', 'after', 'at', 'as',\n 'approximately', 'before', 'between', 'by',\n 'during', 'from', 'in', 'near', 'past',\n 'since', 'until', 'within'] # removed: about, on\n conjugations = ['and']\n articles = ['the']\n times = ['early', 'mid', 'late']\n patterns = months + prepositions + conjugations + articles + times\n re_string = r'\\b(' + '|'.join(patterns) + r')\\b(\\s|-)\\b([0-9]{3,4})s?\\b(?i)(?!\\sMYA)\\s?(BCE|BC)?'\n years = [int(match.group(3)) * (-2*bool(match.group(4))+1)\n for match in re.finditer(re_string, text, re.IGNORECASE)]\n re_string = r'([0-9]{1,2})(st|nd|rd|th) century\\s?(BCE|BC)?'\n centuries = [(int(match.group(1)) * 100 - 100) * (-2*bool(match.group(2))+1)\n for match in re.finditer(re_string, text, re.IGNORECASE)]\n years += centuries\n years = [y for y in years if y<Dump.MAX_YEAR]\n return sorted(years + centuries)", "def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname", "def get_team_names(self, year1, year2):\n\n base_url = 
'http://www.sports-reference.com/cbb/schools/'\n response = urllib2.urlopen(base_url)\n content = response.read()\n soup = BeautifulSoup(content, 'html.parser')\n soup_results = soup.findAll('tr', {'class':''})\n extract_name = lambda name: name.split('/')[3]\n team_names = []\n \n for result in soup_results[1::]:\n year_span = result.findAll('td', {'align':'center'}) \n year_span = map(int, [year.string for year in year_span])\n\n if year_span[0] <= year1 and year_span[1] >= year2:\n team_name = result.find('a', href = True).get('href')\n team_name = extract_name(team_name)\n team_names.append(str(team_name))\n\n self.team_names = team_names", "def get_titles(filename):\n\n with open(filename) as f:\n reader = csv.DictReader(f, fieldnames = ['title', 'year'])\n titles = list(reader)\n return titles", "def extract_names(elements, year):\r\n milers = []\r\n groupings = _miler_grouper(elements)\r\n # import pdb; pdb.set_trace()\r\n for miler in groupings:\r\n person = {'last_name': miler[0],\r\n 'first_name': miler[1],\r\n 'trail_name': miler[2],\r\n 'year': year}\r\n milers.append(person)\r\n\r\n return milers", "def baby_search_engine(name):\r\n \r\n name_ranking = []\r\n \r\n for publication_name, name_list in baby_names.items():\r\n publication = {}\r\n if name.capitalize() in name_list:\r\n publication['list'] = publication_name\r\n publication['rank'] = name_list.index(name.capitalize()) + 1\r\n name_ranking.append(publication)\r\n\r\n \r\n return sorted(name_ranking, key=lambda k: k['rank'])", "def get_names(path='names.txt'):\n if not os.path.exists(path):\n logging.info('data not found, downloading.')\n url = 'https://www.sec.gov/rules/other/4-460list.htm'\n r = requests.get(url)\n tree = html.fromstring(r.content)\n names = tree.xpath('//tr/td[2]')\n names = [name.text.strip()\n for name in names if name.text is not None]\n\n url = 'http://www.nasdaq.com/screening/companies-by-industry.aspx?industry=Technology&render=download'\n r = requests.get(url)\n newnames = [line.strip().split(',')[1].replace('\"', '')\n for line in r.text.split('\\n')[:-1]]\n names.extend(newnames)\n with open(path, 'w') as txtfile:\n txtfile.write('\\n'.join(names))\n else:\n with open(path, 'r') as txtfile:\n names = txtfile.read().split('\\n')\n return names", "def get_dates_list() -> List[str]:\n dates = listdir(\"hansard_gathering/processed_hansard_data\")\n return sorted([_file for _file in dates if not _file.endswith(\"_num\")])", "def process_file(file_name):\n f = open(\"babynames/\" + file_name, 'r')\n reader = csv.reader(f)\n return list(reader)", "def get_oscars_best_picture_list():\n list_file = 'oscar_best_picture_list.txt'\n f = open(list_file, 'r')\n film_list = []\n\n for line in f:\n words = line.split('-')\n film = {\n 'year': words[0][:-1],\n 'name': words[1][2:-2]\n }\n film_list.append(film)\n f.close()\n # Reverse as we want newest first not last\n film_list.reverse()\n return film_list", "def get_names(filename):\n\n with open(filename, \"r\") as readFile:\n names = [each.split(',') for each in readFile] #Split at the commas\n names = list(names[0]) #Retrieves the list of names within the list names\n names = [each.strip(\"\\\"\") for each in names] #Strips each name of the double quotes\n return names", "def extract(fichier):\r\n names = []\r\n for line in open(fichier, 'r'):\r\n for almost_name in line.split(','):\r\n names.append(almost_name[1:len(almost_name)-1]) \r\n return [i for i in names if i != '']", "def get_html_files_for_candidates(f_name):\n candidates = ['Joe 
Biden','Kamala Harris','Elizabeth Warren','Bernie Sanders']\n candidates_files = ['{}.html'.format(candidate) for candidate in candidates]\n results = []\n files = sorted([os.path.join(f_name, f) for f in os.listdir(f_name) if os.path.isfile(os.path.join(f_name, f))]) #extra check if is file unnecessary\n for file in files:\n for c in candidates_files:\n if c in file:\n results.append(file)\n print(results)\n return results", "def get_people(filename):\n\n lines = [line.rstrip('\\n').rstrip('\\r') for line in open(filename)]\n return lines", "def read_file(file_name):\n with open(file_name) as f:\n content = f.readlines()\n names = []\n dnas = []\n dna = \"\"\n name = \"\"\n for line in content:\n line = line.strip()\n if line[0] == \">\":\n names.append(name)\n dnas.append(dna)\n name = line[1:]\n dna = \"\"\n else:\n dna += line\n names.append(name)\n dnas.append(dna)\n\n return (names[1:], dnas[1:])", "def get_bfi_list():\n list_file = 'bfi_sight_and_sound_2012.txt'\n f = open(list_file, 'r')\n film_list = []\n\n for line in f:\n words = line.split(' ')\n #NOTE: pos is not the position in the pyton list but in the original\n # list so is not always an integer due to joint places\n film = {'pos': words[0], 'name': words[1][:-1]}\n film_list.append(film)\n f.close()\n return film_list", "def read_all_names(start,end,gender,datadir):\r\n names = []\r\n name_dict = {}\r\n\r\n for year in range(start,end+1):\r\n year_names = []\r\n year = str(year)\r\n try:\r\n path = r\"{0}/yob{1}.txt\".format(datadir,year)\r\n f = open(path)\r\n reader = csv.reader(f)\r\n rank = 1\r\n for name in reader:\r\n if name[1].upper() == gender.upper():\r\n #first time name appears\r\n if name[0] not in name_dict:\r\n name_dict[name[0]] = {}\r\n name_dict[name[0]].update({'gender':gender.upper()})\r\n name_dict[name[0]].update({'intro_year_pop':int(name[pop_index])})\r\n name_dict[name[0]].update({'intro_year':year})\r\n name_dict[name[0]].update({'intro_year_rank':rank})\r\n name_dict[name[0]].update({'pops':{year:int(name[pop_index])}})\r\n name_dict[name[0]].update({'ranks':{year:rank}})\r\n #every time the name appears\r\n name_dict[name[0]]['pops'].update({year:int(name[pop_index])})\r\n name_dict[name[0]]['ranks'].update({year:rank})\r\n rank = rank + 1\r\n year_names.append(name[name_index])\r\n names.extend(year_names)\r\n f.close()\r\n except IOError:\r\n \tprint(\"{0} not found\".format(path))\r\n return name_dict", "def display_get_date_ordered():\n ordered_games = reports.get_date_ordered(filename)\n print(\"Ordered games by date and by alphabet:\")\n for title in ordered_games:\n print(title)\n print()", "def get_word_list(file_name):\n file_ = open(file_name, 'r')\n lines = file_.readlines()\n\n start_line = 0\n while lines[start_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n start_line += 1\n\n lines = lines[start_line+1:]\n\n end_line = 0\n while lines[end_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n end_line += 1\n\n lines = lines[:end_line-3]\n\n list_ = ' '.join(lines)\n list_ = str.lower(list_)\n list_ = list_.translate(None, string.punctuation)\n list_ = list_.split()\n\n return list_", "def get_word_list(file_name):\n\tbook = get_file_text(file_name)\n\tbook = strip_header(book)\n\tbook = strip_punctuation(book)\n\tbook = book.lower()\n\twords = re.split(r'\\s+', book)\n\treturn words", "def get_word_list(file_name):\n\tnew_list = []\n\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\tend_line = 0\n\twhile lines[curr_line].find('START OF THIS 
PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\twhile lines[end_line].find('End of the Project Gutenberg EBook') == -1:\n\t\tend_line -= 1\n\tlines = lines[curr_line + 1:end_line]\n\n\tlong_lines = ''.join(str(e) for e in lines)\n\tlong_lines = long_lines.lower()\n\tlong_lines = long_lines.translate(None, punctuation)\n\n\twords = long_lines.split()\n\tfor item in words:\n\t\tnew_list.append(item)\n\n\treturn new_list", "def get_word_list(file_name):\n\n\tstoryEdit = []\n\n\t#Reads the file starting after the beginning\t\n\tf = open(file_name,'r')\n\tlines = f.readlines()\n\tcurr_line = 0\n\twhile lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n\t\tcurr_line += 1\n\tlines = lines[curr_line+1:]\n\n\n\t#Loops through each row, making everything lowercase and replacing all punctuation\n\tfor row in lines:\n\t \trow = row.lower()\n\t \trow = row.translate(string.maketrans(\"\",\"\"), string.punctuation)\n\t \tstoryEdit += row.split()\n\n\n\t#Returns the final list as \n\treturn storyEdit", "def get_teams_from_text(file_name, index):\n\n\tteams = []\n\tList = open(\"teams/\" + file_name + \".txt\",'r').read().splitlines()\n\t\n\tj = 0\n\tfor i in range(0,len(List)):\n\t\tif j < index:\n\t\t\tdummy_team = Team(List[i], \"Wappen\")\n\t\t\tteams.append(dummy_team)\n\t\tj = j+1\n\treturn teams", "def read_drama_names(drama_file):\n with open(drama_file, 'rb') as f:\n drama_list = [d.decode('utf-8').strip() for d in f.readlines()]\n return drama_list", "def format_birthday(birth_list):\r\n # Remove space and tab from the left and right part of the string\r\n stripped_text = birth_list.strip(\"\\n\\t\")\r\n # Split the date and count\r\n split_text = [s.split(\"\\t\") for s in stripped_text.split(\"\\n\")[6:]] \r\n split_date = [map(int, s[0].split(\"/\")) for s in split_text] \r\n split_count = map(int, [s[1] for s in split_text])\r\n # Concatenate the date and count \r\n merge = [split_date[i] + [split_count[i]] for i in xrange(365)]\r\n return merge", "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names1 = []\n for n in names:\n x = n.split(\" \")\n names1.append(x[1] + \" \" + x[0])\n return names1\n # ...", "def main(html_file):\n \n biglist = name_extractor(html_file)\n text = '\\n'.join(biglist)\n print (text)", "def fasta_headers(file_name):\n list = []\n with open('../test_files/' + file_name, 'r') as infile:\n text = infile.read()\n seqs = text.split('>')\n for seq in seqs:\n try:\n x = seq.split('\\n', 1)\n if x[0] != '':\n #x[0] contains only headers\n list.append(x[0])\n except:\n pass\n return list", "def extract_bird_name(filename, split=None):\n description = ''\n if split is None:\n words = filename.split()\n else:\n words = filename.split(split)\n for word in words:\n if is_number(word):\n break\n if description is '':\n description += word\n else:\n description += ' ' + word\n return description", "def find_yearly_playlist(data):\n\n if data['year'] and str(data['year']).isdigit():\n median = round(int(data['year']), -1)\n return 'years-{}-{}.m3u'.format((int(median) - 5), (int(median) + 4))\n\n return '00-without-year.m3u'", "def filename_to_file_list_key(filename):\n pattern = r'\\.\\d\\d\\d\\d-'\n index = re.search(pattern, filename)\n if index:\n year = int(filename[index.start() + 1: index.start() + 5])\n else:\n return '0-0'\n # the YYYY field is 4 characters long, the month is two\n year_offset = index.start() + 5\n # two characters for the month, and one for the - between year and month\n month_offset = 
year_offset + 3\n month = int(filename[year_offset + 1: month_offset])\n key = \"{year}-{month}\".format(year=year, month=month)\n return key", "def get_list(file_name):\n with open(file_name, \"r\", encoding=\"latin-1\") as file:\n text = file.read()\n text = text.lower() # Make everything lowercase\n text = text.split(\"\\n\")\n return text", "def parse(filename):\n file_map = {\n '1995-1996.html': ninety_six,\n '2005-2006.html': twenty_six,\n '2014-2015.html': twenty_fifteen\n }\n func = file_map.get(filename, lambda: \"Invalid File\")\n func(filename)", "def read_names(path):\n return SortedSet([os.path.basename(n) for n in glob.glob(path + os.sep + '*')])", "def getFilmTitles(checkFolder):\n\n files = [str(x) for x in Path(checkFolder).rglob(\"*\")]\n libFilmFiles = list(map(os.path.basename,files)) # Remove the path\n libFilmTitles = [os.path.splitext(x)[0] for x in libFilmFiles]\n return libFilmTitles", "def get_nifty_list(root_dir, name=\"\"):\n \n file_list = glob(root_dir + \"/**/*.nii.gz\", recursive=True)\n file_list = sorted([file for file in file_list if name in file])\n return file_list", "def read_name_file(filename):\n with open(filename, 'r') as f:\n names = f.read()\n names = names.split('\\n')\n names = list(filter(None, names))\n return names", "def _get_authors_list():\n\n articles = os.listdir(\"../data/\")\n authors = []\n for article in articles:\n with open(\"../data/\" + article, 'r') as file:\n lines = file.readlines()\n author = tuple(\n line.replace(\"\\n\", \"\").split()[1] for line in lines\n if \"Автор:\" in line\n )[0]\n authors.append(author)\n\n return authors", "def getlistofpossibletitles(fileitem,shows):\n title = []\n title.append(fileitem)\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n return title", "def get_file_two_years(file):\n f = '_' + '(\\d+)' + '_' + '(\\d+)'\n match = re.search(f, file)\n if match:\n return int(match.group(1)), int(match.group(2))", "def get_names():\n only_links = SoupStrainer(\"a\")\n names = set()\n doc = requests.get(NAMES_URL).content\n links = BeautifulSoup(doc, \"html.parser\", parse_only=only_links)\n pokemon = links.find_all(title=re.compile(\"(\\w+)(\\s){1}(\\(Pokémon\\))\"))\n for cell in pokemon:\n names.add(str(cell.string))\n \n\n return names", "def get_word_list(file_name):\n # Read the file specified\n f = open(file_name,'r')\n lines = f.readlines()\n \n # Remove header text from lines\n curr_line = 0\n while lines[curr_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line += 1\n lines = lines[curr_line + 1:]\n\n # Remove footer text from lines\n curr_line = -1\n while lines[curr_line].find('END OF THIS PROJECT GUTENBERG EBOOK') == -1:\n curr_line -= 1\n lines = lines[: curr_line]\n\n # Strip lines into words\n words = []\n for i in range(len(lines)):\n # Remove punctuation\n next_line = lines[i].translate(string.maketrans(\"\",\"\"), string.punctuation)\n next_line = next_line.lower()\n words += next_line.split()\n \n return words", "def get_names_url(i):\n urls = list()\n with open('./urls/fall11_urls_train_'+str(i)+'.txt','r',encoding=\"Latin-1\") as f:\n for line in f:\n urls.append(line)\n urls = [url.strip('\\n') for url in urls]\n urls1 = [url.split('\\t')[1] for url in urls]\n names = [url.split('\\t')[0] for url in urls]\n return urls1,names", "def format_finder(files):\r\n\r\n file_format = ['%Y', '%m', '%d']\r\n year = ''\r\n month = ''\r\n da = ''\r\n\r\n for file in files:\r\n\r\n index = 
file.find('_')\r\n date = file[: index]\r\n if '-' in date:\r\n separator = '-'\r\n else:\r\n separator = '_'\r\n date = date.split(separator)\r\n\r\n for d in range(len(date)):\r\n\r\n # If the date doesn't contain decimal (Eg: August) then it would return None\r\n if not date[d].isdecimal():\r\n return None\r\n\r\n # If the element in the date is of length greater then 2 then it would be a year (Eg: 2020)\r\n # And that value is set as the index of year\r\n if len(date[d]) > 2:\r\n year = d\r\n\r\n # If the integer of element in the date is of length greater then 12 then it would be a date (Eg: 25)\r\n # And that value is set as the index of date\r\n elif int(date[d]) > 12:\r\n da = d\r\n\r\n # If Both year and date are set, then the correct index for the month would be 3- (year+date)\r\n # Eg: 3 -(0+1)\r\n if (year != '') and (da != ''):\r\n month = 3 - (year + da)\r\n break\r\n\r\n # If Month is set, then we change the format according to their set value\r\n # Eg: format = ['%Y', '%m', '%d'], and year = 1, da = 0, month = 2\r\n # Then format[year=1] = '%Y'\r\n # Then format[da=0] = '%d'\r\n # Then format[month=2] = '%m'\r\n # format = ['%d', '%Y', '%m']\r\n if month:\r\n file_format[year] = '%Y'\r\n file_format[month] = '%m'\r\n file_format[da] = '%d'\r\n break\r\n else:\r\n # The script executes this only if none of the files had an date element( Which is not year)\r\n # That was greater than 12, Eg: 2020-06-10\r\n # Meaning that we cannot know for sure which element represents the date/month\r\n # Hence we arbitrarily assign one element as date and another as month\r\n if year != 0:\r\n # If the index of year is zero, we let the format to be same as it was assigned first\r\n # Else we arbitrarily assign '0' th index to month\r\n file_format[year] = '%Y'\r\n file_format[0] = '%m'\r\n file_format[3 - year] = '%d'\r\n return f'{file_format[0]}-{file_format[1]}-{file_format[2]}'", "def list_assay_files(path):\n if rank != 0:\n print(\"Rank {} tried scanning the directory.\".format(rank))\n sys.exit()\n\n valid_names = []\n for root, dirs, files in os.walk(path):\n for f in files:\n if f.startswith(\"NENE\") and f.endswith((\"A.txt\", \"B.txt\")):\n fullpath = os.path.join(root, f)\n valid_names.append(fullpath)\n\n return valid_names", "def read_screen_names(filename):\n flist = []\n f = open('candidates.txt')\n for line in f:\n \tflist.append(line.strip('\\n'))\t\n return flist", "def read_names_list(file_path):\r\n\tnames_list = []\r\n\twith open(file_path) as file:\r\n\t for line in file:\r\n\t cline = line.rstrip().split()\r\n\t #row_id = cline[0]\r\n\t row_name = cline[1:]\r\n\t #names_list.append((row_id, \" \".join(row_name)))\r\n\t names_list.append(\" \".join(row_name))\r\n\treturn names_list", "def get_oscars_list():\n file = 'oscars.html'\n handler = open(file).read()\n soup = BeautifulSoup(handler)\n film_list = []\n\n for row in soup.findAll('tr'):\n #TODO: add more elegant check instead of dirty try catch\n try:\n links = row.findAll('a')\n cells = row.findAll('td')\n awards = int(cells[2].contents[0])\n\n # Filter only nominations\n if awards > 0:\n film = {\n 'name': links[0].contents[0],\n 'year': links[1].contents[0],\n 'awards': awards,\n 'nominations': cells[3].contents\n }\n film_list.append(film)\n except Exception, e:\n pass\n\n film_list = sorted(film_list, key=lambda k: k['awards'])\n film_list.reverse()\n return film_list", "def GetColumnistNames():\n global columnistnames\n if not columnistnames:\n columnistnames = []\n html = ukmedia.FetchURL( 
columnistmainpage )\n soup = BeautifulSoup( html )\n for a in soup.findAll( 'a', {'class':'author'} ):\n n = a.renderContents(None).strip()\n if not n in columnistnames:\n columnistnames.append( n )\n return columnistnames", "def test_get_teams_in_year_names():\n assert sorted(gtiy(2008)) == sorted(team_2008)\n assert sorted(gtiy(2009)) == sorted(team_2009)\n assert sorted(gtiy(2010)) == sorted(team_2010)\n assert sorted(gtiy(2011)) == sorted(team_2011)\n assert sorted(gtiy(2012)) == sorted(team_2012)\n assert sorted(gtiy(2013)) == sorted(team_2013)\n assert sorted(gtiy(2014)) == sorted(team_2014)\n assert sorted(gtiy(2015)) == sorted(team_2015)\n assert sorted(gtiy(2016)) == sorted(team_2016)\n assert sorted(gtiy(2017)) == sorted(team_2017)", "def list_titles(genre):\n text = genre_html(genre)\n num_titles = text.count('title=')\n\n titles = []\n for i in range(num_titles):\n start = text.find('title=')\n end = text[start+7:].find('\">')\n title = text[start+7:start+end]\n titles.append(title)\n text = text[start+7:]\n\n return titles", "def get_html_files(f_name):\n files = sorted([os.path.join(f_name, f) for f in os.listdir(f_name) if os.path.isfile(os.path.join(f_name, f))]) #extra check if is file unnecessary\n return files", "def filelist(basedir):\n day_files = []\n for root, dirs, files in os.walk(basedir):\n for file in files:\n if file.endswith(\".png\"):\n day_files.append(os.path.join(file))\n dates_files = []\n\n for i in day_files:\n year = i.split('_')[1]\n day = i.split('_')[2]\n mounth = i.split('_')[3]\n hour = i.split('_')[4]\n dates_files.append(UTCDateTime(year+'-'+mounth+'-'+day+'T'+hour)-3)\n return sorted(dates_files)", "def sort_by_version(compiled_re, names):\n annotated_names = [([int(n) for n in compiled_re.match(name).groups()], name) for name in names]\n annotated_names.sort()\n return [annotated_name[1] for annotated_name in reversed(annotated_names)]", "def getlistofpossibletitles(fileitem,fname):\n title = []\n oddtitles = open(\"oddtitles.txt\", 'r')\n content = oddtitles.read()\n oddtitles.close()\n\n content = content.split(\"\\n\")\n for line in content:\n elements = line.split(',')\n if fileitem in elements[0]:\n #print(elements[1])\n title.append(elements[1].title())\n\n \n title.append(fileitem)\n title.append(fileitem.title())\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n title.append(lookfor.title())\n with open(fname, \"r\") as dataf:\n for line in dataf:\n if lookfor.upper() in line.upper():\n line = line.replace(\"\\n\",\"\")\n title.append(line)\n title.append(line.title())\n return title", "def get_data(url):\n\n request = requests.get(url)\n\n soup = BeautifulSoup(request.text, \"lxml\")\n\n ol_tags = soup.find_all('ol')\n\n names_list = []\n\n for li_tags in ol_tags:\n for names in li_tags:\n names_list.append(names.text)\n\n return names_list", "def sort_slide_names(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def get_links(names, html):\n ###TODO\n people = []\n readweb = BeautifulSoup(html, 'html.parser')\n for a in readweb.find_all('a'):\n person = os.path.basename(str(a.get('href')))\n if person in names:\n people.append(person)\n return SortedSet(people)\n pass", "def get_year_desc():\n return Year.objects.all().order_by('-name')", "def report_name_sort(rpt_name: str):\n rptn = 
rpt_name.split(\" \")\n try:\n rpta = rptn[0].split(\"-\")\n returned = (rpta[0], rpta[1])\n except IndexError:\n returned = (rptn[0], \"\")\n\n return returned", "def _possible_names(self, filename):\n names = [filename]\n if not self._iszip(filename):\n for zipext in _file_openers.keys():\n if zipext:\n names.append(filename+zipext)\n return names", "def create_dropdown_list_years(movies):\n dropdown = ''\n movies.sort(key=lambda x: x.year, reverse=True)\n y = []\n for movie in movies:\n y.append(movie.year)\n y = list(set(y))\n for x in y:\n dropdown += dropdown_tile_year.format(id_year=x,\n year=x)\n \n return dropdown", "def test_iter_all_years(self):\n ar = awstats_reader.AwstatsReader(test_file_dir, 'jjncj.com')\n self.assertEqual([ary.year for ary in ar], [2008,2009])", "def get_animals_list(filename):\n\n f = open(filename, 'r')\n text = f.read()\n f.close()\n animals = text.split('\\n')\n animals = [animal for animal in animals if animal != '' and len(animal) > 1]\n return sorted(animals)", "def loadFiles():\n all_chapters = []\n for name in names:\n f = open(file_path + name, \"r\", encoding=\"utf-8\")\n html_file = f.read()\n f.close()\n chap_text = extractText(html_file)\n new_text = cleanText(chap_text)\n all_chapters.append(new_text)\n concatenated_chapters = \" \".join(all_chapters)\n return concatenated_chapters", "def parse_bib_from_list(filename):\n\tentry_regex = r\"TITEL: .*\\s*AUTOR: .*\"\n\tparse_func = make_parse_func(r\"AUTOR: (.*)\", r\"TITEL: (.*)\", None)\n\treturn parse_bib(filename, entry_regex, parse_func)", "def readFile(fileName):\n\tf = open(fileName, 'r')\n\tnames = map(lambda s: s[1:-1], f.read().split(','))\n\tnames.sort()\n\treturn names", "def get_patient_names(self):\n\t# use pre-defined patient names\n\tif (self.data_names is not None):\n\t\tassert (os.path.isfile(self.data_names))\n\t\twith open(self.data_names) as f:\n\t\t\tcontent = f.readlines()\n\t\tpatient_names = [x.strip() for x in content]\n\t# use all the patient names in data_root\n\telse:\n\t\tpatient_names = os.listdir(self.data_root[0])\n\t\tpatient_names = [name for name in patient_names if 'brats' in name.lower()]\n\treturn patient_names", "def __file_sorter(self, filename: str) -> int:\n filename = filename.split('/')[-1]\n year = int(filename[-3:-1]) # extracts YY from ssssdddh.YYo\n day_info = filename[4:8] # extracts dddh from ssssdddh.YYo\n day_of_year = int(day_info[:3]) # ddd\n hour_block = day_info[-1] # h\n if hour_block.isdigit():\n return (year, day_of_year)\n return (year, day_of_year + ord(hour_block))", "def get_image_names(file_path: str) -> List:\n name_list = []\n f = open(file_path, \"r\")\n lines = f.readlines()\n f.close()\n for line in lines:\n name = line.split(\"/\")[-1].replace(\"\\n\", \"\")\n name_list.append(name)\n return name_list", "def parse_bibtex(file, build_dir):\n\n parser = BibTexParser()\n parser.customization = customizations\n years = []\n with open(file, 'r') as f: \n bibtex = bibtexparser.load(f, parser=parser)\n for i in range (len(bibtex.entries)):\n for key, value in bibtex.entries[i].items():\n if key == 'year':\n years.append(int(value))\n years.sort()\n years.reverse()\n years_no_repeat = []\n for i in range(len(years)):\n if years_no_repeat.count(years[i]) == 0:\n years_no_repeat.append(years[i])\n \n for i in range(len(years_no_repeat)):\n bibtext = copy.deepcopy(bibtex)\n array = []\n for j in range (len(bibtex.entries)):\n for key, value in bibtex.entries[j].items():\n if key == 'year':\n if int(value) == years_no_repeat[i]:\n 
array.append(bibtex.entries[j])\n bibtext.entries = array\n parse_file = os.path.join(build_dir, str(years_no_repeat[i]) + 'parsed.bib')\n writer = BibTexWriter()\n writer.order_entries_by = ('ENTRYTYPE', )\n with open(parse_file, 'w') as f:\n f.write(writer.write(bibtext))", "def get_page_name(self,en_code):\n files_and_names = {}\n for files_named in self.find_enc(en_code):\n search_in_file = open(self.file_location+\"/\"+files_named)\n for line in search_in_file:\n if '# LINKNAME:' in line:\n #print(line)\n new_line = line.split('# LINKNAME:')\n for nl in new_line:\n fnl = nl.strip()\n if fnl is not None:\n files_and_names[files_named] = fnl\n search_in_file.close()\n return files_and_names", "def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames", "def top_ten_movies(path):\n content = open(path, \"r\")\n topten = []\n for x in content:\n topten.append(x) \n return topten", "def parse_authors():\n import subprocess\n try:\n output = subprocess.check_output(['git', 'shortlog', '-s'],\n universal_newlines=True)\n except Exception as ex:\n print('ex = {!r}'.format(ex))\n return []\n else:\n striped_lines = (l.strip() for l in output.split('\\n'))\n freq_authors = [line.split(None, 1) for line in striped_lines if line]\n freq_authors = sorted((int(f), a) for f, a in freq_authors)[::-1]\n # keep authors with uppercase letters\n authors = [a for f, a in freq_authors if a.lower() != a]\n return authors", "def scrape_all_course_names(filename, verbose):\n \n\tsoup =\tBeautifulSoup(open(filename, 'r'), 'html.parser')\n\t#print(soup)\n\tcourses = []\n\t# all the courses are stored in a div w/class=view-content\n\t\t\n\th4_field_content = soup.find('h4', 'field-content')\n\t#print(h4_field_content)\n\tall_course_content = soup.find(\"div\", \"view-content\") # this contains ALL the classes...\n\tif all_course_content == None:\n\t\tprint(\"There are no courses on this page. Try a smaller page number!\")\n\t\treturn []\n\t\n\tcandidate_classes = all_course_content.find_all('a') # we want all the 'a' tags within\n\tclass_list=[]\n course_code_and_number = {}\n\tfor c in candidate_classes:\n\t\t#print(c.text, \"\\n\")\n\t\ttext = c.text # ex. AEMA 611 Experimental Designs 1 (3 credits)\n\t\ttext = text.split(\" \")# split on the space\n\t\tcourse_id = \" \".join(text[:2]).replace('\\n', '') # the first 2 are the course id\n\t\tcourse_name = \" \".join(text[2:-2]).replace('\\n', '')\n\t\tnum_credits = text[-2].replace(\"(\", \"\")# just get the course number, replace the ( with nothing \n\t\n\t\n\t\t#print(f\"{course_id}\\n{course_name}\\n{num_credits}\\n\")\n\t\ttry:# Check that the course number is a digit bc sometimes it is something weird\n\t\t\tfloat(num_credits)\n\t\texcept ValueError:\n\t\t\t#print(f\"Wrong course format. Ignoring {c}\")\n\t\t\tif(verbose):\n\t\t\t\tprint(f\"Wrong course format. Ignoring course: {c.text}\")\n\t\t\tcontinue\n\t\tclass_list.append(course_id)\n\treturn class_list", "def _get_wordlist(file_name):\n ifile = codecs.open(file_name, 'r', encoding='utf-8')\n for _ in range(int(ifile.__next__())):\n yield (ifile.__next__().strip() for _ in range(int(ifile.__next__())))", "def get_word_list(file_name, n):\n f = open(file_name, 'r')\n text = f.read()\n words = re.compile('\\w+').findall(text)\n return get_top_n_words(words, n)", "def read_str_name(path):\r\n name = []\r\n name_stru = {}\r\n with open(path, \"r+\") as f:\r\n line = f.readlines()\r\n \r\n # to load the name to list. 
files\r\n for i in range(len(line)):\r\n \r\n if line[i][:-1] != '':\r\n \r\n name.append(line[i][:-1])\r\n else:\r\n \r\n name.append(line[i-1][:-1] + str())\r\n \r\n line[i] = line[i-1]\r\n \r\n # to remark the structure name\r\n for s in name:\r\n \r\n name_stru[s] = (name.count(s),name.index(s))\r\n \r\n for key,values in name_stru.items():\r\n \r\n if values[0] != 1:\r\n for i in range(values[0]):\r\n name[values[1]+i] = name[values[1]+i] + str(i+1)\r\n \r\n return name", "def get_title_name_year(self) -> Tuple[str, str]:\n r = self.session.get(f\"https://www.imdb.com/title/{self.imdb}\")\n if r.status_code != 200:\n raise ValueError(f\"An unexpected error occurred getting IMDB Title Page [{r.status_code}]\")\n imdb_page = html.unescape(r.text)\n imdb_title = re.search(\n # testing ground: https://regex101.com/r/bEoEDn/1\n r\"<title>(?P<name>.+) \\(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)\"\n r\"(?P<year>(\\d{4})(|– |–\\d{4})))\\) - IMDb</title>\",\n imdb_page\n )\n if not imdb_title:\n raise ValueError(f\"Could not scrape Movie Title or Year for {self.imdb}...\")\n return imdb_title.group(\"name\").strip(), imdb_title.group(\"year\").strip()", "def createFileNames(nFileNames, seqPrefix):\n nameList = []\n nameList = [seqPrefix+str(i)+\".txt\" for i in range(0, nFileNames)]\n return nameList", "def get_labyrinth_from_file_name(self, file_name):\n\n excel_file = xlrd.open_workbook(self.file_name)\n labyrinth = list()\n for sheet in excel_file.sheets():\n for i in range(sheet.nrows):\n row = list()\n for j in range(sheet.ncols):\n row.append(sheet.cell_value(i, j))\n labyrinth.append(row)\n\n return labyrinth" ]
[ "0.7748594", "0.74819154", "0.73469853", "0.7098531", "0.70823675", "0.6961224", "0.67496955", "0.6549014", "0.6204802", "0.6009291", "0.5994891", "0.59557843", "0.59324557", "0.591772", "0.5879463", "0.5868835", "0.5803098", "0.5757769", "0.57187426", "0.57022166", "0.5698993", "0.56967515", "0.566333", "0.56408554", "0.5637905", "0.5633162", "0.55702776", "0.55400187", "0.5537733", "0.5537317", "0.55194575", "0.5513656", "0.5502073", "0.54979014", "0.5492763", "0.54882896", "0.54785943", "0.5449284", "0.5447175", "0.5423578", "0.54224414", "0.5414946", "0.54095227", "0.5392193", "0.53748566", "0.53620976", "0.5356559", "0.53563696", "0.53461087", "0.53336346", "0.5329907", "0.53074235", "0.52998954", "0.5280197", "0.5265009", "0.525049", "0.5248914", "0.52479285", "0.5245172", "0.52428025", "0.5225552", "0.522313", "0.5215354", "0.52116287", "0.5202964", "0.5200536", "0.519765", "0.51941496", "0.51904005", "0.5177675", "0.51672155", "0.5159661", "0.51533175", "0.5152566", "0.5148155", "0.5144185", "0.5141968", "0.51391196", "0.5137758", "0.5136683", "0.51214767", "0.511978", "0.5116746", "0.51150167", "0.51062936", "0.51006734", "0.5100279", "0.5093798", "0.5087557", "0.5087244", "0.5066502", "0.5060116", "0.50574976", "0.5050647", "0.50425404", "0.5040403", "0.5039778", "0.50362396", "0.5034267", "0.5031472" ]
0.7415442
2
input h (meters) and the coefficients for the linear profile for the free troposphere theta (ft_intercept (K) and slope gamma (K/m)); return the free troposphere theta at height h
def theta_ft(h,ft_intercept,gamma):
    theta_top = ft_intercept + h*gamma
    return theta_top
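As a quick sanity check of the linear profile, a hypothetical call with assumed values (an 800 m mixed-layer top, a 297 K intercept, a 5 K/km slope; none of these numbers come from the record above) evaluates as follows:

# hypothetical inputs, chosen only to illustrate theta_ft; not part of the record
ft_intercept = 297.0   # K, assumed intercept of the free-troposphere theta profile
gamma = 0.005          # K/m, assumed slope (5 K per km)
h = 800.0              # m, assumed height of the mixed-layer top
theta_top = theta_ft(h, ft_intercept, gamma)   # 297.0 + 800.0*0.005 = 301.0 K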
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_theta_surf_hex_h(theta_hs_in_h: float, theta_hs_out_h: float, v_hs: float) -> float:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n # sensible heating capacity of heat source for heating, W\n q_hs_h = (theta_hs_out_h - theta_hs_in_h) * c * rho * v_hs / 3600\n\n # sensible heat transfer coefficient on the surface of the heat exchanger of the internal unit, W/m2K\n alpha_c_hex_h = get_alpha_c_hex_h(v_hs)\n\n # effective area for heat exchange of the surface area of heat exchanger or the internal unit, m2\n a_e_hex = get_a_e_hex()\n\n return (theta_hs_in_h + theta_hs_out_h) / 2 + q_hs_h / (a_e_hex * alpha_c_hex_h)", "def trapezoidal(f, x0, h):\n return (h/2.0 * (f(x0)+f(x0+h)))", "def get_theta_surf_hex_test_h(theta_hs_in_h: float, q_hs_h: float, v_hs: float) -> float:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n theta_hs_out_h = theta_hs_in_h + q_hs_h / (c * rho * v_hs) * 3600\n\n # sensible heat transfer coefficient on the surface of the heat exchanger of the internal unit, W/m2K\n alpha_c_hex_h = get_alpha_c_hex_h(v_hs)\n\n # effective area for heat exchange of the surface area of heat exchanger or the internal unit, m2\n a_e_hex = get_a_e_hex()\n\n return (theta_hs_in_h + theta_hs_out_h) / 2 + q_hs_h / (a_e_hex * alpha_c_hex_h)", "def two_theta_hkl(self, H, K, L):\n return self.unit_cell.two_theta((H, K, L), self.wavelength, deg=True)", "def curve_with_hillcoef(ph, pka, hillcoef):\n# return hillcoef * ph - pka\n return 1/(1+10**(hillcoef*(pka-ph)))", "def get_metrics(H):\n theta = np.arctan2(H[0,1], H[0,0])\n scale = H[0,0] / np.cos(theta)\n tx = H[0,2]\n ty = H[1,2]\t\n return tx,ty,theta", "def phitheta(loc):\n x = loc[0]\n y = loc[1]\n z = loc[2]\n r = sqrt(x**2 + y**2 + z**2)\n theta = arcsin(z/r)\n phi = arctan2(y,x)\n return(phi, theta)", "def _step(self, t, y, h):\n # We must use solvers / implicit form\n f_pn1 = lambda a_n1: (y + h*self.v + (h**2 / 2.0) * \\\n ((1.0 - 2.*self.beta)*self.a + 2.*self.beta*a_n1))\n f_vn1 = lambda a_n1: (self.v + h*((1.0-self.gamma)*self.a + self.gamma*a_n1))\n def f_an1(a_n1):\n f_n1 = self.f(t+h,f_pn1(a_n1),f_vn1(a_n1))\n f_n = self.f(t,y,self.v,)\n return a_n1 - ((1.0+self.alpha)*f_n1 - self.alpha*f_n)\n\n a = self.solver(f_an1, self.a)\n y = f_pn1(a) # Calculate and store new variables. 
\n self.v = f_vn1(a)\n self.a = a\n return t+h, y", "def __s_polynomial(g, h):\n\n deg_g = __multidegree(g)\n deg_h = __multidegree(h)\n max_deg = map(max, zip(deg_g, deg_h))\n R = g.parent()\n\n # Builds a polynomial with the variables raised to max_deg, in order\n vars = map(R, R.variable_names())\n x_pow_max_deg = reduce(operator.mul, [x ** d for (d, x) in zip(max_deg, vars)], R(1))\n\n quo_g, _ = x_pow_max_deg.quo_rem(g.lt())\n quo_h, _ = x_pow_max_deg.quo_rem(h.lt())\n return quo_g * g - quo_h * h", "def ab2rhotheta(a, b):\n \"\"\" also : y - ax - b = 0 \"\"\"\n \"\"\" y*sin(theta) + x*cos(theta) - rho = 0 \"\"\"\n #print(\"a: %f b: %f\" % (a, b))\n theta = math.atan(a) + math.pi/2.0\n rho = b*math.sin(theta)\n #print(\"a: %f b: %f rho: %f theta: %f\" % (a, b, rho, theta))\n return (rho, theta)", "def alkTphosfac(hguess,ks):\n #mick - first estimate of contribution from phosphate\n #mick based on Dickson and Goyet\n h3po4g,h2po4g,hpo4g,po4g = phosfracs(hguess,ks)\n return h3po4g-hpo4g-2*po4g", "def get_desired_heading(self, h, p):\n heading_vector = [self.alpha*h[0] + self.beta*p[0],\n self.alpha*h[1] + self.beta*p[1]]\n a = [(heading_vector[0]/linalg.norm(heading_vector)),\n (heading_vector[1]/linalg.norm(heading_vector))]\n return a", "def H1(self,kx,ky):\n return -2.*self.t2*np.cos(self.phi)*(np.cos(3.*kx/2.+np.sqrt(3.)*ky/2.)+np.cos(-3.*kx/2.+np.sqrt(3.)*ky/2.)+np.cos(-np.sqrt(3.)*ky))", "def get_theta_hs_out_max_h(\n theta_d_hs_in: np.ndarray, q_hs_max_h: np.ndarray, v_d_supply: np.ndarray) -> np.ndarray:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n return np.minimum(theta_d_hs_in + q_hs_max_h / (c * rho * np.sum(v_d_supply, axis=0)) * 10 ** 6, 45.0)", "def cost(self, h_theta, y):\n \n return -y*np.log(h_theta) - (1. - y)*np.log(1. - h_theta)", "def mort(self, h):\n return(self.mu_bg +\\\n (1.0 - self.mu_bg) * self.pr_P * self.p_att *\\\n (1 - h**self.ap))", "def forward(self, h, r, t):\n h_e, r_e, t_e = self.embed(h, r, t)\n\n norm_h_e = F.normalize(h_e, p=2, dim=-1)\n norm_r_e = F.normalize(r_e, p=2, dim=-1)\n norm_t_e = F.normalize(t_e, p=2, dim=-1)\n\n r_theta = self.theta[r]\n\n if self.l1_flag:\n return r_theta*torch.norm(norm_h_e + norm_r_e - norm_t_e, p=1, dim=-1)\n\n return r_theta*torch.norm(norm_h_e + norm_r_e - norm_t_e, p=2, dim=-1)", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n # Using u = ubar to avoid algeabric loops\n \n y = self.plant.h( x , self.plant.ubar , t )\n \n return y", "def lambert_eqarea(khi,phi):\n r = 2 * np.sin(khi/2.)\n th = phi\n return r, th", "def get_h0(self, t):\n return self.h0 * np.sin(2 * np.pi * t / self.Pmod + self.Pmod_phi)", "def Psi(l,m,theta,phi):\n if numpy.isscalar(theta): \n theta=numpy.array([[theta]])\n phi=numpy.array([[phi]])\n Psilm_th=numpy.zeros(theta.shape,dtype=complex)\n Psilm_ph=numpy.zeros(theta.shape,dtype=complex)\n x=numpy.cos(theta)\n thetaNonZerosIdx=numpy.where(theta!=0.0)\n if len(thetaNonZerosIdx[0]) != 0:\n Ylm=scipy.special.sph_harm(m,l,phi[thetaNonZerosIdx],theta[thetaNonZerosIdx])\n #Compute derivative of sphrHarm function w.r.t. 
theta:\n if l>=numpy.abs(m):\n Plmpo=legendreLM(l,m+1,x[thetaNonZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/float(math.factorial(l+m)))*Plmpo*numpy.exp(1j*m*phi[thetaNonZerosIdx])\n #YlmPmpo=sqrt((l-m)*(l+m+1))*spharm(l,m+1,theta,phi)*exp(-i*phi) %Should be equivalent to above formula.\n dtYlm=+YlmPmpo+m*x[thetaNonZerosIdx]*Ylm/numpy.sin(theta[thetaNonZerosIdx])\n # thetZerInd=[find(theta==0); find(theta==pi)]\n # dtYlm(thetZerInd)=0; %This is a fudge to remove NaNs\n else:\n dtYlm=numpy.zeros(theta[thetaNonZerosIdx].shape,dtype=complex)\n\n #dtYlm=spharmDtheta(l,m,theta,phi)\n\n Psilm_ph[thetaNonZerosIdx]=+1j*m/numpy.sin(theta[thetaNonZerosIdx])*Ylm\n Psilm_th[thetaNonZerosIdx]=+dtYlm\n #Ref: http://mathworld.wolfram.com/VectorSphericalHarmonic.html\n\n thetaZerosIdx=numpy.where(theta==0.0)\n if len(thetaZerosIdx[0]) != 0:\n if numpy.abs(m)==1:\n Yl1B=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*PBl1(l,m)*numpy.exp(1j*m*phi[thetaZerosIdx])\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+m*Yl1B\n Psilm_ph[thetaZerosIdx]=+1j*m*Yl1B\n Psilm_th[thetaZerosIdx]=+dtYlm\n else:\n Plmpo=legendreLM(l,m+1,x[thetaZerosIdx])\n YlmPmpo=math.sqrt((2*l+1)/(4*math.pi)*math.factorial(l-m)/math.factorial(l+m))*Plmpo*numpy.exp(1j*m*phi[thetaZerosIdx])\n dtYlm=+YlmPmpo+0\n Psilm_ph[thetaZerosIdx]=0\n Psilm_th[thetaZerosIdx]=+dtYlm\n return Psilm_th,Psilm_ph", "def phireturn(xhat0, tof):\n\t\n\t\tstoptime = tof\n\t\tnumpoints = 2\n\t\t#Integration time array:\n\t\tt = [stoptime * float(i) / (numpoints - 1) for i in range(numpoints)]\n\t\t\n\t\txsol = twomode.intfull(xhat0, t, abserror=1.0e-14, relerror=1.0e-12)\n\t\t#Phase of the first mode is the slice phase\n\t\tphi = np.angle(xsol[1,0] + 1j*xsol[1,1]) \t\n\t\t\n\t\treturn -phi", "def statsi(h):\n\n # Define constants\n zsa = np.array([0.0, 11000.0, 20000.0, 32000.0, 47000.0, 52000.0, 61000.0, 79000.0, 9.9e20])\n Tsa = np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65,252.65, 180.65, 180.65])\n g = 9.80665\n R = 287.0528\n Re = 6346766.0\n Psa = 101325.0\n\n # Calculate geopotential altitude\n z = Re*h/(Re+h)\n\n # Loop through atmosphere layers\n for i in range(8):\n \n # Calculate layer temperature gradient\n Lt = -(Tsa[i+1]-Tsa[i])/(zsa[i+1]-zsa[i])\n\n # If no temperature gradient\n if Lt == 0.0:\n\n # Are we in this layer of the atmosphere?\n if z <= zsa[i+1]:\n t = Tsa[i] # Temp isn't changing\n p = Psa*np.exp(-g*(z-zsa[i])/R/Tsa[i])\n d = p/R/t\n break\n\n # We need to go higher\n else:\n Psa *= np.exp(-g*(zsa[i+1]-zsa[i])/R/Tsa[i])\n\n # Temperature gradient\n else:\n ex = g/R/Lt\n if z <= zsa[i+1]:\n t = Tsa[i]-Lt*(z-zsa[i])\n p = Psa*(t/Tsa[i])**ex\n d = p/R/t\n break\n else:\n Psa *= (Tsa[i+1]/Tsa[i])**ex\n\n # We have left the atmosphere...\n else:\n t = Tsa[-1]\n p = 0.0\n d = 0.0\n\n return z, t, p, d", "def calcTrh(N, rh, m, G, gamma=0.02):\n return 0.138*N**0.5*rh**1.5/(m**0.5*np.log(gamma*N)*G**0.5)", "def Hstep_cost_function(H): \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost", "def ISA_trop(h):\n\tT = 288.15 - 0.0065*h;\n\tp = 101325*(T/288.15)**(-g/(-0.0065*287));\n\trho = 1.225*(T/288.15)**(-g/(-0.0065*287) - 1);\n\ta = np.sqrt(1.4*287*T);\n\treturn T, p, rho, a;", "def get_hessian(phi, pred, 
t, dot_product, reg= 1, regression= \"logistic\"):\n R = np.eye(pred.shape[0])\n if regression == \"logistic\":\n for i in range(pred.shape[0]):\n R[i,i] = pred[i,0] * (1- pred[i,0])\n elif regression == \"probit\":\n for i in range(pred.shape[0]):\n y_n = pred[i,0]\n t_n = t[i,0] \n dotp = dot_product[i, 0]\n pdf = norm.pdf(dotp)\n\n term1 = 1/ (y_n * (1- y_n) + TOLERANCE)\n term2 = (y_n - t_n)/(y_n**2 * (1- y_n) + TOLERANCE)\n term3 = (y_n - t_n)/((1- y_n)**2 * y_n + TOLERANCE)\n term4 = (y_n - t_n)* dotp/(y_n * (1- y_n) * pdf + TOLERANCE)\n\n R[i,i] = (term1 - term2 + term3 - term4)*(pdf**2)\n\n # Add regularization\t\t\t\n hessian = np.matmul(np.matmul(phi.T, R), phi) + np.eye(phi.shape[1])/reg\n return hessian", "def sph_harm_diff_theta(m, l, theta, phi):\n check_degree_and_order(m, l)\n return (m * _sph_harm(m, l, theta, phi) / np.tan(theta) +\n np.sqrt( (l - m) * (l + m + 1) ) * np.exp( -1j*phi ) *\n _sph_harm(m+1, l, theta, phi))", "def qFelder(h):\n\treturn (0.92 + 0.153 * h/1.01) * math.sqrt(9.8 * (2/3.0 * h)**3)", "def decode(self, h):\n return self.tanh(self.linearD(h))", "def decode(self, h):\n return self.tanh(self.linearD(h))", "def decode(self, h):\n return self.tanh(self.linearD(h))", "def vsh2(m, l, theta, phi):\n c_theta = 1/np.sin(phi) * sph_harm_diff_theta(m, l, theta, phi)\n c_phi = sph_harm_diff_phi(m, l, theta, phi)\n return np.array((0, c_theta, c_phi))", "def curve_no_hillcoef(ph, pka):\n# return ph - pka\n return 1/(10**(pka-ph)+1)", "def h_spec(k, He, h):\r\n return np.array(((k**3/np.pi**2) * h))", "def forward(self, t, h):\n if self.i == 0:\n self.A = self.beta * (self.B - self.B.transpose(1, 0)) + (\n 1 - self.beta) * (self.B +\n self.B.transpose(1, 0)) - self.gamma * self.I\n self.W = self.beta * (self.C - self.C.transpose(1, 0)) + (\n 1 - self.beta) * (self.C +\n self.C.transpose(1, 0)) - self.gamma * self.I\n\n return torch.matmul(\n h, self.A) + self.tanh(torch.matmul(h, self.W) + self.z)", "def bidimensional_map_nonlin_6(h, t, x, y, x_0, y_0):\n gamma = 3 * np.pi\n r = np.sqrt(\n np.square(x - x_0 + 0.01 * x_0 * np.sin(gamma * t))\n + np.square(y - y_0 + 0.01 * y_0 * np.sin(2 * gamma * t))\n )\n\n f = lambda r: gamma * r + (gamma / 2) * r**3 + np.sqrt(r)\n\n return h(t - f(r))", "def phs(x, y, rbfParam) :\n return (x**2 + y**2) ** (rbfParam/2)", "def theta_rule(a, u, h, t, n, th):\n \n Dt = t[n+1] - t[n]\n num = (1.0 - (1-th)*a*Dt)*u[n] + (1-th)*a*Dt*h(t[n]) + th*a*Dt*h(t[n+1])\n den = 1 + th*a*Dt\n \n return num/den", "def test_H_hat(self):\n\t\tposition = [0.0, 1.57079, 3.14159, 4.71238, 6.28318, 7.85398, 9.42477]\n\t\tpotential = [0.0, 6.0, 0.0, -6.0, 0.0, 6.0, 0.0]\n\t\tc = 1\n\t\tposition = tf.constant(position, shape = [1, len(position)], dtype = tf.float32)\n\t\tpotential = tf.constant(potential, shape = [1, len(potential)], dtype = tf.float32)\n\t\tbasis = schrodinger.create_basis(5)\n\t\tv = schrodinger.v0(position, potential, basis)\n\t\tcoeff = schrodinger.coefficient(position, basis)\n\t\tv0_hat = tf.linalg.solve(coeff, v)\n\t\tH = schrodinger.H_hat(c, len(basis), v0_hat)\n\t\tself.assertEqual(coeff.get_shape(), [len(basis), len(basis)])", "def bidimensional_map_nonlin_3(h, t, x, y, x_0, y_0):\n gamma = 3 * np.pi\n r = np.sqrt(np.square(x - x_0) + np.square(y - y_0))\n\n f = lambda r: gamma * r + (gamma / 2) * r**3 + np.sqrt(r)\n\n return h(t - f(r))", "def Tanh(z):\n return 1.7159 * np.tanh(2 / 3.0 * z)", "def rh(self, h):\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder", "def 
geofun(self, h, hK, cK):\n if (h < hK):\n # Wave is rarefaction wave (or depression)\n c = math.sqrt(9.81 * h) # we use math because is faster\n return 2 * (c-cK), 9.81/c\n else:\n # Wave is shock wave (or bore)\n ges = math.sqrt(0.5*9.81 * ( h + hK )/( h * hK ) )\n return ( h - hK ) * ges, ges - 0.25*9.81 * (h-hK)/(ges * h*h)", "def discretizar_funcion(a, I, h, t, th):\n \n u = np.zeros(len(t), dtype=float)\n u[0] = I\n for n in range(0, len(t) - 1):\n u[n+1] = theta_rule(a, u, h, t, n, th)\n return u", "def phi(t, *args):\n # Unpacking data\n mu_1, pi_mu_2, distance, affine_transfo = args\n A, b = get_Ab(t)\n N = len(mu_1)\n assert len(mu_1) == len(pi_mu_2)\n # Computing value of objective function\n r = 0.\n for i in np.arange(N):\n r += distance(affine_transfo(A, b, mu_1[i]), pi_mu_2[i]) ** 2\n return r", "def solver(self, theta):\n\n m = len(self.x)\n n = len(self.t[0])\n h = self.step_x\n k = self.step_t\n lamb = k / (h * h)\n w = np.zeros(m + 1)\n l = np.zeros(m + 1)\n u = np.zeros(m + 1)\n\n uK = np.zeros(self.x.shape)\n print('comecei para k= ({},{})'.format(theta[0], theta[1]))\n startTime = time.clock()\n if self.first:\n self.uR = np.zeros(self.x.shape)\n self.first = False\n error = 0\n errorCurve = np.zeros(self.x.shape)\n z = np.zeros(m + 1)\n w[m] = 0 # following the initial condition u(0,t) = u(l,t) = 0. If needed, change this.\n for i in range(1, m - 1):\n w[i] = self.g(i * h)\n\n l[1] = 1 + lamb\n u[1] = -lamb / (2 * l[1])\n for i in range(2, m - 1):\n l[i] = 1 + lamb + lamb * u[i - 1] / 2\n u[i] = -lamb / (2 * l[i])\n\n l[m - 1] = 1 + lamb + lamb * u[m - 2] / 2\n for j in range(1, n + 1):\n t = j * k # current t\n z[1] = ((1 - lamb) * w[1] + lamb / 2 * w[2] + self.f(t, theta)) / l[1]\n for i in range(2, m):\n z[i] = ((1 - lamb) * w[i] + lamb / 2 * (w[i + 1] + w[i - 1] + z[i - 1]) + self.f(t, theta)) / l[i]\n w[m - 1] = z[m - 1]\n for i in range(m - 2, 0, -1):\n w[i] = z[i] - u[i] * w[i + 1]\n\n for i in range(0, m + 1):\n x = i * h\n # print(x, w[i])\n # print('oi')\n uK[i - 1, j - 1] = w[i]\n self.t[i - 1, j - 1] = t\n self.x[i - 1, j - 1] = x\n error += pow(w[i] - self.uR[i - 1, j - 1], 2) / uK.size\n errorCurve[i - 1, j - 1] = (pow(w[i] - self.uR[i - 1, j - 1], 2)) / uK.size\n print('acabei para k= ({},{}) em {} segundos'.format(theta[0], theta[1], time.clock() - startTime))\n return (uK, error, errorCurve, theta)", "def h(X, theta, n_hidden_layers=1):\n _, a = feed_forward(X, theta, n_hidden_layers)\n L = n_hidden_layers + 1 # last layer\n\n hypothesis = a[L]\n return hypothesis", "def bidimensional_map_nonlin_2(h, t, x, y, x_0, y_0):\n\n gamma = 3 * np.pi\n r = np.sqrt(np.square(x - x_0) + np.square(y - y_0))\n\n f = lambda r: gamma * r\n\n return h(t - f(r))", "def refine_theta_parabolic_peak(\n request_recon_h,\n varphi_estimate,\n deg,\n n_pts = 3,\n alpha_fidelity = 1,\n):\n if n_pts < 3:\n raise ValueError(\n f\"The number of sample points {n_pts} is not enough to fit a parabola\"\n )\n half_window_width = np.pi / (2 * deg)\n angles = np.linspace(\n varphi_estimate - half_window_width,\n varphi_estimate + half_window_width,\n n_pts,\n )\n vals = np.array([request_recon_h(omega) for omega in angles], dtype=complex)\n # impose correction when the circuit error is not coherent\n vals = (vals + (1 - alpha_fidelity) * (1 + 1j) / 4) / alpha_fidelity\n p = np.polyfit(angles, np.abs(vals), 2)\n if p[0] >= 0:\n theta_estimate_mle = -np.inf\n varphi_estimate_mle = -np.inf\n return (theta_estimate_mle, varphi_estimate_mle), (angles, vals)\n else:\n peak_loc = -p[1] / (2 
* p[0])\n peak_val = p[2] - p[1] ** 2 / (4 * p[0])\n theta_estimate_mle = peak_val / deg\n varphi_estimate_mle = peak_loc\n return (theta_estimate_mle, varphi_estimate_mle), (angles, vals)", "def calcT(self, theta, phi, T_ss):\n return T_ss * np.maximum(0.0, np.cos(theta) * np.cos(phi))**0.25", "def H(lattice, i, j, h, T):\n\n gamma = np.random.rand()\n delta_E = -2*lattice[i, j] * (lattice[i, j - 1] + lattice[i - 1, j] + lattice[i, j + 1] + lattice[i + 1, j]) - 2*h * lattice[i, j]\n\n return not (delta_E > 0 and np.exp(-(delta_E)/(kb * T)) > gamma)", "def bidimensional_map_nonlin_1(h, t, x, y, x_0, y_0):\n\n gamma = 3 * np.pi\n r = np.sqrt(np.square(x - x_0) + np.square(y - y_0))\n\n f = lambda r: gamma * r + (gamma / 2) * r**2 + np.sqrt(r)\n\n return h(t - f(r))", "def z_eq(self):\n theta = self.T_cmb/2.7\n return 25000.*self.Omega_m*self.h**2.*theta**-4.", "def calc_theta(U, V, quad, h_length, radians):\n import numpy as np\n theta = np.arcsin(U / h_length)\n import numpy as np\n if quad == 1:\n theta = theta\n elif quad == 2:\n theta = -theta + np.pi / 2\n elif quad - - 3:\n theta = np.pi / 2 + theta + np.pi\n elif quad == 4:\n theta = 3 * np.pi / 2\n theta = 2 * np.pi - theta\n if not radians:\n theta = theta * 180 / np.pi\n\n return (theta)", "def h_T(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 2.9 * np.exp(-(U_N - 1.0)**2 / 28.09) * (B / N**3)**(1./4.)", "def ph(self,k,z=0):\n return self.p(k*self.h,z)*self.h**3", "def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h", "def func1(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2) - 0.5/(h**3)*(y_temp[(i+2)%N]-2*y_temp[(i+1)%N]+2*y_temp[(i-1)%N]-y_temp[(i-2)%N])\n return k", "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n zt = self.sigmoid(self.linearZ(concatHX))\n rt = self.sigmoid(self.linearR(concatHX))\n ht = (1-zt)*h + zt* self.tanh(self.linearH(rt*concatHX))\n return ht", "def layer(self, h, t):\n mr1h = torch.matmul(h, self.mr1.weight) # h => [m, d], self.mr1 => [d, k]\n mr2t = torch.matmul(t, self.mr2.weight) # t => [m, d], self.mr2 => [d, k]\n return torch.tanh(mr1h + mr2t)", "def func2(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2)\n return k", "def theta():\n pass", "def forward(self, z_t_1, h_x, phi_table):\n z_category = self.gen_z_t_dist_now(z_t_1, h_x)\n \n if self.use_gumbel_softmax:\n \n# device = z_category.device\n \n averaged_z_t = 0\n \n log_prob = Variable(torch.log(z_category))\n \n for k in range(self.sampling_times): \n curr_z_t = F.gumbel_softmax(log_prob, tau = 0.1)\n \n averaged_z_t += curr_z_t\n \n del curr_z_t\n \n# averaged_z_t = averaged_z_t.to(device)\n \n z_t = averaged_z_t/self.sampling_times\n else:\n z_t = z_category\n \n phi_z = torch.mm(z_t, torch.t(phi_table))\n# mu = self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_category, phi_z", "def 
eval_costh_phi_fold_pt_HX(data):\n return data.costh_HX_fold, data.phi_HX_fold, data.JpsiPt", "def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt", "def spherical_harmonics_vec(th,ph, lmax):\n Y = []\n lm = []\n n = lmax*(lmax+2)+1\n sph_it = spherical_harmonics_it(th,ph)\n for i in range(0, n):\n Ylm,l,m = sph_it.next()\n Y.append(Ylm)\n lm.append((l,m))\n assert l == lmax\n assert m == -lmax\n\n return Y, lm", "def get_heat_source_heating_output(\n theta_hs_out_h: np.ndarray, theta_hs_in: np.ndarray, v_supply: np.ndarray, operation: np.ndarray) -> np.ndarray:\n\n c = get_specific_heat()\n rho = get_air_density()\n\n q_hs_h = np.maximum((theta_hs_out_h - theta_hs_in) * c * rho * np.sum(v_supply, axis=0) * 10 ** (-6), 0.0)\n\n return np.where(operation == 'h', q_hs_h, 0.0)", "def calc_h_lat(dry_bulb_C, humidity_ratio_out_kgperkg):\n\n h_kJ_kg = humidity_ratio_out_kgperkg * (dry_bulb_C * CPW_kJ_kgC + h_we_kJ_kg)\n\n return h_kJ_kg", "def h(x, theta):\n # ... dopolnite (naloga 1)\n\n power = x.dot(-theta.T)\n\n return 1 / (1 + np.exp(power))", "def get_f_h_gas_comp_in(p: float, theta: float) -> float:\n\n return -1.00110355 * 10 ** (-1) * p ** 3 \\\n - 1.184450639 * 10 * p ** 2 \\\n - 2.052740252 * 10 ** 2 * p \\\n + 3.20391 * 10 ** (-6) * (theta + 273.15) ** 3 \\\n - 2.24685 * 10 ** (-3) * (theta + 273.15) ** 2 \\\n + 1.279436909 * (theta + 273.15) \\\n + 3.1271238 * 10 ** (-2) * p ** 2 * (theta + 273.15) \\\n - 1.415359 * 10 ** (-3) * p * (theta + 273.15) ** 2 \\\n + 1.05553912 * p * (theta + 273.15)+1.949505039 * 10 ** 2", "def parang (hourangle, declination, latitude):\n\n return -np.arctan2 (-np.sin (hourangle),\n np.cos (declination) * np.tan (latitude)\n - np.sin (declination) * np.cos (hourangle))", "def sph_harm_diff_phi(m, l, theta, phi):\n check_degree_and_order(m, l)\n return 1j * m * _sph_harm(m, l, theta, phi)", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) 
- Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def hrf_fit_err(theta, z, y, t_r, hrf_dur):\n h, _ = spm_hrf(theta, t_r, hrf_dur, False)\n return 0.5 * np.sum(np.square(y - spectral_convolve(h, z)))", "def sph(grlat, elong, ht):\n\n # Initialize Variables\n global cth, sth, clg, slg, dif, radn, gl # common/obs/\n gn = 9.798277692\n ae = 6378140.0\n f = 0.00335281\n rm = 0.00344978\n dr = 0.01745329252\n\n clong = np.cos(elong * dr)\n slong = np.sin(elong * dr)\n # latitude difference\n dvert = f * (1.0 + 0.5 * f) * np.sin(2.0 * grlat * dr) - 0.5 * f * f * np.sin(\n 4.0 * grlat * dr\n )\n gcclat = (3.1415926535898 / 2.0) - (grlat * dr - dvert)\n cthet = np.cos(gcclat)\n sthet = np.sin(gcclat)\n # geocentric radius\n radn = 1 - f * (cthet ** 2) * (1 + 1.5 * f * (sthet ** 2))\n # formulae for g are from jeffreys, 4.022 and 4.023\n g = gn * (\n 1\n + f\n - 1.5 * rm\n + f * (f - (27 / 14) * rm)\n + (2.5 * rm - f - f * (f - (39 / 14) * rm)) * (cthet ** 2)\n - (f / 2) * (7 * f - 15.0 * rm) * ((cthet * sthet) ** 2)\n )\n # free air correction\n g = g - g * (2.0 * ht * (1.0 + f + rm - 2.0 * f * (cthet ** 2)) / ae)\n\n # Conversion Here for Globals\n cth = cthet\n sth = sthet\n clg = clong\n slg = slong\n dif = dvert\n gl = g", "def isa(h):\n if np.any((h<0)|(h>11000)):\n print (\"The altitude must be between 0m and 11000m\")\n return \"Returning a string to cause errors later\"\n T = T0 + -0.0065*h\n p = p0*(T/T0)**(-g0/(-0.0065*R))\n rho = p/(R*T)\n return T,p,rho", "def Spin(phi,theta):\n return 1/2*(cos(phi)*sin(theta)*xhat + sin(phi)*sin(theta)*yhat + cos(theta)*zhat)", "def H(lattice, i, j, h, T):\n\n gamma = np.random.rand()\n delta_E = 2*lattice[i, j] * (lattice[i, j - 1] + lattice[i - 1, j] + lattice[i, j + 1] + lattice[i + 1, j]) + 2*h * lattice[i, j]\n\n return delta_E < 0 or gamma < np.exp(-(delta_E)/(kb * T))", "def h(self, z):\n # See definition at end of Section 1, p2 of Arnaud et al.\n return np.sqrt(self.Om*(1.+z)**3. 
+ self.Ol)", "def get_z(theta, phi):\n return math.cos(phi)/math.tan(theta/2) + 1j*math.sin(phi)/math.tan(theta/2)", "def compute_stability_fm_h(H, t0, u_attr, r_air, hc, d0, z0m, cp=1004.16):\n L_ob = H.expression(\n '-(r_air * cp * t0 * (u_attr ** 3.0) / 0.41 / 9.806 / H)',\n {'cp': cp, 'H': H, 'r_air': r_air, 't0': t0, 'u_attr': u_attr})\n L_ob = L_ob.where(L_ob.gte(0), -99.0)\n mm_h = H \\\n .expression(\n '((1 - (16.0 * (hc - d0) / L_ob)) ** 0.25)',\n {'d0': d0, 'hc': hc, 'L_ob': L_ob}) \\\n .where(L_ob.eq(-99.0), 0.0)\n fm_h = H \\\n .expression(\n '2.0 * log((1.0 + mm_h) / 2.0) + log((1.0 + (mm_h ** 2)) / 2.0) - '\n '2.0 * atan(mm_h) + (pi / 2)',\n {'mm_h': mm_h, 'pi': math.pi}) \\\n .where(L_ob.lte(-100).Or(L_ob.gte(100)), 0)\n\n # CGM - Swapped order of calc since d0 is an image compute from hc and\n # z_u is being set as a constant number (for now).\n fm_h = fm_h.where(fm_h.eq(hc.subtract(d0).divide(z0m).log()), fm_h.add(1.0))\n # fm_h = fm_h.where(fm_h.eq(hc.subtract(d0).divide(z0m).log()), fm_h.add(1.0))\n return fm_h", "def cost(h, y):\n\tm = y.shape[0]\n\tcost = (-1/m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))\n\treturn cost", "def guidance_target(self, state, final_x, y_profile, time_horizon, time_step, polyfit=None):\n current_x, current_y, current_theta, current_y_dot = state[XX], state[YY], state[THETA], state[Y_DOT]\n y_index = np.where(y_profile == np.extract(y_profile <= current_y, y_profile)[0])[0][0]\n\n # m, c = polyfit\n beta = 3\n gamma = 0.5\n\n delta_t = [t * time_step for t in range(0, time_horizon)]\n y = y_profile[y_index : y_index+time_horizon]\n y = np.clip(y, y_profile[-1], 26)\n # x_adjust = final_x * (1 - np.exp(-0.05 * np.array(delta_t)))\n # x = current_x + x_adjust * (final_x - current_x)\n x = current_x + (final_x - current_x) / (1 + np.exp(-np.linspace(-4, 4, num=time_horizon)))\n if current_x > final_x:\n x = np.clip(x, final_x, 33)\n else:\n x = np.clip(x, 0, final_x)\n\n y_dot = current_y_dot * np.exp(-np.linspace(0, 2, time_horizon)) # -2 * np.exp(-1/abs(y))\n x_dot = (x - final_x)\n\n theta = current_theta * np.exp(-beta * np.array(delta_t)) + current_theta * 0.3\n if theta[0] > 0:\n theta = np.clip(theta, 0, 15 * DEGTORAD)\n else:\n theta = np.clip(theta, -15 * DEGTORAD, 0)\n theta_dot = gamma * theta\n\n targets = [x, y, x_dot, y_dot, theta, theta_dot]\n return targets", "def infilCapaHorton(f0, fc, k, t):\n ft = fc + (f0 - fc)*np.exp(-k*t)\n return ft", "def gha(self):\n return np.mod(self.gmst*self.turndeg +\n self.turndeg*self.T*self.century +\n self.turndeg/2.0, self.turndeg)", "def func3(y, j, h, add_u = 0):\n y_temp = y[j] + add_u\n N = xsize\n k = np.zeros(xsize)\n for i in range(xsize):\n k[i] = -(1/4.)*(1./h)*(y_temp[(i+1)%N]**2-y_temp[(i-1)%N]**2) + (1/2.)*(1./h**2)*(y_temp[(i+1)%N]-2*y_temp[i%N]+y_temp[(i-1)%N])\n return k", "def vsh1(m, l, theta, phi):\n check_degree_and_order(m, l)\n return np.array((_sph_harm(m, l, theta, phi), 0, 0))", "def vsh3(m, l, theta, phi):\n if l==0: return np.array([0, 0, 0])\n\n r = 1\n R = np.array([r, 0, 0])\n gradY = sph_harm_gradient(m, l, r, theta, phi)\n return -1j * np.cross(R, gradY) / np.sqrt( l * (l + 1))", "def get_thetaRHT_hat(self, sample_psi0, rht_data):\n QRHT = np.sum(np.cos(2*sample_psi0)*rht_data)\n URHT = np.sum(np.sin(2*sample_psi0)*rht_data)\n theta_rht = np.mod(0.5*np.arctan2(URHT, QRHT), np.pi)\n\n return theta_rht", "def eval_costh_phi_fold_HX(data):\n return data.costh_HX_fold, data.phi_HX_fold", "def one_step(self, x, h):\n concatHX = torch.cat((x, h), 1)\n ft = 
self.sigmoid(self.linearF(concatHX))\n it = self.sigmoid(self.linearI(concatHX))\n newCt = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n #self.ct = ft*self.ct.clone() + it*self.tanh(self.linearC(concatHX))\n ot = self.sigmoid(self.linearO(concatHX))\n ht = ot*self.tanh(newCt)\n self.ct = newCt\n \n return ht", "def H(t, args):\n\n f0 = args['f0']\n n = args['n']\n omega = args['omega']\n omegaDt = args['omegaDt']\n omegaArgs = args['omegaArgs']\n\n ad = create(n)\n a = destroy(n)\n # H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation\n ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))\n # additional term because of w(t) not constant\n ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)\n # Force term (9**10^-9 = x0, extent of ground state wave function), see Wittmann diss\n # with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)\n ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)\n # ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)\n return(ham)", "def update_H(self):\n gamma = self.get_gamma()\n delta = self.get_delta()\n summand2 = ((1 + (gamma.transpose().dot(self.H).dot(gamma) /\n delta.transpose().dot(gamma))) *\n delta.dot(delta.transpose()) / delta.transpose().dot(gamma)\n )\n summand3 = - ((delta.dot(gamma.transpose()).dot(self.H) +\n self.H.dot(gamma).dot(delta.transpose())) /\n delta.transpose().dot(gamma))\n self.H = self.H + summand2 + summand3", "def _sph_harm(m, n, theta, phi):\n return sph_harm(m, n, phi, theta)", "def hrf_estim(z, y, t_r, dur, verbose=0):\n args = (z, y, t_r, dur)\n bounds = [(MIN_DELTA + 1.0e-1, MAX_DELTA - 1.0e-1)]\n f_cost = Tracker(hrf_fit_err, args, verbose)\n\n theta, _, _ = fmin_l_bfgs_b(\n func=hrf_fit_err, x0=MAX_DELTA, args=args,\n bounds=bounds, approx_grad=True, callback=f_cost,\n maxiter=99999, pgtol=1.0e-12)\n J = f_cost.J\n h, _ = spm_hrf(theta, t_r, dur, False)\n\n return h, J", "def _prob_v_given_h(self, h):\n return tf.sigmoid(tf.add(self.a, tf.matmul(h, tf.transpose(self.W))))", "def rk4(self, t, h,G) :\r\n k1 = h*self.calc_diff_eqn(t, self.quant_vec,G,self.mass_vec)\r\n k2 = h*self.calc_diff_eqn(t + 0.5*h , self.quant_vec + 0.5*k1 ,G, self.mass_vec)\r\n k3 = h*self.calc_diff_eqn(t + 0.5*h , self.quant_vec + 0.5*k2 ,G, self.mass_vec)\r\n k4 = h*self.calc_diff_eqn(t + h , self.quant_vec + k3 ,G, self.mass_vec)\r\n y_new = self.quant_vec + ((k1 + 2*k2 + 2*k3 + k4)/6)\r\n return y_new", "def theta_v_time():\n pass", "def T_naught(z, h, OM, OB):\n\n T0 = 28.5 * ((1.0+z)/10.0)**(0.5) * OB/0.042 * h/0.73 * (0.24/OM)**(0.5)\n return T0" ]
[ "0.5862929", "0.5850773", "0.57733494", "0.5686483", "0.56282175", "0.5592743", "0.55332625", "0.55067706", "0.55043215", "0.5491298", "0.5484844", "0.5470747", "0.5449616", "0.5431975", "0.5429191", "0.5417069", "0.54054326", "0.5379685", "0.53791565", "0.5375629", "0.5361891", "0.53573406", "0.5336242", "0.532041", "0.5293915", "0.52920234", "0.5254426", "0.5241588", "0.5235897", "0.5231546", "0.5231546", "0.5231546", "0.52311575", "0.51916766", "0.5190566", "0.5189652", "0.5180938", "0.5161224", "0.5159144", "0.51512045", "0.51433146", "0.5132367", "0.51217246", "0.5121117", "0.51159626", "0.5099102", "0.5086923", "0.50843567", "0.50836533", "0.50732934", "0.5071702", "0.5069259", "0.50681096", "0.50654995", "0.5064698", "0.5062462", "0.50561285", "0.5053195", "0.505197", "0.5047961", "0.50362456", "0.5025721", "0.50241864", "0.50150687", "0.5012761", "0.50125396", "0.5012422", "0.5011258", "0.50103176", "0.50068533", "0.49983248", "0.4992054", "0.49916747", "0.4988998", "0.497834", "0.49760893", "0.49728727", "0.49716958", "0.49706614", "0.49693283", "0.49603072", "0.49540213", "0.49522707", "0.49486235", "0.49431026", "0.49425134", "0.49415436", "0.49372387", "0.4935343", "0.49320382", "0.4928524", "0.49270064", "0.49229106", "0.49174798", "0.49158025", "0.49047348", "0.48805198", "0.48761627", "0.4867689", "0.48641452" ]
0.75179046
0
the_vars[0]= thetabar
the_vars[1] = h
the_vars[2] = qv

surface flux from drag law with subsidence and diagnosed deltheta
def dmixed_vars(the_vars,tstep,coeffs):
    deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]
    F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux
    Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux
    Fint = -coeffs.k*F0 #entrainment heat flux
    if coeffs.use_NT:
        # use NT parameterization by calculating we using function
        went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0) # Nicholls-Turton parameterization
    else:
        # use simple we parameterization
        went = -Fint/deltheta #simple entrainment parameterization
    # calculate delta_Fr
    delta_Frstar = 82.0 # Wm^-2
    Frlambda = 7.9 # Wm^-2, using with CTL from Gesso
    delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000 # convert qt_ft to g kg^-1
    Fqvent = -went*( coeffs.ft_qv - the_vars[2])
    wsubs = -coeffs.D*the_vars[1]
    rho=1.
    cp=1004.
    derivs=np.empty_like(the_vars)
    # higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling
    derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1]
    derivs[1] = went + wsubs
    derivs[2] = (Fqv0 - Fqvent)/the_vars[1]
    return derivs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ha(env, cstate=0):\n T1 = 10\n T2 = 10\n thM = 20\n thm = 5\n vr = 10.5\n v1 = -1.3\n v2 = -2.7\n assert(T1 == T2)\n\n delta = None # None to cause failure\n # The continous variables used in this ha\n x = T1 # clock1 variable\n y = T2 # clock2 variable\n th = 11.5 # The reactor temperature\n\n # You need vtol here, because of floating point error.\n loc0_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc0_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(vr),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc0_FT = False\n\n loc1_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc1_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v1),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc1_FT = False\n\n loc2_ode_x = ODE(env, S.sympify('diff(x(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_y = ODE(env, S.sympify('diff(y(t))'), S.sympify('1.0'),\n ttol=10**-3, iterations=100)\n loc2_ode_th = ODE(env, S.sympify('diff(th(t))'), S.sympify(v2),\n ttol=10**-3, iterations=100, vtol=10**-10)\n loc2_FT = False\n\n # Location 3 is reactor shutdown\n loc3_FT = False\n\n # Location 0\n def location0(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thM and x >= T1:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 1, 0, x, y, th, None, True, None, None, curr_time\n elif th == thM and y >= T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 2, 0, x, y, th, None, None, True, None, curr_time\n elif th == thM and x < T1 and y < T2:\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 3, 0, x, y, th, None, None, None, True, curr_time\n # The invariant\n elif th <= thM:\n if not loc0_FT:\n x = loc0_ode_x.compute(vals, curr_time-prev_time)\n y = loc0_ode_y.compute(vals, curr_time-prev_time)\n th = loc0_ode_th.compute(vals, curr_time-prev_time)\n loc0_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thM) > loc0_ode_th.vtol:\n deltath = loc0_ode_th.delta(vals, quanta=(thM-th))\n else:\n th = thM\n deltath = 0\n return 0, deltath, x, y, th, False, None, None, None, curr_time\n else:\n # print('th:', th)\n raise RuntimeError('Reached unreachable branch'\n ' in location 0')\n\n def location1(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n x = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc1_FT:\n x = loc1_ode_x.compute(vals, curr_time-prev_time)\n y = loc1_ode_y.compute(vals, curr_time-prev_time)\n th = loc1_ode_th.compute(vals, curr_time-prev_time)\n loc1_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc1_ode_th.vtol:\n deltath = loc1_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 1, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached 
unreachable branch'\n ' in location 1')\n\n def location2(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n vals = {S.sympify('x(t)'): x,\n S.sympify('y(t)'): y,\n S.sympify('th(t)'): th}\n curr_time = env.now\n # The edge guard takes preference\n if th == thm:\n y = 0 # Reset\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n return 0, 0, x, y, th, True, None, None, None, curr_time\n # The invariant\n elif th >= thm:\n if not loc2_FT:\n x = loc2_ode_x.compute(vals, curr_time-prev_time)\n y = loc2_ode_y.compute(vals, curr_time-prev_time)\n th = loc2_ode_th.compute(vals, curr_time-prev_time)\n loc2_FT = True\n # print('%7.4f %7.4f %7.4f %7.4f' % (curr_time, x, y, th))\n if abs(th-thm) > loc2_ode_th.vtol:\n deltath = loc2_ode_th.delta(vals, quanta=(thm-th))\n else:\n th = thm\n deltath = 0\n return 2, deltath, x, y, th, False, None, None, None, curr_time\n else:\n raise RuntimeError('Reached unreachable branch'\n ' in location 2')\n\n def location3(x, y, th, loc0_FT, loc1_FT, loc2_FT, loc3_FT, prev_time):\n global step\n # print('total steps: ', step)\n # Done\n print(time.time()-start)\n sys.exit(1)\n\n # The dictionary for the switch statement.\n switch_case = {\n 0: location0,\n 1: location1,\n 2: location2,\n 3: location3\n }\n\n prev_time = env.now\n while(True):\n (cstate, delta, x, y, th,\n loc0_FT, loc1_FT, loc2_FT, loc3_FT,\n prev_time) = switch_case[cstate](x, y, th,\n loc0_FT,\n loc1_FT,\n loc2_FT,\n loc3_FT,\n prev_time)\n # This should always be the final statement in this function\n global step\n step += 1\n yield env.timeout(delta)", "def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):\n thetal_m = the_vars[0]\n qt_m = the_vars[2]\n zi = the_vars[1]\n dth = deltheta\n \n thetal_ft = thetal_m + dth\n qt_ft = coeffs.ft_qv\n \n dqt = qt_ft - qt_m\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-zi)\n LTS = thetal_3000 - coeffs.sst # lower tropospheric stability\n\n # calculate coefficients\n press=tf.find_press(zi)\n Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)\n Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)\n \n invert= tf.t_uos_thetal(thetal_m,qt_m,press)\n T_0 = invert.temp\n lv=tf.L_t(invert.temp)\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n del_thv_dry = Ad * dth + Bd * dqt\n del_thv_sat = Aw * dth + Bw * dqt\n \n # account for evaporative cooling (increases we)\n ql_max = invert.ql\n Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)\n Del_thv = del_thv_dry - Cl * ql_max\n \n # calculate buoyancy integral terms\n rho = 1.\n lcl_press=tf.LCL_thetal(thetal_m,qt_m)\n zb=tf.find_height(lcl_press)\n\n T1 = zb/zi\n T2 = 0.5 * zb**2 / zi**2\n T3 = (zi-zb)/zi\n T4 = 0.5 * (zi**2 - zb**2) / zi**2\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n wtl_0=F0\n wqt_0=Fqv0\n Del_F = delta_Fr/(tc.CPD*rho) # use sensitivity to radiation a la Gesso Fig. 3\n term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))\n term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))\n term3 = Del_F * (Ad * T2 + Aw * T4)\n\n Theta_NE = term1 + term2 + term3\n \n # calculate w*\n wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)\n \n # calculate chi*\n chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)\n \n # calculate del_m\n Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)\n \n # calculate we\n a2=15.\n Del_thv_NT = Del_thv / (1. + a2 * (1. 
- Del_m/Del_thv))\n \n A_NT = 0.2\n fac_NT = 2.5\n\n term4 = Del_thv_NT\n term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)\n denominator = term4 + term5\n\n we = A_NT * fac_NT * Theta_NE / denominator\n \n return we", "def dewT_2_q_magnus(ds, var):\n A1, B1, C1 = 17.625, 243.04, 610.94\n vpsl = C1 * np.exp(A1 * (ds[var['dew']] - 273.15) / (B1 + (ds[var['dew']] - 273.15)))\n wsl = eps0 * vpsl / (ds[var['pressure']] - vpsl)\n ds[var['spec_h']] = wsl / (1 + wsl)\n return ds", "def _redef_sp1_vars(self):\r\n\r\n if len(self.fq_list) == 0:\r\n no_rad = True\r\n lst_tmp = np.matrix(np.reshape(self.lst_tmp, \r\n (self.lst_tmp.size, 1)))\r\n else: no_rad = False\r\n # The practically constants...\r\n # Big Epsilon:\r\n if self.cond == True:\r\n self.Epsilon = self.d_T * self.thermal_conductivity\r\n else:\r\n self.Epsilon = (self.diff_scale ** 2) / \\\r\n (3.0 * self.absorb_coeffs[self.rad] ** 2)\r\n # Beta:\r\n if self.cond == True:\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff)\r\n else:\r\n self.Beta = (1.0 + 3.0 * self.r2) * (2.0 * self.diff_scale) / \\\r\n ((1.0 - 2.0 * self.r1) * (\r\n 3.0 * self.absorb_coeffs[self.rad]))\r\n\r\n # The feild solutions at the last timestep.\r\n # The integral vF:\r\n if self.cond == True:\r\n # The horrifically complicated F:\r\n def F_func_cond(elem, eta):\r\n F = 0.0\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F += Tn\r\n for k in range(0, len(self.fq_list)):\r\n vk = self.fq_list[k]\r\n try:\r\n vk_m = self.fq_list[k - 1]\r\n except:\r\n vk_m = self.v0_frequency\r\n absorbtion = self.absorb_coeffs[k]\r\n phi = elem.eval_elem(self.node_map, self.lst_rad[k],\r\n [eta])[0]\r\n inter1 = phi - 4.0 * sconst.pi * \\\r\n self.B_int_function(Tn, self.refr_idx_vol,\r\n vk, vk_m)\r\n inter2 = absorbtion * self.d_T / (self.diff_scale ** 2)\r\n F += inter2 * inter1\r\n return elem.funcs(eta) * F\r\n if not no_rad:\r\n # We're integrating something non-linear for SP1\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_cond,\r\n self.node_map)\r\n else:\r\n # Or something easier if we're only looking at heat.\r\n self.vF_vect_vol = np.array(self.uv_vol * lst_tmp).reshape(-1)\r\n else:\r\n def F_func_radiative(elem, eta):\r\n T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n F = 4.0 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_radiative,\r\n self.node_map)\r\n # The path integral vf:\r\n if self.cond == True:\r\n def f_func_cond(elem, eta):\r\n Tb = self.background_temperature\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n n = self.refr_idx_background\r\n vk = self.v0_frequency\r\n vk_minus = 0\r\n Bb0 = self.B_int_function(Tb, n, vk, vk_minus)\r\n Bn0 = self.B_int_function(Tn, n, vk, vk_minus)\r\n B_coeff = (self.alpha * sconst.pi) / self.convect_coeff\r\n f = Tb + B_coeff * (Bb0 - Bn0)\r\n return elem.funcs(eta) * f\r\n if not no_rad:\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_cond,\r\n self.node_map)\r\n else:\r\n try:\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n except AttributeError:\r\n def elem_functor(elem, eta): return elem.funcs(eta)\r\n self.cache_tb_integral_array = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n elem_functor,\r\n 
self.node_map)\r\n self.cache_tb_integral_array *= self.background_temperature\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n \r\n else:\r\n # Radiation f = 4*pi*B^{(k)}(T_b, n_g)\r\n def f_func_radiative(elem, eta):\r\n T = self.background_temperature\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n f = 4 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_radiative,\r\n self.node_map)\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vf_vect_bound.shape[0] == \\\r\n self.vF_vect_vol.shape[0])", "def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)", "def __init__(self,b,u,v,hbls_old,hbbl_old,Kv_old,Kt_old,srflx,sustr,svstr,f,grid_dict,tstep_mode,dt):\n \n # INPUTS FROM TTTW SYSTEM\n self.b = b #buoyancy field: [Ly,N]\n self.u = u # x-component of velocity [Ly,N]\n self.v = v # y-component of velocity [Ly+1,N]\n self.hbls_old = hbls_old #boundary layer depth from previous time step [Ly]\n self.hbbl_old = hbbl_old # bottom boundary layer depth from previous time step [Ly]\n self.Kv_old = Kv_old # momentum mixing coefficeint from previous time step [Ly,N+1]\n self.Kt_old = Kt_old # tracer mixing coefficient from previous time step [Ly,N+1]\n self.srflx = srflx #solar heat flux [Ly] (degC * (m/s))\n self.sustr = sustr # x-component surface wind stress [Ly] (N/m^2) \n self.svstr = svstr # y-component surface wind stress [Ly+1] (N/m^2)\n self.grid_dict = grid_dict #gridded data\n self.f = f #coriolis parameter\n # KPP-SPECIFIC VARIABLES \n self.hbls = np.zeros([self.b.shape[0]])\n self.hbbl = np.zeros([self.b.shape[0]])\n self.ustar = []\n self.bvf = [] \n self.kmo = []\n self.C_h_MO = []\n self.kbl = []\n self.Cr = [] \n self.Fc = []\n self.ghat = [] #NONLOCAL TERM: TO BE USED IN TIME STEPPING\n self.tstep_mode = tstep_mode# if in time steppign mode, turn on HBL_RATE_LIMIT\n self.dt = dt", "def CalcForce_aeroframe_DEP(V, CoefMatrix, x, rho, g):\r\n\r\n #Compute aero forces\r\n # here x must be of the form (alpha, beta, p, q, r, da, dr, de) (last one punctualy used)\r\n # set non dim for p,q,r\r\n nonDim=np.ones(7)\r\n nonDim[2]=g.b/(2*V)\r\n nonDim[3]=g.c/(2*V)\r\n nonDim[4]=g.b/(2*V)\r\n # F=np.dot(CoefMatrix,x[0:7]) # commented form, modification to account for symmetric drag increase of side slip\r\n F=np.zeros((3))\r\n M=np.zeros((3))\r\n xsym=np.copy(x[0:-1])\r\n xsym[1]=abs(xsym[1]) # make beta always positive since derivatives have already correct sign for drag and lift only\r\n xsym[-3]=abs(xsym[-3]) # make ailerons deflection always positive for drag increase and lift decrease\r\n xsym[-1]=abs(xsym[-1]) # make rudder deflection always positive for drag increase and lift decrease\r\n F[0]=np.dot(CoefMatrix[0],xsym)\r\n F[1]=np.dot(CoefMatrix[1],x[0:-1]) #side force\r\n F[2]=np.dot(CoefMatrix[2],xsym)\r\n M=np.dot(CoefMatrix[3:6,:],x[0:-1])\r\n# print(\"Printing moment coeff\")\r\n# print(M)\r\n\r\n \r\n #No 
need to project\r\n# alpha=x[0]\r\n# beta=x[1]\r\n# H=np.array([[math.cos(alpha)*math.sin(beta), -math.cos(alpha)*math.sin(beta), -math.sin(alpha)],[math.sin(beta), math.cos(beta), 0],[math.sin(alpha)*math.cos(beta), -math.sin(alpha)*math.sin(beta), math.cos(alpha)]])\r\n if V<=71 :\r\n Fbody=np.array([-F[0]-g.Cd0_fl,F[1],-F[2]-g.CL0_fl]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0_fl,0])\r\n else:\r\n Fbody=np.array([-F[0]-g.Cd0,F[1],-F[2]-g.CL0]) # add alpha=0 coefficients\r\n Moment=M+np.array([0,x[-1]*g.Cm_de+g.Cm0,0])\r\n \r\n\r\n Fbody=0.5*V**2.0*rho*g.S*Fbody\r\n Moment=0.5*V**2.0*rho*g.S*g.b*Moment\r\n \r\n return np.append(Fbody, Moment)", "def driftRHS_3D(field,drift_velocity,t,x):\n f = field.getValue(x)\n fs = np.sqrt(f[0]**2 + f[1]**2 + f[2]**2)\n f = f/fs\n return -f*drift_velocity(fs)", "def dynamics(self,eta,nu,u_actual,u_control,sampleTime): \n \n # Current velocities\n u_c = self.V_c * math.cos(self.beta_c - eta[5]) # current surge velocity\n v_c = self.V_c * math.sin(self.beta_c - eta[5]) # current sway velocity \n \n nu_c = np.array([u_c,v_c,0,0,0,0],float) # current velocity vector\n nu_r = nu - nu_c # relative velocity vector\n \n U_r = math.sqrt( nu_r[0]**2 + nu_r[1]**2 ) # relative speed\n \n # Rudder command and actual rudder angle\n delta_c = u_control[0]\n delta = u_actual[0]\n \n # Rudder forces and moment (Fossen 2021, Chapter 9.5.1)\n b = 0.7 * self.T # rudder height\n AR = b**2 / self.Lambda # aspect ratio: Lamdba = b**2/AR \n CN = 6.13 * self.Lambda / ( self.Lambda + 2.25 ) # normal coefficient\n t_R = 1 - 0.28 * self.Cb - 0.55\n a_H = 0.4\n x_R = -0.45 * self.L\n x_H = -1.0 * self.L\n\n Xdd = -0.5 * ( 1 - t_R ) * self.rho * U_r**2 * AR * CN\n Yd = -0.25 * ( 1 + a_H ) * self.rho * U_r**2 * AR * CN \n Nd = -0.25 * ( x_R + a_H * x_H ) * self.rho * U_r**2 * AR * CN \n \n # Control forces and moment\n delta_R = -delta # physical rudder angle (rad)\n T = self.tau_X # thrust (N)\n t_deduction = 0.1 # thrust deduction number\n tau1 = ( 1 - t_deduction ) * T - Xdd * math.sin( delta_R )**2 \n tau2 = -Yd * math.sin( 2 * delta_R ) \n tau6 = -Nd * math.sin( 2 * delta_R ) \n tau = np.array( [ tau1, tau2, tau6 ],float) \n \n # Linear maneuvering model\n T_surge = self.L # approx. time constant in surge (s)\n xg = 0 # approx. 
x-coordinate, CG (m) \n \n # 3-DOF ship model\n [M,N] = clarke83(U_r,self.L, self.B, self.T,self.Cb,self.R66,xg,T_surge)\n Minv = np.linalg.inv(M)\n nu3 = np.array( [ nu_r[0], nu_r[1], nu_r[5] ]) \n nu3_dot = np.matmul( Minv, tau - np.matmul(N,nu3) ) \n \n # 6-DOF ship model\n nu_dot = np.array( [ nu3_dot[0],nu3_dot[1],0,0,0,nu3_dot[2] ]) \n\n # Rudder angle saturation\n if ( abs(delta) >= self.deltaMax * math.pi / 180 ):\n delta = np.sign(delta) * self.deltaMax * math.pi / 180\n \n # Rudder dynamics\n delta_dot = (delta_c - delta) / self.T_delta \n\n # Forward Euler integration [k+1]\n nu = nu + sampleTime * nu_dot\n delta = delta + sampleTime * delta_dot\n\n u_actual = np.array([delta],float) \n\n return nu, u_actual", "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = 
(vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "def fluid_reynolds(uu, param, grid, lnrho=list(), shock=list(), nghost=3,\n lmix=True):\n #viscous forces\n th2 = 2./3\n th1 = 1./3\n fvisc = np.zeros_like(uu)\n #molecular viscosity contribution\n ldel2, lshock, lhyper3 = False, False, False\n for ivisc in param.ivisc:\n if not 'shock' in ivisc and not 'hyper' in ivisc\\\n and not '\\n' in ivisc:\n ldel2 = True\n if 'shock' in ivisc:\n lshock = True\n if 'hyper3' in ivisc:\n lhyper3 = True\n \n if ldel2:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n del2u = np.zeros_like(uu)\n for j in range(0,3):\n del2u[j] = del2(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n del2u[j, : nghost, nghost:-nghost, nghost:-nghost] = del2u[j,-2*nghost: -nghost, nghost: -nghost, nghost: -nghost]\n del2u[j,-nghost: , nghost:-nghost, nghost:-nghost] = del2u[j, nghost:2*nghost, nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, : nghost, nghost:-nghost] = del2u[j, nghost: -nghost,-2*nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost,-nghost: , nghost:-nghost] = del2u[j, nghost: -nghost, nghost:2*nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost, : nghost] = del2u[j, nghost: -nghost, nghost: -nghost,-2*nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost,-nghost: ] = del2u[j, nghost: -nghost, nghost: -nghost, nghost:2*nghost]\n for ivisc in param.ivisc:\n ivisc = str.strip(ivisc,'\\n')\n if 'nu-const' not in ivisc and 'shock' not in ivisc\\\n and 'hyper' not in ivisc and len(ivisc) > 0:\n print('fluid_reynolds WARNING: '+ivisc+' not implemented\\n'+\n 'terms may be missing from the standard rate of strain tensor')\n fvisc = fvisc + param.nu*del2u\n del(del2u)\n tmp0 = grad(uu[0],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp0[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp0[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp0[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp0[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp0[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp0[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp0[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n tmp1 = grad(uu[1],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp1[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp1[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp1[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp1[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp1[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp1[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp1[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n 
tmp2 = grad(uu[2],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp2[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp2[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp2[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp2[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp2[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp2[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp2[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #effect of compressibility \n if len(lnrho) > 0:\n divu = div(uu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n divu[ :nghost,nghost:-nghost,nghost:-nghost] = divu[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n divu[-nghost:,nghost:-nghost,nghost:-nghost] = divu[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost, :nghost,nghost:-nghost] = divu[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost,-nghost:,nghost:-nghost] = divu[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost, :nghost] = divu[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost,-nghost:] = divu[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n gradlnrho = grad(lnrho,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradlnrho[j, :nghost,nghost:-nghost,nghost:-nghost] = gradlnrho[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradlnrho[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost, :nghost,nghost:-nghost] = gradlnrho[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradlnrho[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost, :nghost] = gradlnrho[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradlnrho[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n Sglnrho = np.zeros_like(uu)\n Sglnrho[0] = dot(tmp0,gradlnrho) +\\\n (tmp0[0]+tmp1[0]+tmp2[0]-th2*divu)*gradlnrho[0] \n Sglnrho[1] = dot(tmp1,gradlnrho) +\\\n (tmp0[1]+tmp1[1]+tmp2[1]-th2*divu)*gradlnrho[1]\n Sglnrho[2] = dot(tmp2,gradlnrho) +\\\n (tmp0[2]+tmp1[2]+tmp2[2]-th2*divu)*gradlnrho[2]\n graddivu = grad(divu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n graddivu[j, :nghost,nghost:-nghost,nghost:-nghost] = graddivu[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,-nghost:,nghost:-nghost,nghost:-nghost] = graddivu[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost, :nghost,nghost:-nghost] = graddivu[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,-nghost:,nghost:-nghost] = graddivu[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost, :nghost] = graddivu[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost,-nghost:] = graddivu[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n fvisc = fvisc + param.nu*(th1*graddivu+Sglnrho)\n del(Sglnrho)\n 
elif param.ldensity:\n print('fluid_reynolds WARNING: no lnrho provided\\n'+\n 'rate of strain tensor likely incomplete')\n #shock contribution\n if lshock:\n if len(shock) == 0:\n print('fluid_reynolds WARNING: no shock provided\\n'+\n 'rate of strain tensor likely incomplete')\n else:\n shock[ :nghost,nghost:-nghost,nghost:-nghost] = shock[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n shock[-nghost:,nghost:-nghost,nghost:-nghost] = shock[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost, :nghost,nghost:-nghost] = shock[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost,-nghost:,nghost:-nghost] = shock[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost, :nghost] = shock[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost,-nghost:] = shock[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n divugradlnrho = np.zeros_like(uu)\n gradshock = grad(shock,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradshock[j, :nghost,nghost:-nghost,nghost:-nghost] = gradshock[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradshock[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost, :nghost,nghost:-nghost] = gradshock[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradshock[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost, :nghost] = gradshock[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradshock[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n for j in range(0,3):\n divugradlnrho[j] = param.nu_shock*divu*gradshock[j] +\\\n param.nu_shock*shock*(divu*gradlnrho[j] + graddivu[j])\n del(divu,gradshock,gradlnrho,graddivu)\n fvisc = fvisc + divugradlnrho\n del(divugradlnrho)\n if lhyper3:\n #deluij5 = np.zeros_like([uu,uu,uu])\n #uij5glnrho to be included\n del6u = np.zeros_like(uu)\n for j in range(0,3):\n del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz)\n del6u[j, :nghost,nghost:-nghost,nghost:-nghost] = del6u[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,-nghost:,nghost:-nghost,nghost:-nghost] = del6u[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost, :nghost,nghost:-nghost] = del6u[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,-nghost:,nghost:-nghost] = del6u[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost, :nghost] = del6u[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost,-nghost:] = del6u[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n fvisc = fvisc + param.nu_hyper3*del6u\n del(del6u)\n fvisc2 = np.sqrt(dot2(fvisc))\n #advective forces\n advec = np.zeros_like(uu)\n advec[0] = dot(uu,tmp0)\n advec[1] = dot(uu,tmp1)\n advec[0] = dot(uu,tmp2)\n del(tmp0,tmp1,tmp2)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fvisc2.max() > 0:\n fvisc2[np.where(fvisc2==0)] = fvisc2[np.where(fvisc2>0)].min()\n Re = advec2/fvisc2\n #set minimum floor to exclude zero-valued Re \n Re[np.where(Re==0)] = Re[np.where(Re>0)].min()\n else:\n Re = advec2\n print('Re undefined')\n 
return Re", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def trajectories(t_upper=3600*24*687, h=100, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n # We check if parameters are all positive\n\n list_parameters = [t_upper, h, m1, m2, m3,\n a1, a2]\n\n for parameters in list_parameters:\n\n if parameters < 0:\n print(f'You have entered a negative parameter')\n\n # initial values for planet 1 in x, y and z direction\n x_i1 = a1\n y_i1 = 0\n v_x1i = 0\n v_y1i = 29779.301841746023\n z_i1 = 0\n v_z1i = 0\n\n # initial values for planet 2 in x, y and z direction\n x_i2 = a2\n y_i2 = 0\n v_x2i = 0\n v_y2i = 24154.203325249873\n z_i2 = 0\n v_z2i = 0\n\n # initial values for Sun in x, y and z direction\n x_i3 = 0\n y_i3 = 0\n v_x3i = 0\n v_y3i = 0\n z_i3 = 0\n v_z3i = 0\n\n# Initial positions and velocities\n r = np.array([x_i1, y_i1, v_x1i, v_y1i, x_i2,\n y_i2, v_x2i, v_y2i, x_i3, y_i3, v_x3i, v_y3i,\n z_i1, z_i2, z_i3, v_z1i, v_z2i, v_z3i])\n\n # We create vectors which will contains the trajectories\n # and velocities of each bodies\n x_pnts1 = [x_i1]\n y_pnts1 = [y_i1]\n v_x_pnts1 = [v_x1i]\n v_y_pnts1 = [v_y1i]\n\n x_pnts2 = [x_i2]\n y_pnts2 = [y_i2]\n v_x_pnts2 = [v_x2i]\n v_y_pnts2 = [v_y2i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n x_pnts3 = [x_i3]\n y_pnts3 = [y_i3]\n v_x_pnts3 = [v_x3i]\n v_y_pnts3 = [v_y3i]\n\n z_pnts1 = [z_i1]\n z_pnts2 = [z_i2]\n z_pnts3 = [z_i3]\n\n v_z_pnts1 = [v_z1i]\n v_z_pnts2 = [v_z2i]\n v_z_pnts3 = [v_z3i]\n\n m1 = m1\n m2 = m2\n m3 = m3\n a1 = a1\n a2 = a2\n\n # We create a vector which will contain the time\n # Initial value\n t_i = 0.0\n t_values = [t_i]\n\n for t in range(0, t_upper, h):\n\n # We used the RK4 formula here\n k1 = h*derivative(r=r, t=0, m1=5.972e+24, m2=m2, m3=1.989e+30,\n a1=a1, a2=1.52*1.496e+11)\n k2 = h*derivative(r=r + 0.5*k1, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k3 = h*derivative(r=r + 0.5*k2, t=t + (h/2), m1=5.972e+24,\n m2=6.417e+23, m3=1.989e+30, a1=1.0*1.496e+11,\n a2=1.52*1.496e+11)\n k4 = h*derivative(r=r + h*k3, t=t+h, m1=5.972e+24, m2=6.417e+23,\n m3=1.989e+30, a1=1.0*1.496e+11, a2=1.52*1.496e+11)\n\n # We calculate the new vector r\n r += (k1 + 2*k2 + 2*k3 + k4)*(1.0/6.0)\n\n # We add the new points calculated\n x_pnts1.append(r[0])\n y_pnts1.append(r[1])\n\n v_x_pnts1.append(r[2])\n v_y_pnts1.append(r[3])\n\n x_pnts2.append(r[4])\n y_pnts2.append(r[5])\n v_x_pnts2.append(r[6])\n v_y_pnts2.append(r[7])\n\n 
x_pnts3.append(r[8])\n y_pnts3.append(r[9])\n v_x_pnts3.append(r[10])\n v_y_pnts3.append(r[11])\n\n z_pnts1.append(r[12])\n z_pnts2.append(r[13])\n z_pnts3.append(r[14])\n\n v_z_pnts1.append(r[15])\n v_z_pnts2.append(r[16])\n v_z_pnts3.append(r[17])\n\n t_values.append(t)\n\n # We return all the trajectories\n return x_pnts1, y_pnts1, x_pnts2, y_pnts2, x_pnts3, y_pnts3, z_pnts1, z_pnts2, z_pnts3", "def rhs(y, t, l, m, g):\n # Unpack the states so you can use the variable names in the\n # sympy.physics.mechanics equations\n q1 = y[0]\n q2 = y[1]\n u1 = y[2]\n u2 = y[3]\n # or you can make use of python's tuple unpacking for a one liner\n # q1, q2, u1, u2 = y\n\n # Initialize a vector for the derivatives.\n dydt = zeros((len(y)))\n\n # Compute the derivatives, these are pasted in from the\n # sympy.physics.mechanics results.\n dydt[0] = u1\n dydt[1] = u2\n dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -\n g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -\n l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +\n l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -\n l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +\n 2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))\n dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -\n l**2*m*(-sin(q1)*cos(q2) +\n sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -\n l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -\n sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))\n - l**2*m)\n\n # Return the derivatives.\n return dydt", "def main():\r\n #Drag Force Equation: 1/2 * rho * Cd * A * v^2\r\n\r\n #User-Defined Constants\r\n global m\r\n global v0\r\n global theta\r\n global rho #Fluid Density\r\n global A #Cross-sectional Area\r\n global Cd #Drag coefficient\r\n global tStep\r\n global g\r\n\r\n m = 1\r\n v0 = 30\r\n theta = math.radians(45)\r\n rho = 1.225\r\n A = 0.05\r\n Cd = 0.5 #A ball is approx. 
0.5\r\n tStep = 0.005\r\n g = 9.8\r\n\r\n\r\n #Data Structures\r\n global tHist\r\n global xHist\r\n global yHist\r\n global thetaHist\r\n global vHist\r\n global vXHist\r\n global vYHist\r\n tHist = [] #list for all time steps\r\n xHist = [] #list for all x position steps\r\n yHist = [] #list for all y position steps\r\n thetaHist = [] #List for all theta at every time step\r\n vHist = [] #list for all velocities at every time step\r\n vXHist = [] #list for all x-axis velocities at every time step\r\n vYHist = [] #list for all y-axis velocities at every time step\r\n\r\n #Initialize intial values\r\n tHist.append(0.0)\r\n xHist.append(0.0)\r\n yHist.append(0.0)\r\n thetaHist.append(theta)\r\n vHist.append(v0)\r\n vXHist.append(v0 * math.cos(theta))\r\n vYHist.append(v0 * math.sin(theta))\r\n vTheta = math.atan(vYHist[0] / vXHist[0])\r\n # print(\"t: \" + str(tHist[0]))\r\n # print(\"x: \" + str(xHist[0]))\r\n # print(\"y: \" + str(yHist[0]))\r\n # print(\"v: \" + str(vHist[0]))\r\n # print(\"Vx: \" + str(vXHist[0]))\r\n # print(\"Vy: \" + str(vYHist[0]))\r\n\r\n #Convenience variables\r\n global k\r\n\r\n counter = 1\r\n #Loop until the y-displacement becomes negative (projectile reaches ground again)\r\n while True:\r\n tHist.append(counter * tStep) #increment time\r\n print(\"t: \" + str(tHist[counter]))\r\n\r\n #This large hunk is the solution to the net force differential equation in the x-axis\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep)) #STABLE\r\n # oneOverVX = (1/vXHist[counter-1]) + (((rho*A*Cd)/(2*m))*(tStep))\r\n # oneOverVX = (1/vHist[counter-1]) + (((rho*A*Cd*math.cos(thetaHist[counter-1]))/(2*m))*(tStep))\r\n oneOverVX = (1/vXHist[counter-1]) + ((rho*A*Cd)/(2*m*math.cos(thetaHist[counter-1]))*(tStep)) #This is one over the solution for velocity in the x-axis net force differential equation\r\n vXHist.append(1 / oneOverVX) #Adding the velocity to the list of velocities\r\n\r\n vY0 = vYHist[counter-1] #Convenience variable\r\n # k = 0.5 * rho * A * Cd * math.sin(abs(thetaHist[counter-1])) #STABLE\r\n # k = 0.5 * rho * A * Cd\r\n k = (rho * A * Cd) / (2 * math.sin(abs(thetaHist[counter-1]))) #Convenience variable\r\n print(\"k: \" + str(k))\r\n print(\"vX: \" + str(vXHist[counter]))\r\n rootGMK = math.sqrt(g*m*k) #Convenience variable\r\n if vYHist[counter-1] > 0.0: #If the projectile is going upwards\r\n #Solving the y-axis differential equation for velocity\r\n equationRight = -rootGMK * ((tStep/m) - (math.atan((k*vY0)/(rootGMK))/rootGMK))\r\n vYHist.append((math.tan(equationRight) * rootGMK) / k)\r\n elif vYHist[counter-1] < 0.0: #If the projectile is going downwards\r\n #Solving the y-axis differential equation for velocity\r\n\r\n # Hand-solved integral\r\n # exponent = -(2*tStep*rootGMK)/m\r\n # numerator = g*m*math.exp(exponent) - math.exp(exponent)*vY0*rootGMK - vY0*rootGMK - g*m\r\n # denominator = math.exp(exponent)*(vY0-rootGMK) - vY0*k - rootGMK\r\n # vYHist.append(numerator / denominator)\r\n\r\n #Wolfram Alpha arctanh integral\r\n arctanh =(vY0*math.sqrt(k))/(math.sqrt(g*m))\r\n print(\"arctanh: \" + str(arctanh))\r\n equationRight = (np.arctanh(arctanh))/(rootGMK) - (tStep/m)\r\n vYHist.append(np.tanh(rootGMK * equationRight) * ((math.sqrt(g*m))/(math.sqrt(k))))\r\n else: #If current y velocity is 0\r\n vYHist.append(vY0 - g*tStep)\r\n print(\"vY: \" + str(vYHist[counter]))\r\n\r\n vHist.append(math.hypot(vXHist[counter], vYHist[counter])) #Calculate the net velocity and add it to the velocities list\r\n 
print(\"v: \" + str(vHist[counter]))\r\n thetaHist.append(math.atan(vYHist[counter]/vXHist[counter])) #Calculate the current angle based on the velocities and add it to the theta list\r\n print(\"0: \" + str(math.degrees(thetaHist[counter])))\r\n\r\n x0 = xHist[counter-1]\r\n y0 = yHist[counter-1]\r\n\r\n # yIntegral = trigintegrate()\r\n\r\n \"\"\"\r\n Note: What I wanted to do here was to integrate the velocity functions over the time interval to find the exact\r\n changes in position. Unfortunately, I was running short of time and decided it was not worth it to move forward with\r\n this final step, and instead worked on the presentation and testing different cases.\r\n \"\"\"\r\n xHist.append(x0 + vXHist[counter]*tStep) #Calculate new x position using x = x0 + vt\r\n yHist.append(y0 + vYHist[counter]*tStep) #Calculate new y position using y = y0 + vt\r\n print(\"x: \" + str(xHist[counter]))\r\n print(\"y: \" + str(yHist[counter]))\r\n print()\r\n\r\n # xHist.append(xHist[counter-1] + vXHist[counter-1]*tStep + 0.5*aXHist[counter-1]*tStep**2)\r\n # yHist.append(yHist[counter-1] + vYHist[counter-1]*tStep + 0.5*aYHist[counter-1]*tStep**2)\r\n # vXHist.append(vXHist[counter-1] + aXHist[counter-1]*tStep)\r\n # vYHist.append(vYHist[counter-1] + aYHist[counter-1]*tStep)\r\n # vHist.append(math.hypot(vXHist[counter], vYHist[counter]))\r\n #\r\n # vTheta = math.atan(vYHist[counter] / vXHist[counter])\r\n # xDragAccel = -0.5*rho*Cd*A*vHist[counter]**2*math.cos(vTheta) / m\r\n # yDragAccel = -math.copysign(0.5*rho*Cd*A*vHist[counter]**2*math.sin(vTheta) / m, vYHist[counter])\r\n #\r\n # aXHist.append(xDragAccel)\r\n # aYHist.append(-g*tStep + yDragAccel)\r\n\r\n if vYHist[counter-1] > 0.0 and vYHist[counter] < 0.0: #Check if the projectile has reached it's peak by checking for a critical point\r\n print(\"max height reached at time=\" + str(tHist[counter]))\r\n # break\r\n\r\n # print(\"t: \" + str(tHist[counter]))\r\n # print(\"x: \" + str(xHist[counter]))\r\n # print(\"y: \" + str(yHist[counter]))\r\n # print(\"Vx: \" + str(vXHist[counter]))\r\n # print(\"Vy: \" + str(vYHist[counter]))\r\n # print(\"Ax: \" + str(aXHist[counter]))\r\n # print(\"Ay: \" + str(aYHist[counter]))\r\n\r\n if yHist[counter] < 0 or counter > 99999: #End the loop if the projectile has reached the ground (or limit the number of iterations to avoid computer death)\r\n break\r\n\r\n counter += 1\r\n\r\n plotData()", "def hydro_solver(self):\n u_dx = self.central_x(self.u)\n w_dy = self.central_y(self.w)\n P_dx = self.central_x(self.P)\n P_dy = self.central_y(self.P)\n\n rho_dx_upwind = self.upwind_x(self.rho, self.u)\n rho_dy_upwwind = self.upwind_y(self.rho, self.w)\n rho_udx_upwind = self.upwind_x(self.rho * self.u, self.u)\n rho_udy_upwind = self.upwind_y(self.rho * self.u, self.w)\n rho_wdx_upwind = self.upwind_x(self.rho * self.w, self.u)\n rho_wdy_upwind = self.upwind_y(self.rho * self.w, self.w)\n u_dx_uu = self.upwind_x(self.u, self.u)\n u_dx_uw = self.upwind_x(self.u, self.w)\n w_dy_uu = self.upwind_y(self.w, self.u)\n w_dy_uw = self.upwind_y(self.w, self.w)\n e_dx = self.upwind_x(self.e, self.u)\n e_dy = self.upwind_y(self.e, self.w)\n\n self.rho_dt = (\n -self.rho * (u_dx + w_dy)\n - self.u * rho_dx_upwind\n - self.w * rho_dy_upwwind\n )\n self.e_dt = (\n -(self.e + self.P) * (u_dx + w_dy) - self.u * e_dx - self.w * e_dy\n )\n self.rho_udt = (\n -self.rho * self.u * (u_dx_uu + w_dy_uu)\n - self.u * rho_udx_upwind\n - self.w * rho_udy_upwind\n - P_dx\n )\n self.rho_wdt = (\n -self.rho * self.w * (u_dx_uw + 
w_dy_uw)\n - self.u * rho_wdx_upwind\n - self.w * rho_wdy_upwind\n - P_dy\n + self.rho * self.g\n )\n\n self.time_step()\n rho_previous = np.zeros_like(self.rho)\n rho_previous[:, :] = self.rho\n self.rho[:, :] = self.rho + self.rho_dt * self.dt\n self.e[:, :] = self.e + self.e_dt * self.dt\n self.u[:, :] = (\n rho_previous * self.u + self.rho_udt * self.dt\n ) / self.rho\n self.w[:, :] = (\n rho_previous * self.w + self.rho_wdt * self.dt\n ) / self.rho\n\n self.boundary_conditions()\n self.T[:, :] = (\n (self.Y - 1) * self.e * self.mu * self.m_u / (self.kb * self.rho)\n )\n self.P[:, :] = (self.Y - 1) * self.e\n uw = (self.u, self.w)\n v = np.linalg.norm(uw)\n dt = self.dt\n\n return dt", "def forward(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=0, eps=0, gamma=0, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # forward euler and forward/backward timestep\n beta = np.float32(beta)\n mu = np.float32(mu)\n \n du1, du0 = du[:2]\n dv1, dv0 = dv[:2]\n dn0 = dn[0]\n \n dndt_x(h, n, u, v, dx, dy, dn0) # calculate dndt and put it into dn0\n \n n1 = n + ( dn0 )*dt\n \n dudt_x(h, n, f, u, v, dx, dy, du0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n, f, u, v, dx, dy, dv0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dudt_x(h, n1, f, u, v, dx, dy, du1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n1, f, u, v, dx, dy, dv1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n u1 = u + ( beta*du1 + (one-beta)*du0 )*dt\n v1 = v + ( beta*dv1 + (one-beta)*dv0 )*dt\n \n n, u, v = n1, u1, v1\n \n du = [du1, du0, du0, du0]\n dv = [dv1, dv0, dv0, dv0]\n dn = [dn0, dn0, dn0]\n return n1, u1, v1, du, dv, dn", "def t_rh_2_dewT(ds, var):\n ds['dew'] = 243.04 * (np.log(ds[var['rh']] / 100) + ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))/\\\n (17.625-np.log(ds[var['rh']] / 100) - ((17.625 * ds[var['temp']]) / (243.04 + ds[var['temp']])))\n return ds", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. 
Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. * np.pi)", "def main(data, setup):\n # input check \n varnames = ('vm_raw', 'vm_raw_theo')\n for varname in varnames:\n if varname not in data.keys():\n raise LookupError('data must contain variable %s.' %s)\n\n # display info message\n chrono = setup['chrono']\n chrono.issue('target velocity: correct for sensor motion...')\n\n # retrieve varialbes\n vnys = data['nqv']\n v_sensor_r = data['v_sensor_r']\n\n # ========== main =================================== #\n for key_raw in ('vm_raw', 'vm_raw_theo'):\n key_c = key_raw.replace('raw', 'raw_c')\n\n # sum\n vm_raw = data[key_raw]\n v_sum = (vm_raw + np.expand_dims(v_sensor_r, 1))\n\n # mod\n data[key_c] = symmod(v_sum, vnys)\n # ==================================================== #\n\n return data", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n 
self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. * dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def manipulate_heat_data(self): \n self.exh.T_array = ( 0.5 * (self.exh.T_inlet_array +\n self.exh.T_outlet_array) + 273.15)\n self.exh.delta_T_array = ( self.exh.T_inlet_array -\n self.exh.T_outlet_array )\n \n self.cool.delta_T_array = ( self.cool.T_inlet_array -\n self.cool.T_outlet_array )\n self.cool.C = self.cool.mdot * self.cool.c_p", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0,0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = 
np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda t: np.interp(t, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0, 0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda tt: np.interp(tt, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def forward(self, z_t_1, h_x, phi_table):\n z_category = self.gen_z_t_dist_now(z_t_1, h_x)\n \n if self.use_gumbel_softmax:\n \n# device = z_category.device\n \n averaged_z_t = 0\n \n log_prob = Variable(torch.log(z_category))\n \n for k in range(self.sampling_times): \n curr_z_t = F.gumbel_softmax(log_prob, tau = 0.1)\n \n averaged_z_t += curr_z_t\n \n del curr_z_t\n \n# averaged_z_t = averaged_z_t.to(device)\n \n z_t = averaged_z_t/self.sampling_times\n else:\n z_t = z_category\n \n phi_z = torch.mm(z_t, torch.t(phi_table))\n# mu = self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_category, phi_z", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, 
wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# 
observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])", "def main():\n \n # Particle in SHO - c.f. Mocz & Succi (2015) Fig. 2\n # parameters\n n = 100 # number of particles\n dt = 0.02 # timestep\n nt = 100 # number of timesteps\n nt_setup = 400 # number of timesteps to set up simulation\n n_out = 25 # plot solution every nout steps\n b = 4 # velocity damping for acquiring initial condition\n m = 1/n # mass of SPH particle ( m * n = 1 normalizes |wavefunction|^2 to 1)\n h = 40/n # smoothing length\n t = 0. 
# time\n\n # plot potential\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n fig = plt.plot(xx, 0.5*xx**2, linewidth=5, color=[0.7, 0.7, 0.9])\n \n # initialize\n x = np.linspace(-3.0, 3.0, num=n)\n x = np.reshape(x,(n,1))\n u = np.zeros((n,1))\n \n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h )\n\n # get v at t=-0.5*dt for the leap frog integrator using Euler's method\n u_mhalf = u - 0.5 * dt * a\n\n # main loop (time evolution)\n for i in np.arange(-nt_setup, nt): # negative time (t<0, i<0) is used to set up initial conditions\n\n # leap frog\n u_phalf = u_mhalf + a*dt\n x = x + u_phalf*dt\n u = 0.5*(u_mhalf+u_phalf)\n u_mhalf = u_phalf\n if (i >= 0):\n t = t + dt\n print(\"%.2f\" % t)\n \n if (i == -1 ): # switch off damping before t=0\n u = np.zeros((n,1)) + 1.0\n u_mhalf = u\n b = 0 # switch off damping at time t=0\n \n # update densities, pressures, accelerations\n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h)\n \n # plot solution every n_out steps\n if( (i >= 0) and (i % n_out) == 0 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n rr_exact = 1./np.sqrt(np.pi) * np.exp(-(xx-np.sin(t))**2/2.)**2\n fig = plt.plot(xx, rr_exact, linewidth=2, color=[.6, .6, .6])\n fig = plt.plot(xx, rr, linewidth=2, color=[1.*i/nt, 0, 1.-1.*i/nt], label='$t='+\"%.2f\" % t +'$')\n # plot the t<0 damping process for fun\n if( i==-nt_setup or i==-nt_setup*3/4 or i==-nt_setup/2 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n fig = plt.plot(xx, rr, linewidth=1, color=[0.9, 0.9, 0.9])\n \n plt.legend()\n plt.xlabel('$x$')\n plt.ylabel('$|\\psi|^2$')\n plt.axis([-2, 4, 0, 0.8])\n plt.savefig('solution.pdf', aspect = 'normal', bbox_inches='tight', pad_inches = 0)\n plt.close()", "def solve_amps(self, h, a, g):\n\n # Apply n_body symmetry, which builds all\n # other spin parts of the unitary group residual\n g3s = (+ g.t3\n - g.t3.transpose([0, 1, 2, 3, 5, 4])\n + g.t3.transpose([0, 1, 2, 4, 5, 3]))\n g3 = ((+ g.t3\n + g.t3.transpose([1, 2, 0, 4, 5, 3])\n + g.t3.transpose([2, 0, 1, 5, 3, 4])\n + g.t3.transpose([0, 2, 1, 3, 5, 4])\n + g.t3.transpose([2, 1, 0, 5, 4, 3])\n + g.t3.transpose([1, 0, 2, 4, 3, 5])\n + 2 * g3s) / 12)\n\n g2 = 1 / 2 * (g.t2 + g.t2.transpose([1, 0, 3, 2]))\n\n t2 = g2 * (- cc_denom(h.f, g.t2.ndim, 'dir', 'full'))\n t3 = g3 * (- cc_denom(h.f, g.t3.ndim, 'dir', 'full'))\n\n # Symmetrize\n t2 = 1 / 2 * (t2 + t2.transpose([1, 0, 3, 2]))\n t3 = ((+ t3\n + t3.transpose([1, 2, 0, 4, 5, 3])\n + t3.transpose([2, 0, 1, 5, 3, 4])\n + t3.transpose([0, 2, 1, 3, 5, 4])\n + t3.transpose([2, 1, 0, 5, 4, 3])\n + t3.transpose([1, 0, 2, 4, 3, 5])) / 6)\n\n return Tensors(\n t1=g.t1 * (- cc_denom(h.f, g.t1.ndim, 'dir', 'full')),\n t2=t2,\n t3=t3)", "def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )", "def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = 
self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def bet(P_1,V0_1,Vi_1,Vflap_1,J_a,Twist,Pitch,Chord,PLE,Polar,rho,dt):\n Phi = np.zeros_like(Twist)\n V_4 = np.zeros_like(P_1)\n dV_4 = np.zeros_like(P_1)\n F_5 = np.zeros_like(P_1)\n F_4 = np.zeros_like(P_1)\n F_1 = np.zeros_like(P_1)\n l = np.zeros_like(Twist)\n d = np.zeros_like(Twist)\n m = np.zeros_like(Twist)\n aoa = np.zeros_like(Twist)\n daoa = np.zeros_like(Twist)\n cl = np.zeros_like(Twist)\n cd = np.zeros_like(Twist)\n cm = np.zeros_like(Twist)\n w = np.zeros_like(Chord)\n wy = np.zeros_like(Twist)\n S = np.zeros_like(Twist)\n \n Vflap_1[np.abs(Vflap_1) < 1e-8] = 0\n \n for i in range(len(P_1)):\n \n for j in range(len(P_1[i])):\n \n # Calculate wing element elevation angle\n \n if j <= J_a:\n \n dz = P_1[i,J_a,2]\n dy = P_1[i,J_a,1]\n\n Phi[i,j] = np.arctan(dz/dy)\n \n else:\n \n dz = P_1[i,-1,2]-P_1[i,J_a+1,2]\n dy = P_1[i,-1,1]-P_1[i,J_a+1,1]\n \n Phi[i,j] = np.arctan(dz/dy)\n \n # Calculate local flow velocity\n V_4[i,j] = roty((Twist[i,j]+Pitch[j]), rotx(-Phi[i,j],(V0_1 + Vi_1 + Vflap_1[i,j]))) \n V_4[np.abs(V_4) < 1e-12] = 0\n\n # Calculate angle of attack\n aoa[i,j] = np.arctan(-V_4[i,j,2]/V_4[i,j,0])\n\n # Find cl,cd,cm from polars with linear interpolation\n cl[i,j] = np.interp(np.degrees(aoa[i,j]),Polar[j,0],Polar[j,1])\n cd[i,j] = np.interp(np.degrees(aoa[i,j]),Polar[j,0],Polar[j,2])\n cm[i,j] = np.interp(np.degrees(aoa[i,j]),Polar[j,0],Polar[j,3])\n\n # Calculate element width\n if j < len(P_1[i])-1:\n wy[i,j] = P_1[i,j+1,1]-P_1[i,j,1]\n else:\n wy[i,j] = P_1[i,j,1]-P_1[i,j-1,1]\n \n # Calculate element surface area\n S[i,j] = Chord[j] * wy[i,j]\n \n # Calculate aerodynamic forces\n l[i,j] = 0.5 * rho * np.linalg.norm(V_4[i,j])**2 * S[i,j] * cl[i,j]\n d[i,j] = 0.5 * rho * np.linalg.norm(V_4[i,j])**2 * S[i,j] * cd[i,j]\n m[i,j] = 0.5 * rho * np.linalg.norm(V_4[i,j])**2 * S[i,j] * cm[i,j]\n \n # Force vector in Blade Element local axes\n F_5[i,j] = np.array([-d[i,j], 0, l[i,j]])\n \n # Force vector in Blade local axes\n F_4[i,j] = roty(aoa[i,j],F_5[i,j])\n \n \n \n \"\"\"\n Add mass effect and rotate to stroke plane axes\n \"\"\"\n for i in range(len(P_1)):\n \n for j in range(len(P_1[i])):\n # Add mass effects\n if i==0:\n daoa[i,j] = (aoa[1,j]-aoa[0,j])/dt\n dV_4[i,j] = (V_4[1,j]-V_4[0,j])/dt\n \n elif i < len(P_1)-1:\n daoa[i,j] = (aoa[i+1,j]-aoa[i-1,j])/(2*dt)\n dV_4[i,j] = (V_4[i+1,j]-V_4[i-1,j])/(2*dt)\n \n else:\n daoa[i,j] = (aoa[i,j]-aoa[i-1,j])/dt\n dV_4[i,j] = (V_4[i,j]-V_4[i-1,j])/dt\n \n \n F_4[i,j,0] = F_4[i,j,0] - 0.25 * rho * np.pi * Chord[j] * S[i,j] * V_4[i,j,2] * daoa[i,j]\n F_4[i,j,2] = F_4[i,j,2] + 0.25 * rho * np.pi * Chord[j] * S[i,j] * dV_4[i,j,2]\n \n # Tranform to Stroke Plane Axes\n 
F_1[i,j] = rotx(Phi[i,j],roty(-(Twist[i,j]+Pitch[j]),F_4[i,j]))\n\n return F_1", "def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) \r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def R_term(\n enst, # enstrophy field\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33, # strain rate-33 component\n diff = False): # differentiation flag\n #---------------------------------------------------------------------#\n # Defining domain variables #\n #---------------------------------------------------------------------#\n pi = np.pi # pi\n dx = (2.0*pi)/64.0 # spatial step\n nu = 0.000185 # default viscosity\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n dim = 64\n kspec = np.fft.fftfreq(dim) * dim\n Kfield = 
np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n term1 = np.zeros((dim, dim, dim))\n term2 = np.zeros((dim, dim, dim))\n term3 = np.zeros((dim, dim, dim))\n #---------------------------------------------------------------------#\n # Numerator (numpy gradient tool) #\n #---------------------------------------------------------------------#\n if diff is not False:\n term1 = np.gradient(enst,dx, edge_order=2)[0]\n term2 = np.gradient(enst,dx, edge_order=2)[1]\n term3 = np.gradient(enst,dx, edge_order=2)[2]\n #---------------------------------------------------------------------#\n # Numerator (spectral differentiation) #\n #---------------------------------------------------------------------#\n else:\n term1 = 0.5*np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(enst) +\\\n 1j*Kfield[0]*np.fft.fftn(enst)).real\n term2 = 0.5*np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(enst) +\\\n 1j*Kfield[1]*np.fft.fftn(enst)).real\n term3 = 0.5*np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(enst) +\\\n 1j*Kfield[2]*np.fft.fftn(enst)).real\n #---------------------------------------------------------------------#\n # Numerator #\n #---------------------------------------------------------------------#\n num = nu*(term1**2.0+ term2**2.0 + term3**2.0)\n #---------------------------------------------------------------------#\n # Denominator #\n #---------------------------------------------------------------------#\n den = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n #---------------------------------------------------------------------#\n # R calculation #\n #---------------------------------------------------------------------#\n R = num/den\n\n return R", "def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. 
m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No 
instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def Hstep_cost_function(H): \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost", "def get_synth(obswvl, obsflux, ivar, dlam, synth=None, temp=None, logg=None, fe=None, alpha=None):\n\n\t# Get synthetic spectrum from grid\n\tif synth is None:\n\n\t\t# Use modified version of interpolateAtm to get blue synthetic spectrum from Ivanna's grid\n\t\tsynthflux_blue = 1. - interpolateAtm(temp,logg,fe,alpha,griddir='/raid/gridie/bin/')\n\t\twvl_range_blue = np.arange(4100., 6300.+0.14, 0.14)\n\t\tsynthwvl_blue = 0.5*(wvl_range_blue[1:] + wvl_range_blue[:-1])\n\n\t\t# Also get synthetic spectrum for redder part from Evan's grid\n\t\tsynthflux_red = 1. - interpolateAtm(temp,logg,fe,alpha,griddir='/raid/grid7/bin/')\n\t\tsynthwvl_red = np.fromfile('/raid/grid7/bin/lambda.bin')\n\t\tsynthwvl_red = np.around(synthwvl_red,2)\n\n\t\t# Splice blue + red parts together\n\t\tsynthflux = np.hstack((synthflux_blue, synthflux_red))\n\t\tsynthwvl = np.hstack((synthwvl_blue, synthwvl_red))\n\n\t# Else, use input synthetic spectrum\n\telse:\n\t\tsynthflux = synth[0]\n\t\tsynthwvl = synth[1]\n\n\t#print(synthwvl, obswvl)\n\n\t# Clip synthetic spectrum so it's within range of obs spectrum\n\tmask = np.where((synthwvl > obswvl[0]) & (synthwvl < obswvl[-1]))\n\tsynthwvl = synthwvl[mask]\n\tsynthflux = synthflux[mask]\n\n\t# Interpolate and smooth the synthetic spectrum onto the observed wavelength array\n\tsynthfluxnew = smooth_gauss_wrapper(synthwvl, synthflux, obswvl, dlam)\n\n\t# For testing purposes\n\t'''\n\tplt.figure()\n\tplt.plot(synthwvl, synthflux, 'k-', label='Synthetic')\n\tplt.plot(obswvl, synthfluxnew, 'r-', label='Synthetic (smoothed)')\n\t#plt.xlim(5000, 5250)\n\t#plt.ylim(0.9,1.0)\n\tplt.legend(loc='best')\n\tplt.savefig('synth_cont.png')\n\t#plt.show()\n\t'''\n\n\treturn synthfluxnew", "def distcalc(z,h=0.70,omegalambda=0.7,omegam=0.3,omegak=0.0):\n\n H0 = 100 * h # this is in units of km/s/Mpc\n\n H0freq = H0 * constants.kilo/(constants.mega * constants.parsec) # this is H0 is units of Hz\n \n hubbletime = 1.0/H0freq # in seconds\n hubbletimeyr = hubbletime / constants.year\n\n #hubble distance\n dh = constants.c / H0freq # in meters\n\n #now i can calculate the comoving distance (line of sight) using hogg eqn 15\n dc = dh * integrate.quad(dcintegrand,0,z,(omegalambda,omegam,omegak))[0]\n\n #now i can find the transverse comoving distance using hogg eqn 16\n if omegak == 0:\n dm = dc\n elif omegak > 0:\n dm = dh/np.sqrt(omegak) * np.sinh(dc * np.sqrt(omegak) / dh)\n else:\n dm = dh/np.sqrt(abs(omegak)) * np.sin(dc * np.sqrt(abs(omegak)) / dh)\n\n\n #now i will calculate the angular diameter distance (hogg eqn 18)\n da = dm/(1+z)\n \n #now i will calculate scale in kpc/arcsec, since this is commonly used\n scale = da * constants.arcsec / (constants.kilo * constants.parsec)\n\n #now i will calculate the luminosity distance (hog eqn 21)\n dl = (1+z)*dm\n \n #now i will calculate lookback time and \n #time from the begining of the universe to that redshift using hogg eqn 30\n \n tlookback = 
hubbletimeyr * integrate.quad(timeintegrand,0,z,(omegalambda,omegam,omegak))[0]\n \n tz = hubbletimeyr * integrate.quad(timeintegrand,z,np.inf,(omegalambda,omegam,omegak))[0]\n \n #all sky co-moving volume out to redshift z (hogg eqn 30)\n if omegak == 0:\n vc = 4 * np.pi * dm**3 / 3\n elif omegak > 0:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsinh( np.sqrt(omegak) * dm / dh ) / np.sqrt(omegak) )\n else:\n vc = ( 4 * np.pi * dh**3 / (2 * omegak) ) * ( dm * np.sqrt(1 + omegak * dm**2 / dh**2) / dh - \n np.arcsin( np.sqrt(abs(omegak)) * dm / dh ) / np.sqrt(abs(omegak)) )\n\n #for output, i will make a dictionary\n output = dict(dh=dh, dc=dc, dm=dm, da=da, scale=scale, dl=dl, tlookback = tlookback, tz=tz, vc=vc)\n\n return output", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def derivative(r, t, G=6.67e-11, AU=1.496e+11,\n m1=5.972e+24, m2=6.417e+23, m3=1.989e+30,\n a1=1.0*1.496e+11, a2=1.52*1.496e+11):\n\n if G < 0:\n print(f\"The gravitational constant is negative\")\n\n if AU < 0:\n print(f\"The Astronomical unit is negative\")\n\n if m1 < 0:\n print(f\"The mass of the first body is negative\")\n\n if m2 < 0:\n print(f\"The mass of the second body is negative\")\n\n if m3 < 0:\n print(f\"The mass of the third body is negative\")\n\n if a1 < 0:\n print(f\"The distance of body 1 from the body center is negative\")\n\n if a2 < 0:\n print(f\"The distance of body 2 from the body center is negative\")\n\n x1 = r[0]\n y1 = r[1]\n v_x1 = r[2]\n v_y1 = r[3]\n\n x2 = r[4]\n y2 = r[5]\n v_x2 = r[6]\n v_y2 = r[7]\n\n x3 = r[8]\n y3 = r[9]\n v_x3 = r[10]\n v_y3 = r[11]\n\n z1 = r[12]\n z2 = r[13]\n z3 = r[14]\n\n v_z1 = r[15]\n v_z2 = r[16]\n v_z3 = r[17]\n\n r1 = np.array([x1, y1, z1])\n r2 = np.array([x2, y2, z2])\n r3 = np.array([x3, y3, z3])\n\n dr1 = v_x1\n dr2 = v_y1\n\n dr3 = (G*m2/distance(r1, r2)**3)*(x2-x1) + (G*m3/distance(r1, r3)**3)*(x3-x1)\n dr4 = (G*m2/distance(r1, r2)**3)*(y2-y1) + (G*m3/distance(r1, r3)**3)*(y3-y1)\n\n dr5 = v_x2\n dr6 = v_y2\n\n dr7 = (G*m1/distance(r1, r2)**3)*(x1-x2) + (G*m3/distance(r2, r3)**3)*(x3-x2)\n dr8 = (G*m1/distance(r1, r2)**3)*(y1-y2) + (G*m3/distance(r2, r3)**3)*(y3-y2)\n\n dr9 = v_x3\n dr10 = v_y3\n\n dr11 = (G*m1/distance(r1, r3)**3)*(x1-x3) + (G*m2/distance(r2, r3)**3)*(x2-x3)\n dr12 = 
(G*m1/distance(r1, r3)**3)*(y1-y3) + (G*m2/distance(r2, r3)**3)*(y2-y3)\n\n dr13 = v_z1\n dr14 = v_z2\n dr15 = v_z3\n\n dr16 = (G*m2/distance(r1, r2)**3)*(z2-z2) + (G*m3/distance(r1, r3)**3)*(z3-z1)\n dr17 = (G*m3/distance(r2, r3)**3)*(z1-z2) + (G*m1/distance(r2, r1)**3)*(z1-z2)\n dr18 = (G*m1/distance(r1, r3)**3)*(z1-z3) + (G*m2/distance(r2, r3)**3)*(z2-z3)\n\n dr = np.array([dr1, dr2, dr3, dr4, dr5, dr6,\n dr7, dr8, dr9, dr10, dr11, dr12,\n dr13, dr14, dr15, dr16, dr17, dr18])\n\n return dr", "def one_transition_spectrum_fluor(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n re = tr[\"re\"] # reorganisation energy\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-numpy.conjugate(gt) -1j*om*ta.data + 2j*re*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = dd*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def velocity_field(xt,yt,x0,y0,velf,dia,tsr,solidity):\n rad = dia/2.\n rot = tsr*velf/rad\n\n # Calculating EMG distribution parameters\n loc,spr,skw,scl = vorticity(tsr,solidity)\n \n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n \n # Integration of the vorticity profile using Fortran code (vorticity.f90; _vortrun.so)\n vel_vs = dblquad(_vortmodel.integrand,0.,35.*dia,lambda x: -4.*dia,lambda x: 4.*dia, args=(x0t,y0t,dia,loc[0],loc[1],loc[2],spr[0],spr[1],skw[0],skw[1],scl[0],scl[1],scl[2]))\n \n # Calculating velocity deficit\n vel = (vel_vs[0]*(rot))/(2.*pi)\n vel = (vel + velf)/velf # normalization of velocity\n \n return vel", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n 
\"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def dvdt(self, args: Dict) -> float:\n if self.channel_bool['leak']:\n i_leak: float = self.leak.i(args['v'])\n else:\n i_leak: float = 0.\n \n if self.channel_bool['nav']:\n i_nav: float = self.nav.i(args['v'], h=args['h_nav'])\n else:\n i_nav: float = 0.\n\n if self.channel_bool['kvhh']:\n i_kvhh: float = self.kvhh.i(args['v'], n=args['n_kvhh'])\n else:\n i_kvhh: float = 0.\n\n if self.channel_bool['kva']:\n i_kva: float = self.kva.i(args['v'], h=args['h_kva'])\n else:\n i_kva: float = 0.\n\n if self.channel_bool['kvsi']:\n i_kvsi: float = self.kvsi.i(args['v'], m=args['m_kvsi'])\n else:\n i_kvsi: float = 0.\n\n if self.channel_bool['cav']:\n i_cav: float = self.cav.i(args['v'])\n else:\n i_cav: float = 0.\n\n if self.channel_bool['kca']:\n i_kca: float = self.kca.i(args['v'], ca=args['ca'])\n else:\n i_kca: float = 0.\n \n if self.channel_bool['nap']:\n i_nap: float = self.nap.i(args['v'])\n else:\n i_nap: float = 0.\n\n if self.channel_bool['kir']:\n i_kir: float = 
self.kir.i(args['v'])\n else:\n i_kir: float = 0.\n\n if self.channel_bool['ampar']:\n i_ampar: float = self.ampar.i(args['v'], s=args['s_ampar'])\n else:\n i_ampar: float = 0.\n\n if self.channel_bool['nmdar']:\n i_nmdar: float = self.nmdar.i(args['v'], s=args['s_nmdar'])\n else:\n i_nmdar: float = 0.\n\n if self.channel_bool['gabar']:\n i_gabar: float = self.gabar.i(args['v'], s=args['s_gabar'])\n else:\n i_gabar: float = 0.\n\n return ((-10.0*self.params.area \n * (i_leak\n + i_nav \n + i_kvhh \n + i_kva \n + i_kvsi \n + i_cav \n + i_kca \n + i_nap \n + i_kir) \n - (i_ampar \n + i_nmdar \n + i_gabar))\n / (10.0*self.params.cm*self.params.area))", "def solver(T, dt, v0, Cd, rho, A, m, Source=None):\n a = Cd*rho*A/(2*m) # Set up the constant a for compact code\n v = zeros(int(T/dt) + 1) # Create the velocity mesh\n v[0] = v0; g = 9.81;#Initial velocity and the value of gravity acceleration\n \n # Description of the functions X(t) and Y(t) is given in the PDF file\n def X(t): \n if(Source == None):\n return -g\n else:\n return -g + Source(t+dt/2.)/m\n \n def Y(t):\n return a\n \n #Calculate the velocity at each meshpoint\n for i in range(1,len(v)):\n v[i] = skydiving_iterate(v[i-1], dt*(i-1), dt, X, Y)\n return v, linspace(0, T, T/dt +1)", "def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]", "def elastd(ntw):\n # Simulated common block obs with observatory information\n global cth, sth, clg, slg, dif, radn, gl\n # Simulated common block love with love numbers\n global h, k, l\n # Simulated common block azimut with strainmeter and tiltmeter azimuths\n global azt, azs\n # Simulated common block bpos with lunar and solar colat and long, lunar sine parallax,\n # and solar distance\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist\n\n coor = [dsz, dcz, dsl, dcl, ssz, scz, ssl, scl]\n par = dpar\n # Data for mean parallaxes, a times m over m(earth), equatorial radius.\n rbor = [1.6592496e-2, 4.2635233e-5]\n amrat = [78451.25, 2.1235762e12]\n a = 6.37814e6\n g1 = 9.79828\n g2 = 9.82022\n ppp = [3, 0, 0]\n iflag = 0\n strain = [0]\n dele = [0, 0, 0]\n dim = [0, 0, 0]\n # On first call compute factors for gravity and tilt, and dc tides\n # at the given latitude.\n if iflag != 
1:\n iflag = 1\n for i in range(3):\n # browse 0,1,2\n dele[i] = 1.0 + (2.0 / (i + 2.0)) * h[i] - ((i + 3.0) / (i + 2.0)) * k[i]\n dim[i] = 1.0 + k[i] - h[i]\n # dc gravity tide is also known as the Honkasalo correction\n # **note that the love numbers for an elastic earth are used\n # in computing the dc tide as well.eq\n gdc = -3.0481e-7 * (3 * cth ** 2 - 1.0) * dele[0] * radn\n tnsdc = -9.1445e-7 * cth * sth * dim[0] * radn / gl\n etdc = -1.555e-8 * (\n h[0] * (3.0 * cth ** 2 - 1.0) - 6.0 * l[0] * (2.0 * cth ** 2 - 1.0)\n )\n eldc = -1.555e-8 * (h[0] * (3.0 * cth ** 2 - 1.0) - 6.0 * l[0] * cth ** 2)\n potdc = 0.0992064 * (1.0 - 3 * cth ** 2)\n re = 1.0 / (radn * a)\n\n # zero out arrays\n tilt = [0, 0]\n e = [0, 0, 0]\n tltcor = [0, 0]\n grav = 0\n gnth = 0\n # compute normalized parallax\n pa = [par / 3422.45, 1 / sdist]\n\n # in outer loop, ii = 0 for moon, 1 for sun (basile)\n for ii in [0, 1]:\n id = 3\n if ii == 1:\n id = 1\n ir = 4 * (ii)\n # find cosine of zenith angle, potential constants, legendre polynomials\n # and their derivatives, and derivatives of the cosine of the zenith angle.\n cll = clg * coor[ir + 3] + slg * coor[ir + 2]\n sll = slg * coor[ir + 3] - clg * coor[ir + 2]\n cz = coor[ir + 1]\n sz = coor[ir]\n cu = cth * cz + sth * sz * cll\n xi = rbor[ii] * pa[ii] * radn\n cc = amrat[ii] * rbor[ii] * pa[ii]\n rkr = [cc * xi * xi, 0, 0]\n rkr[1] = rkr[0] * xi\n rkr[2] = rkr[1] * xi\n\n p = [0.5 * (3 * cu * cu - 1.0), 0, 0]\n pp = [3 * cu, 0, 0]\n if ii != 1:\n p[1] = 0.5 * cu * (5.0 * cu * cu - 3.0)\n p[2] = 0.25 * (7.0 * cu * p[1] - 3.0 * p[0])\n pp[1] = 1.5 * (5.0 * cu * cu - 1.0)\n pp[2] = 0.25 * (7.0 * p[1] + cu * pp[1]) - 3.0 * pp[0]\n ppp[1] = 15.0 * cu\n ppp[2] = 7.5 * (7.0 * cu * cu - 1.0)\n\n cut = -sth * cz + cth * sz * cll\n cutt = -cu\n cul = -sth * sz * sll\n cull = -sth * sz * cll\n cutl = -cth * sz * sll\n # for j = 1:id:\n for j in range(id):\n if ntw[0] == 1:\n grav = grav + dele[j] * (j + 2) * rkr[j] * p[j] * g1 * re\n gnth = gnth - dim[0] * rkr[0] * pp[0] * g1 * cut * re\n # ellipticity corrections, convert strains to strainmeter\n if ntw[0] == 1:\n grav = grav + gnth * dif - gdc\n\n return [grav, tilt, strain, gdc]", "def fwd_model(Ti_samples,To_samples, dw_samples, kw_samples,hi_samples,ho_samples,TA_samples):\n\t#Determine number of samples (totquat)\n\ttotquat=len(Ti_samples)\n\t# List to store values of Q (assuming no radiative heat transfer) calculated from\n\t# the random samples of the parameters\n\tQ_samples_4PCE=[]\n\t# List to store values of Q assuming radiative heat transfer occurs\n\t#Q_r_samples_4PCE=[]\n\t# Calculate values of heat flux Q (assuming no radiative heat transfer)\n\t# for the different sample values and append to the list\n\tfor i in range(totquat):\n\t\t(Q,T1,T2)=compute_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i],\\\n\t\t\tkw_samples[i], hi_samples[i], ho_samples[i])\n\t\tQ_samples_4PCE.append(Q)\n\t\t# Calculate values of heat flux Q assuming radiative heat transfer to atmosphere and append to list\n\t\t# For the required estimates of Q,T1, and T2 needed to solve the nonlinear system,\n\t\t# we use the values obtained by solving the system assuming no radiative heat transfer\n\t\t\"\"\"Q2=r_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i], kw_samples[i],\\\n\t\t\thi_samples[i], ho_samples[i], TA_samples[i], (Q,T1,T2))\n\t\tQ_r_samples_4PCE.append(Q2)\n\t# Convert Q_r_samples_4PCE to numpy array\n\tQ_evals = np.array(Q_r_samples_4PCE)\n\treturn Q_evals\"\"\"\n\t\tConvert Q_samples_4PCE to numpy 
array\n\t\tQ_evals = np.array(Q_samples_4PCE)\n\t\treturn Q_evals\"\"\"\n\n\ndef KDE(fcn_evals):\n\t\"\"\"\n\tPerforms kernel density estimation\n\tInput:\n\t\tfcn_evals: numpy array of evaluations of the forward model (values of heat flux Q)\n\tOutput:\n\t\txpts_pce: numpy array of points at which the PDF is estimated.\n\t\tPDF_data_pce: numpy array of estimated PDF values.\n\t\"\"\"\n\t# Perform KDE on fcn_evals\n\tkern_pce=stats.kde.gaussian_kde(fcn_evals)\n\t# Generate points at which to evaluate the PDF\n\txpts_pce=np.linspace(fcn_evals.min(),fcn_evals.max(),200)\n\t# Evaluate the estimated PDF at these points\n\tPDF_data_pce=kern_pce(xpts_pce)\n\treturn xpts_pce, PDF_data_pce", "def get_variables(self):\n return [self.g_t, self.m_t]", "def finite_diff(F, x0, v0, dt, M, K, C, T):\r\n\r\n ### INITIAL PARAMETERS ####\r\n\r\n # defining the number of steps of analysis = Ns\r\n Ns = int(T/dt)+1\r\n # step t0 (initial acceleration)\r\n ngl = np.shape(F)[0] # captures the number of degrees of freedom\r\n\r\n ### MODELLING THE DISPLACEMENTS ###\r\n\r\n x_before = np.zeros((ngl,1))\r\n # matrix that indicates the displacements, in each degree of freedom, along the time of \r\n # duration of analysis. Each column is a time step\r\n x = np.zeros((ngl, Ns))\r\n x[:,0] = x0[:,0]\r\n\r\n ### SOLVING INITIAL STEP ###\r\n\r\n # initial Force F0 is equivalent to the first column of the matrix of load vectors F along time\r\n aux1 = np.zeros((ngl,1))\r\n aux1[:,0] = np.copy(F[:,0])\r\n aux2 = aux1 - np.dot(C,v0) - np.dot(K,x0)\r\n a0 = np.dot(la.inv(M),aux2)\r\n # step t-1 (before initial condition)\r\n x_before = dt*dt*a0/2 - dt*v0 + x0 \r\n # step t+1 (after initial condition)\r\n C1 = M / (dt*dt) + C / (2*dt)\r\n C2 = K - 2*M / (dt*dt)\r\n C3 = M / (dt*dt) - C / (2*dt)\r\n aux3 = aux1 - np.dot(C2, x0) - np.dot(C3, x_before)\r\n x[:,1] = np.dot(la.inv(C1), aux3[:,0])\r\n\r\n ### INTEGRATING ALONG THE DURATION OS ANALYSIS ###\r\n\r\n i = 0\r\n aux4 = np.zeros((ngl,1))\r\n aux5 = np.zeros((ngl,1))\r\n aux6 = np.zeros((ngl,1))\r\n aux7 = np.zeros((ngl,1))\r\n for i in range(1,Ns-1):\r\n aux4[:,0] = np.copy(F[:,i])\r\n aux5[:,0] = np.copy(x[:,i])\r\n aux6[:,0] = np.copy(x[:,i-1])\r\n aux7[:,0] = np.copy(x[:,i+1])\r\n aux7 = np.dot(la.inv(C1), aux4 - np.dot(C2,aux5) - np.dot(C3,aux6))\r\n x[:,i+1] = np.copy(aux7[:,0])\r\n return x", "def _compute_H(self, t, index, t2, index2, update_derivatives=False, stationary=False):\r\n\r\n if stationary:\r\n raise NotImplementedError, \"Error, stationary version of this covariance not yet implemented.\"\r\n # Vector of decays and delays associated with each output.\r\n Decay = self.decay[index]\r\n Decay2 = self.decay[index2]\r\n t_mat = t[:, None]\r\n t2_mat = t2[None, :]\r\n if self.delay is not None:\r\n Delay = self.delay[index]\r\n Delay2 = self.delay[index2]\r\n t_mat-=Delay[:, None]\r\n t2_mat-=Delay2[None, :]\r\n\r\n diff_t = (t_mat - t2_mat)\r\n inv_sigma_diff_t = 1./self.sigma*diff_t\r\n half_sigma_decay_i = 0.5*self.sigma*Decay[:, None]\r\n\r\n ln_part_1, sign1 = ln_diff_erfs(half_sigma_decay_i + t2_mat/self.sigma, \r\n half_sigma_decay_i - inv_sigma_diff_t,\r\n return_sign=True)\r\n ln_part_2, sign2 = ln_diff_erfs(half_sigma_decay_i,\r\n half_sigma_decay_i - t_mat/self.sigma,\r\n return_sign=True)\r\n\r\n h = sign1*np.exp(half_sigma_decay_i\r\n *half_sigma_decay_i\r\n -Decay[:, None]*diff_t+ln_part_1\r\n -np.log(Decay[:, None] + Decay2[None, :]))\r\n h -= sign2*np.exp(half_sigma_decay_i*half_sigma_decay_i\r\n -Decay[:, None]*t_mat-Decay2[None, 
:]*t2_mat+ln_part_2\r\n -np.log(Decay[:, None] + Decay2[None, :]))\r\n\r\n if update_derivatives:\r\n sigma2 = self.sigma*self.sigma\r\n # Update ith decay gradient\r\n\r\n dh_ddecay = ((0.5*Decay[:, None]*sigma2*(Decay[:, None] + Decay2[None, :])-1)*h\r\n + (-diff_t*sign1*np.exp(\r\n half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*diff_t+ln_part_1\r\n )\r\n +t_mat*sign2*np.exp(\r\n half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*t_mat\r\n - Decay2*t2_mat+ln_part_2))\r\n +self.sigma/np.sqrt(np.pi)*(\r\n -np.exp(\r\n -diff_t*diff_t/sigma2\r\n )+np.exp(\r\n -t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat\r\n )+np.exp(\r\n -t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat\r\n )-np.exp(\r\n -(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat)\r\n )\r\n ))\r\n self._dh_ddecay = (dh_ddecay/(Decay[:, None]+Decay2[None, :])).real\r\n \r\n # Update jth decay gradient\r\n dh_ddecay2 = (t2_mat*sign2\r\n *np.exp(\r\n half_sigma_decay_i*half_sigma_decay_i\r\n -(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat)\r\n +ln_part_2\r\n )\r\n -h)\r\n self._dh_ddecay2 = (dh_ddecay/(Decay[:, None] + Decay2[None, :])).real\r\n \r\n # Update sigma gradient\r\n self._dh_dsigma = (half_sigma_decay_i*Decay[:, None]*h\r\n + 2/(np.sqrt(np.pi)\r\n *(Decay[:, None]+Decay2[None, :]))\r\n *((-diff_t/sigma2-Decay[:, None]/2)\r\n *np.exp(-diff_t*diff_t/sigma2)\r\n + (-t2_mat/sigma2+Decay[:, None]/2)\r\n *np.exp(-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat) \r\n - (-t_mat/sigma2-Decay[:, None]/2) \r\n *np.exp(-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat) \r\n - Decay[:, None]/2\r\n *np.exp(-(Decay[:, None]*t_mat+Decay2[None, :]*t2_mat))))\r\n \r\n return h", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n 
# tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def grad_phi_mesh(self):\n fy = -0.5 * (np.roll(self.phi, 1, axis = 1) - np.roll(self.phi, -1, axis=1)) \n fx = -0.5 * (np.roll(self.phi, 1, axis = 0) - np.roll(self.phi, -1, axis=0))\n return fx,fy", "def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. 
+ sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop", "def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 = self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 + e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: (1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: -(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n 
[self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])", "def _F_qt(vh_comp,conn,q,wind=5):\n\n # get the van Hove data\n (vanHove,temp,dtime) = extract_vanHove_all(vh_comp,conn,wind)\n \n Fqt = [_Fqt_comp(vh,q) for vh in vanHove]\n\n return Fqt,temp,dtime", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. 
These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. 
Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + 
self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def enthalpy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n h = g - temp*g_t\n return h", "def dynamics(state,t):\n global M,m\n f = control_upright(state)\n # f = 0\n dydx = np.zeros_like(state)\n x,x_dot,th,th_dot = state #unpacking the state\n dydx[0] = x_dot\n dydx[2] = th_dot\n\n den1 = M + (m*sin(th)*sin(th))\n dydx[1] = (f + (m*g*sin(th)*cos(th)) + m*L*th_dot*th_dot*sin(th) + (b/L)*(th_dot*cos(th)))/den1\n den2 = L*den1\n dydx[3] = (((M+m)*g*sin(th) + f*cos(th) + m*L*th_dot*th_dot*sin(th)*cos(th))/den2) + (b/(m*L*L))*th_dot\n dydx[3] = 
-dydx[3]\n\n return dydx", "def VerletHope2(r, v, beta,dt,R_dust,M_dust):\n # Deceptively simple (read about Velocity Verlet on wikipedia)\n r_new = r + v*dt + calculate_acceleration2(r,v,beta,omega,R_dust,M_dust)*dt**2/2\n v_new = v + (calculate_acceleration2(r,v,beta,omega,R_dust,M_dust) + calculate_acceleration2(r_new,v,beta,omega,R_dust,M_dust))/2 * dt\n \n return (r_new, v_new)", "def _diff_pot(a2,t2,d2,wair,temp,pres,ppot,airf,dhum):\n ph2 = _eq_pressure(0,0,0,a2,t2,d2)\n gi2 = _ice_g(0,0,t2,ppot)\n gv2 = _eq_vappot(0,0,0,a2,t2,d2)\n sh1 = -_air_f(0,1,0,airf,temp,dhum)\n si1 = -_ice_g(1,0,temp,pres)\n s1 = wair/airf*sh1 + (1-wair/airf)*si1\n sh2 = -_air_f(0,1,0,a2,t2,d2)\n si2 = -_ice_g(1,0,t2,ppot)\n s2 = wair/a2*sh2 + (1-wair/a2)*si2\n lhs = numpy.array([ppot, gi2, s1])\n rhs = numpy.array([ph2, gv2, s2])\n \n ph2_a = _eq_pressure(1,0,0,a2,t2,d2)\n ph2_t = _eq_pressure(0,1,0,a2,t2,d2)\n ph2_d = _eq_pressure(0,0,1,a2,t2,d2)\n gi2_t = _ice_g(1,0,t2,ppot)\n gv2_a = _eq_vappot(1,0,0,a2,t2,d2)\n gv2_t = _eq_vappot(0,1,0,a2,t2,d2)\n gv2_d = _eq_vappot(0,0,1,a2,t2,d2)\n sh2_a = -_air_f(1,1,0,a2,t2,d2)\n sh2_t = -_air_f(0,2,0,a2,t2,d2)\n sh2_d = -_air_f(0,1,1,a2,t2,d2)\n si2_t = -_ice_g(2,0,t2,ppot)\n s2_a = -wair/a2**2*(sh2 - a2*sh2_a - si2)\n s2_t = wair/a2*sh2_t + (1-wair/a2)*si2_t\n s2_d = wair/a2*sh2_d\n dlhs = numpy.array([[0.,0.,0.], [0.,gi2_t,0.], [0.,0.,0.]])\n drhs = numpy.array([[ph2_a,ph2_t,ph2_d], [gv2_a,gv2_t,gv2_d],\n [s2_a,s2_t,s2_d]])\n return lhs, rhs, dlhs, drhs", "def __init__(self, trap=2.5*10**16, Keq=1.0*10**17,\n EHdecay=1.0*10**-10, Etrap=2.0*10**-10, FHloss=8.0*10**-12,\n G3decay = 0, step=200*ps, pretime=2, reprate=80000000,\n verbose=False, trackQ=False, scalar=1, Gdecay=0, GHdecay=0,\n tolerance=0.005, G2decay=0. ,Gescape=1., Gform=1., G3loss=0.):\n # Some other variables used\n self.tolerance = tolerance\n self.scalar = scalar\n self.verbose = verbose\n self.reprate = reprate\n self.duration = 1.00 / reprate\n self.step = step\n self.steps = int(self.duration / self.step)\n self.powers = []\n self.pretime = pretime\n # Variables which hold state densities\n self.exciton = []\n self.hole = []\n self.electron = []\n self.trap = (trap) # Total number of traps\n self.filled = [] # Filled traps\n self.signal = []\n self.xsignal = []\n self.ehsignal = []\n self.xloss = []\n self.tloss = []\n self.pulses = []\n self.qk = []\n self.trackQ = trackQ\n # Rate and equilibrium constants, corrected for time step size\n self.Keq = Gescape/Gform # Equilibrium constant for X<-->e+h\n self.EHdecay = (EHdecay * step) # e+h->ground\n self.Etrap = (Etrap * step) # e+trap->filled\n self.FHloss = (FHloss * step) # filled+h->ground\n self.Gdecay = Gdecay * step\n self.G2decay = G2decay * step\n self.G3decay = G3decay * step\n self.GHdecay = GHdecay * step\n self.Gescape = Gescape * step\n self.G3loss = G3loss * step\n self.Gform = Gform * step", "def term_1(\n omega1, # vorticity-1\n omega2, # vorticity-2\n omega3, # vorticity-3\n enst, # enstrophy\n nu_sgs, # turbulent viscosity\n h = True): # spatial step size\n #---------------------------------------------------------------------#\n # Setting default values #\n #---------------------------------------------------------------------#\n if h is True:\n h = 2.0*np.pi/64.0\n #---------------------------------------------------------------------#\n # Preallocating space #\n #---------------------------------------------------------------------#\n term = np.zeros((64,64,64))\n 
#---------------------------------------------------------------------#\n # Enstrophy term #\n #---------------------------------------------------------------------#\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[2], h, edge_order=2)[2]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[1], h, edge_order=2)[1]\n term += np.gradient(\\\n np.gradient(enst, h, edge_order=2)[0], h, edge_order=2)[0]\n #---------------------------------------------------------------------#\n # Dissipation #\n #---------------------------------------------------------------------#\n omega1_grad = np.gradient(omega1, h, edge_order=2)\n omega2_grad = np.gradient(omega2, h, edge_order=2)\n omega3_grad = np.gradient(omega3, h, edge_order=2)\n term -= np.square(omega1_grad[2])\n term -= np.square(omega1_grad[1])\n term -= np.square(omega1_grad[0])\n term -= np.square(omega2_grad[2])\n term -= np.square(omega2_grad[1])\n term -= np.square(omega2_grad[0])\n term -= np.square(omega3_grad[2])\n term -= np.square(omega3_grad[1])\n term -= np.square(omega3_grad[0])\n #---------------------------------------------------------------------#\n # Applying the subgrid stress #\n #---------------------------------------------------------------------#\n term *= nu_sgs\n\n return term", "def update(self, t, dt):\n # update boundary values\n self._apply_boundary_conditions(t,dt)\n #update current empirical wind velocity\n corner_mean_index = int(scipy.floor(t/self.wind_update_period))\n self._empirical_velocity = self.u_av[corner_mean_index], self.v_av[corner_mean_index]\n # initialise wind speed derivative arrays\n du_dt = scipy.zeros((self.nx, self.ny))\n dv_dt = scipy.zeros((self.nx, self.ny))\n # approximate spatial first derivatives with centred finite difference\n # equations for both components of wind field\n du_dx, du_dy = self._centred_first_derivs(self._u)\n dv_dx, dv_dy = self._centred_first_derivs(self._v)\n # calculate centred first sums i.e. 
sf_x = f(x+dx,y)+f(x-dx,y) and\n # sf_y = f(x,y+dy)-f(x,y-dy) as first step in approximating spatial\n # second derivatives with second order finite difference equations\n # d2f/dx2 ~ [f(x+dx,y)-2f(x,y)+f(x-dx,y)] / (dx*dx)\n # = [sf_x-2f(x,y)] / (dx*dx)\n # d2f/dy2 ~ [f(x,y+dy)-2f(x,y)+f(x,y-dy)] / (dy*dy)\n # = [sf_y-2f(x,y)] / (dy*dy)\n # second finite differences are not computed in full as the common\n # f(x,y) term in both expressions can be extracted out to reduce\n # the number of +/- operations required\n su_x, su_y = self._centred_first_sums(self._u)\n sv_x, sv_y = self._centred_first_sums(self._v)\n # use finite difference method to approximate time derivatives across\n # simulation region interior from defining PDEs\n # du/dt = -(u*du/dx + v*du/dy) + 0.5*Kx*d2u/dx2 + 0.5*Ky*d2u/dy2\n # dv/dt = -(u*dv/dx + v*dv/dy) + 0.5*Kx*d2v/dx2 + 0.5*Ky*d2v/dy2\n du_dt = (-self._u_int * du_dx - self._v_int * du_dy +\n self._Bx * su_x + self._By * su_y -\n self._C * self._u_int)\n dv_dt = (-self._u_int * dv_dx - self._v_int * dv_dy +\n self._Bx * sv_x + self._By * sv_y -\n self._C * self._v_int)\n # perform update with Euler integration\n self._u_int += du_dt * dt\n self._v_int += dv_dt * dt\n # update spline interpolators\n self._set_interpolators()", "def __init__(self, temperatures, daytypes, consumptions, nb_days, nb_particles, sigma2, kappa, u_heat):\n self.temperatures = temperatures\n self.daytypes = daytypes\n self.consumptions = consumptions\n self.nb_days = nb_days\n self.nb_particles = nb_particles\n self.sigma2 = sigma2\n self.kappa = kappa\n self.u_heat = u_heat\n #Var init\n self.s = np.zeros((nb_days, nb_particles)) \n self.g_heat = np.zeros((nb_days, nb_particles))\n #sigma_s and sigma_g are fixed\n self.sigma_s_star_2 = np.zeros((1, nb_particles)) \n self.sigma_g_star_2 = np.zeros((1, nb_particles))\n self.x_season = np.zeros((1, nb_particles))\n self.x_heat = np.zeros((1, nb_particles))\n self.x = np.zeros((1, nb_particles))\n self.w = np.zeros((1, nb_particles))", "def rhs_fenics(y,t):\n #print \"time: \",t\n uprev.vector()[:]=y\n f.t = t #dolfin needs to know the current time for cos(t)\n uprime_solver.solve()\n return uprime_solution.vector().array()", "def __getitem__(self, i):\n T0, S0 = get_surface_ts(self.nc, i)\n \n # average the variables if we got multiple time elements\n if isinstance(i, slice):\n T0, S0, = T0.mean(axis=0), S0.mean(axis=0)\n if self.p == 0.:\n rho, drhodT, drhodS = jmd95.eos.state_surface(T0, S0)\n else:\n rho, drhodT, drhodS = jmd95.eos.state(self.p, T0, S0)\n return rho", "def T_c(I, T_amb, V, D, R_list, N_cond=1, T_range=[298,323,348], a_s=0.9, e_s=0.9, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):\n\n # def Q_gen(I, R):\n # w = I * I * R\n # return w\n\n # def Q_rad_in(I_sun, A_s, a_s):\n # w = I_sun * D * a_s\n # return w\n\n # def Q_conv(htcoeff, A_s, T_lin, T_amb):\n # w = htcoeff * A_s * (T_line - T_amb)\n # return w\n\n # def Q_rad_out(e_s, A_s, sigma, T_line, T_amb):\n # w = e_s * D * sigma * (T_line**4 - T_amb**4)\n # return w\n\n def reynolds(V, D, v, Mair=1.103):\n r = V * D / v\n return r\n\n def nusselt(Re, Pr):\n a = 0.62 * ( (Re) ** (1.0/2.0) ) * ( Pr ** (1.0/3.0) )\n b = (1 + (0.4/(Pr**(2.0/3.0) ) ) ) ** (1.0/4.0)\n c = (Re / 282000) ** (5.0/8.0)\n n = 0.3 + (a/b) * ( (1 + c) ** (4.0/5.0) )\n return n\n\n def air_prop(T_amb):\n # temp v k Pr\n air_prop = np.array([[200, 7.59e-6, 18.1e-3, 0.737],\n [250, 11.44e-6, 22.3e-3, 0.720],\n [300, 15.89e-6, 26.3e-3, 0.707],\n [350, 20.92e-6, 30.0e-3, 0.700],\n [400, 26.41e-6, 
33.8e-3, 0.690],\n [450, 32.39e-6, 37.3e-3, 0.686],\n [500, 38.79e-6, 40.7e-3, 0.684],\n [550, 45.57e-6, 43.9e-3, 0.683],\n [600, 52.69e-6, 46.9e-3, 0.685]])\n\n v, k, Pr = np.apply_along_axis(lambda x: np.interp(T_amb, air_prop[:,0], x),\n 0, air_prop[:,1:])\n return v, k, Pr\n\n def R_T(R_lo, R_mid, R_hi, T_line, N_cond, T_range=T_range):\n if 273 <= T_line <= 323:\n R = ((R_lo + \n ((R_lo - R_mid)/(T_range[0] - T_range[1]))\n *(T_line - T_range[0]))/N_cond)\n elif T_line > 323:\n R = ((R_mid + \n ((R_mid - R_hi)/(T_range[1] - T_range[2]))\n *(T_line - T_range[1]))/N_cond)\n else:\n R = R_lo\n print('Out of bounds')\n return R\n\n R_lo, R_mid, R_hi = R_list[0], R_list[1], R_list[2]\n temp_factor = 1\n wind_factor = 1\n sigma = 5.6703e-8 # Stefan-Boltzmann constant\n\n T_amb = T_amb*temp_factor\n V = V*wind_factor\n\n v, k, Pr = air_prop(T_amb)\n Re = reynolds(V, D, v)\n htcoeff = nusselt(Re, Pr) * k / D\n\n def T_line(T_init):\n \n R = R_T(R_lo, R_mid, R_hi, T_init, N_cond)\n print R\n\n C4 = e_s * sigma * D * math.pi\n C3 = 0.0\n C2 = 0.0\n C1 = htcoeff * D * math.pi\n C0 = - ( I ** 2 * R\n + I_sun * a_s * D\n + htcoeff * D * math.pi * T_amb\n + e_s * D * math.pi * sigma * (T_amb ** 4))\n\n return np.roots([C4, C3, C2, C1, C0])\n\n T_c = T_amb\n \n for i in range(n_iter):\n T_arr = T_line(T_c)\n T_c = np.real(T_arr[np.where((np.real(T_arr) > 0) & ~(np.iscomplex(T_arr)))]).mean()\n print T_c\n\n return T_c", "def dvdt(self, args: List[float]) -> float:\n v, h_nav, n_kvhh, h_kva, m_kvsi, s_ampar, _, s_nmdar, s_gabar, ca = args\n return ((-10.0*self.params.area \n * (self.leak.i(v)\n + self.nav.i(v, h=h_nav) \n + self.kvhh.i(v, n=n_kvhh)\n + self.kva.i(v, h=h_kva)\n + self.kvsi.i(v, m=m_kvsi)\n + self.cav.i(v)\n + self.kca.i(v, ca=ca)\n + self.nap.i(v)\n + self.kir.i(v))\n - (self.ampar.i(v, s=s_ampar)\n + self.nmdar.i(v, s=s_nmdar)\n + self.gabar.i(v, s=s_gabar))) \n / (10.0*self.params.cm*self.params.area))", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def forward(self, z_t_1, h_x, phi_table, t, temp=0):\n \n# sparsemax.device = z_t_1.device\n \n z_category, z_category_sparse = self.gen_z_t_dist_now(z_t_1, h_x)\n \n# if t > self.t_thres:\n# \n# if self.use_gumbel_softmax:\n# # print(t, 'inference here')\n# # device = z_category.device\n# \n# averaged_z_t = 0\n# \n# log_prob = Variable(torch.log(z_category))\n# \n# for k in range(self.sampling_times): \n# curr_z_t = F.gumbel_softmax(log_prob, tau = 0.05)\n# \n# # curr_z_t = sparsemax(log_prob)\n# \n# \n# averaged_z_t += curr_z_t\n# \n# del curr_z_t\n# \n# # averaged_z_t = averaged_z_t.to(device)\n# \n# z_t = averaged_z_t/self.sampling_times\n# \n# # print('diff::', torch.norm(z_t - z_category))\n# # \n# # print()\n# else:\n# z_t = z_category\n# \n# else:\n z_t = z_category\n \n if len(z_t.shape) == 2:\n phi_z = torch.mm(z_t, torch.t(phi_table))\n else:\n \n phi_table_full = (torch.t(phi_table)).view(1, phi_table.shape[1], phi_table.shape[0])\n \n phi_table_full = phi_table_full.repeat(phi_table.shape[1], 1, 1)\n \n phi_z = torch.bmm(z_t, phi_table_full)\n# mu = 
self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_category, phi_z, z_category_sparse", "def addPhotonVariables(hf, event, data_temp, pho):\n # data_temp[ 0, column_names.index( 'pho_truthPdgId_egam') ] = hf[ 'pho_truthPdgId_egam' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_truthPdgId_atlas') ] = hf[ 'pho_truthPdgId_atlas' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_egamTruthParticle') ] = hf[ 'pho_egamTruthParticle' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_truthType') ] = hf[ 'pho_truthType' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_truthOrigin') ] = hf[ 'pho_truthOrigin' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_isPhotonEMLoose') ] = hf[ 'pho_isPhotonEMLoose' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_isPhotonEMTight') ] = hf[ 'pho_isPhotonEMTight' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_e') ] = hf[ 'pho_e' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_eta') ] = hf[ 'pho_eta' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_phi') ] = hf[ 'pho_phi' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_et') ] = hf[ 'pho_et' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Rhad1') ] = hf[ 'pho_Rhad1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Rhad') ] = hf[ 'pho_Rhad' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_weta2') ] = hf[ 'pho_weta2' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Rphi') ] = hf[ 'pho_Rphi' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Reta') ] = hf[ 'pho_Reta' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Eratio') ] = hf[ 'pho_Eratio' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_f1') ] = hf[ 'pho_f1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_wtots1') ] = hf[ 'pho_wtots1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_DeltaE') ] = hf[ 'pho_DeltaE' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_weta1') ] = hf[ 'pho_weta1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_fracs1') ] = hf[ 'pho_fracs1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_ConversionType') ] = hf[ 'pho_ConversionType' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_ConversionRadius') ] = hf[ 'pho_ConversionRadius' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_VertexConvEtOverPt') ] = hf[ 'pho_VertexConvEtOverPt' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_VertexConvPtRatio') ] = hf[ 'pho_VertexConvPtRatio' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_topoetcone20') ] = hf[ 'pho_topoetcone20' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_topoetcone30') ] = hf[ 'pho_topoetcone30' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_topoetcone40') ] = hf[ 'pho_topoetcone40' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_ptvarcone20') ] = hf[ 'pho_ptvarcone20' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_ptvarcone30') ] = hf[ 'pho_ptvarcone30' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_ptvarcone40') ] = hf[ 'pho_ptvarcone40' ][ event][ pho ]", "def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n 
elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd = True,\n movingDomain=False):\n #\n #set the objects describing the method and boundary conditions\n #\n self.movingDomain=movingDomain\n self.tLast_mesh=None\n #\n self.name=name\n self.sd=sd\n self.Hess=False\n self.lowmem=True\n self.timeTerm=True#allow turning off the time derivative\n #self.lowmem=False\n self.testIsTrial=True\n self.phiTrialIsTrial=True\n self.u = uDict\n self.ua = {}#analytical solutions\n self.phi = phiDict\n self.dphi={}\n for ck,phi in phiDict.iteritems():\n if coefficients.potential.has_key(ck):\n for cj in coefficients.potential[ck].keys():\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n else:\n self.dphi[(ck,ck)] = FiniteElementFunction(phi.femSpace)\n #check for nonlinearities in the diffusion coefficient that don't match the potential\n for ci,ckDict in coefficients.diffusion.iteritems():\n #for ck,cjDict in coefficients.diffusion.iteritems(): #cek: bug?\n for ck,cjDict in ckDict.iteritems():\n for cj in cjDict.keys():\n if not self.dphi.has_key((ck,cj)):\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n self.matType = matType\n #try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1,coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n ## Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now\n self.fluxBoundaryConditions=fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n #determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n #cek come back\n if self.stabilization != None:\n for ci in range(self.nc):\n if coefficients.mass.has_key(ci):\n for flag in coefficients.mass[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.advection.has_key(ci):\n for flag in coefficients.advection[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.diffusion.has_key(ci):\n for diffusionDict in coefficients.diffusion[ci].values():\n for flag in diffusionDict.values():\n if flag != 'constant':\n self.stabilizationIsNonlinear=True\n if coefficients.potential.has_key(ci):\n for flag in coefficients.potential[ci].values():\n 
if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.reaction.has_key(ci):\n for flag in coefficients.reaction[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.hamiltonian.has_key(ci):\n for flag in coefficients.hamiltonian[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n #determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or\n (numericalFluxType != None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n #calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self,self.nFreeVDOF_global)\n #\n #build the quadrature point dictionaries from the input (this\n #is just for convenience so that the input doesn't have to be\n #complete)\n #\n elementQuadratureDict={}\n elemQuadIsDict = isinstance(elementQuadrature,dict)\n if elemQuadIsDict: #set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization != None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature\n if self.shockCapturing != None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if elementQuadrature.has_key(('numDiff',ci,ci)):\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature\n if massLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n if reactionLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n elementBoundaryQuadratureDict={}\n if 
isinstance(elementBoundaryQuadrature,dict): #set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if elementBoundaryQuadrature.has_key(I):\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n #mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints,self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global\n #\n #Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*\n self.mesh.nElementBoundaries_element*\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n\n #\n #storage dictionaries\n self.scalars_element = set()\n #\n #simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q={}\n self.ebq={}\n self.ebq_global={}\n self.ebqe={}\n self.phi_ip={}\n #mesh\n self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')\n self.q['det(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['abs(det(J))'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['J'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.q['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['g'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n self.ebqe['inverse(J)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['hat(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['bar(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['sqrt(det(g))'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.ebqe[('n')] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n #shape\n self.q[('v',0)] = 
numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w',0)] = self.q[('v',0)]\n self.q[('grad(v)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)',0)] = self.q[('grad(v)',0)]\n self.q[('grad(w)*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)*dV_f',0)] = self.q[('grad(w)*dV',0)]\n #todo get rid of dV_{f,a}, etc\n self.q[('w*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w*dV_m',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','grad(w)*dV','grad(w)*dV_f','w*dV','w*dV_m']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.q[key_ci] = self.q[key_0]\n #ELLAM weights stiffness, body integrals by dt\n for ci in range(self.nc):\n self.q[('dt*grad(w)*dV',ci)]= numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[ci],self.nSpace_global),'d')\n #\n self.ebqe[('v',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n self.ebqe[('w',0)] = self.ebqe[('v',0)]\n self.ebqe[('grad(v)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.ebqe[('w*dS_f',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','w*dS_f']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.ebqe[key_ci] = self.ebqe[key_0]\n\n for ci in range(self.nc):\n self.q[('u',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('grad(u)',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n #f\n for ci in self.coefficients.advection.keys():\n self.q[('f',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.q[('df',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n self.ebqe[('f',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.ebqe[('df',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n\n #a, linear dispersion single component\n\n for ci,ckDict in self.coefficients.diffusion.iteritems():\n for ck,cjDict in ckDict.iteritems():\n for flag in cjDict.values():\n assert flag == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n\n if self.coefficients.sdInfo != None and (ci,ck) in self.coefficients.sdInfo.keys():\n self.q[('a',ci,ck)] = numpy.zeros(\n 
(self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n\n else:\n self.q[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n #dense storage\n self.q[('grad(w)*dV_a',ci,ck)] = self.q[('grad(w)*dV_f',ci)]\n self.q[('dt*grad(w)*dV_a',ci,ck)]= self.q[('dt*grad(w)*dV',ci)]\n #ci,ckDict\n #linear potential only for now, need to change for e.g., Buckley Leverett\n for ck in self.phi.keys():\n self.phi[ck].dof[:]=self.u[ck].dof\n self.q[('grad(phi)',ck)] = self.q[('grad(u)',ck)]\n for key in self.dphi.keys():\n self.dphi[key].dof.fill(1.0)\n self.q[('dphi',key[0],key[1])] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n\n\n# if self.coefficients.diffusion.has_key(0):\n# for ck,flag in self.coefficients.diffusion[0][0].iteritems():\n# assert self.coefficients.diffusion[0][0][ck] == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n# if self.coefficients.sdInfo != None and (0,0) in self.coefficients.sdInfo.keys():\n# self.q[('a',0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.q[('da',0,0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n\n# else:\n# self.q[('a',0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.q[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# 
self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# #\n# self.phi[0].dof[:]=self.u[0].dof\n# self.dphi[(0,0)].dof.fill(1.0)\n# self.q[('grad(phi)',0)] = self.q[('grad(u)',0)]\n# self.q[('dphi',0,0)] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n# self.q[('grad(w)*dV_a',0,0)] = self.q[('grad(w)*dV_f',0)]\n# self.q[('dt*grad(w)*dV_a',0,0)]= self.q[('dt*grad(w)*dV',0)]\n\n #r 'constant' ie not a function of solution but go ahead and include dr for now\n for ci,cjDict in self.coefficients.reaction.iteritems():\n self.q[('r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dr',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('w*dV_r',ci)] = self.q[('w*dV',ci)]\n self.q[('dt*w*dV_r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.ebqe[('r',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #m\n for ci,cjDict in self.coefficients.mass.iteritems():\n self.q[('m',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dm',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('mt',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_last',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp',ci)] = self.q[('m',ci)]\n self.q[('cfl',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('numDiff',ci,ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.ebqe[('m',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n for cj in cjDict.keys():\n self.ebqe[('dm',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n\n ###ellam specific options with defauls here\n self.ellamDiscretization = ELLAMtools.ELLAMdiscretization(self,options)\n\n #\n self.needEBQ = options.needEBQ #could need for analytical velocity evaluation with RT0,BDM\n\n #beg normal stuff allocating things\n self.points_elementBoundaryQuadrature= set()\n self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature= set()\n self.tensors_elementBoundaryQuadrature= set()\n\n if self.needEBQ:\n for k in ['x','hat(x)']:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq['n'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n self.ebq['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 
self.nSpace_global,\n self.nSpace_global),'d')\n #allocate the metric tensor\n self.ebq['g'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n log(memory(\"element boundary quadrature\",\"LADRellam\"),level=4)\n ebq_keys = ['sqrt(det(g))']\n ebq_keys.extend([('u',ci) for ci in range(self.nc)])\n for k in ebq_keys:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #test and trial info\n self.ebq[('w',0)] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[0]),'d')\n for ci in range(1,self.nc):\n self.ebq[('w',ci)] = self.ebq[('w',0)]\n for ci in range(self.nc):\n self.ebq[('v',ci)] = self.ebq[('w',0)]\n\n #ebq_global info\n self.ebq_global['x'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq_global['n'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n #\n # allocate residual and Jacobian storage\n #\n self.elementResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementSpatialResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementJacobian = {}\n for ci in range(self.nc):\n self.elementJacobian[ci]={}\n for cj in range(self.nc):\n if cj in self.coefficients.stencil[ci]:\n self.elementJacobian[ci][cj] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci],\n self.nDOF_trial_element[cj]),\n 'd')\n #\n self.fluxJacobian_exterior = {}\n for ci in range(self.nc):\n self.fluxJacobian_exterior[ci]={}\n for cj in self.coefficients.stencil[ci]:\n self.fluxJacobian_exterior[ci][cj] = numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[cj]),\n 'd')\n\n #\n #\n #\n #\n log(memory(\"element and element boundary Jacobians\",\"OneLevelTransport\"),level=4)\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')\n self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')\n self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n #identify the internal nodes this is ought to be in mesh\n ##\\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global,i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')\n for nI,n in 
enumerate(self.internalNodes):\n self.internalNodesArray[nI]=n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\",2)\n self.updateLocal2Global()\n log(\"Building time integration object\",2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\"OneLevelTransport\"),level=4)\n #mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options != None:\n self.timeIntegration.setFromOptions(options)\n log(memory(\"TimeIntegration\",\"OneLevelTransport\"),level=4)\n log(\"Calculating numerical quadrature formulas\",2)\n self.calculateQuadrature()\n #lay out components/equations contiguously for now\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]\n self.stride = [1 for ci in range(self.nc)]\n #use contiguous layout of components for parallel, requires weak DBC's\n comm = Comm.get()\n self.comm=comm\n if comm.size() > 1:\n assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,\"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [ci]\n self.stride = [self.nc for ci in range(self.nc)]\n #\n log(memory(\"stride+offset\",\"OneLevelTransport\"),level=4)\n if numericalFluxType != None:\n if options == None or options.periodicDirichletConditions == None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n #set penalty terms\n #cek todo move into numerical flux initialization\n if self.ebq_global.has_key('penalty'):\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n #penalty term\n #cek move to Numerical flux initialization\n if self.ebqe.has_key('penalty'):\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power\n log(memory(\"numericalFlux\",\"OneLevelTransport\"),level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n #use post processing tools to get conservative fluxes, None by default\n import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n log(memory(\"velocity postprocessor\",\"OneLevelTransport\"),level=4)\n #helper for writing out data storage\n import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n 
self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n #TODO get rid of this\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n if self.coefficients.advection.has_key(ci):\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux,'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux,'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {}\n for ci in range(self.nc):\n self.numericalFlux.isDOFBoundary[ci]= numpy.zeros(self.ebqe[('u',ci)].shape,'i')\n if not hasattr(self.numericalFlux,'ebqe'):\n self.numericalFlux.ebqe = {}\n for ci in range(self.nc):\n self.numericalFlux.ebqe[('u',ci)]= numpy.zeros(self.ebqe[('u',ci)].shape,'d')", "def forward(self,y,xt,D,h,param):\n \n \n h_flip=h.flip(2,3)\n a=conv(h,xt)-y \n b=xt-self.learning_rate*conv(h_flip,a)\n u=tor.matmul(D,tor.matmul(b,D.transpose(2,3)))\n u_=rho(u,param)\n xt1=tor.matmul(D.transpose(2,3),tor.matmul(u_,D))\n \n return xt1", "def __init__(self, parameters, mesh_name, facet_name,\n bc_dict={\"obstacle\": 2, \"channel_walls\": 1, \"inlet\": 3,\n \"outlet\": 4}):\n self.bc_dict = bc_dict\n self.mesh = df.Mesh()\n with df.XDMFFile(mesh_name) as infile:\n infile.read(self.mesh)\n\n mvc = df.MeshValueCollection(\"size_t\", self.mesh,\n self.mesh.topology().dim() - 1)\n with df.XDMFFile(facet_name) as infile:\n infile.read(mvc, \"name_to_read\")\n self.mf = mf = df.cpp.mesh.MeshFunctionSizet(self.mesh, mvc)\n\n self.V = V = df.VectorFunctionSpace(self.mesh, 'P',\n parameters[\"degree velocity\"])\n self.Q = Q = df.FunctionSpace(self.mesh, 'P',\n parameters[\"degree pressure\"])\n self.rho = df.Constant(parameters[\"density [kg/m3]\"])\n self.mu = df.Constant(parameters[\"viscosity [Pa*s]\"])\n self.dt = df.Constant(parameters[\"dt [s]\"])\n self.g = df.Constant((0, 0))\n self.vu, self.vp = df.TestFunction(V), df.TestFunction(Q)\n self.u_, self.p_ = df.Function(V), df.Function(Q)\n self.u_1, self.p_1 = df.Function(V), df.Function(Q)\n self.u_k, self.p_k = df.Function(V), df.Function(Q)\n self.u, self.p = df.TrialFunction(V), df.TrialFunction(Q) # unknown!\n\n self.U_m = U_m = parameters[\"velocity [m/s]\"]\n x = [0, .41 / 2] # center of the channel\n Ucenter = 4.*U_m*x[1]*(.41-x[1])/(.41*.41)\n U0_str = \"4.*U_m*x[1]*(.41-x[1])/(.41*.41)\"\n self.U_mean = np.mean(2 / 3 * Ucenter)\n\n U0 = df.Expression((U0_str, \"0\"), U_m=U_m, degree=2)\n bc0 = df.DirichletBC(V, df.Constant((0, 0)), mf, bc_dict[\"obstacle\"])\n bc1 = df.DirichletBC(V, df.Constant((0, 0)), mf, bc_dict[\"channel_walls\"])\n bc2 = df.DirichletBC(V, U0, mf, bc_dict[\"inlet\"])\n bc3 = df.DirichletBC(Q, df.Constant(0), mf, bc_dict[\"outlet\"])\n self.bcu = [bc0, bc1, bc2]\n self.bcp = [bc3]\n self.ds_ = df.Measure(\"ds\", domain=self.mesh, subdomain_data=mf)\n return", "def tde(ax, col, legend):\n z = 0.354\n d = Planck15.luminosity_distance(z=z).cgs.value\n\n # In the Eftekhari paper, it says that although the event was first\n # triggered by Swift/BAT on 2011 March 28.55 UT, subsequent\n # analysis of the BAT data revealed discernible emission as early as\n # 2011 March 25. 
All times should therefore be shifted relative to Mar 25.5\n\n # Need to add 3.04 to the Zauderer points\n nu, dt, f, ef, islim = zauderer()\n t = (dt+3.04)/(1+z)\n\n # Low frequency\n nu_plt = 4.9E9\n choose = np.logical_and(~islim, nu == nu_plt/1E9)\n dt_all = t[choose]\n nufnu_all = nu_plt*f[choose]\n\n # adding the set from Berger2012\n # and making the same correction as above\n # this is 4.9 GHz\n t = (np.array([3.87, 4.76, 5.00, 5.79, 6.78, 7.77, 9.79, 14.98, 22.78,\n 35.86, 50.65, 67.61, 94.64, 111.62, 126.51, 143.62, 164.38, 174.47,\n 197.41, 213.32])) / (1+z)\n f = np.array([0.25, 0.34, 0.34, 0.61, 0.82, 1.48, 1.47, 1.80, 2.10, 4.62,\n 4.84, 5.86, 9.06, 9.10, 9.10, 11.71, 12.93, 12.83, 13.29, 12.43])\n\n # Berger 2012: use the 8.4 GHz light curve, since that's closest in freq\n #t = (np.array([14.97, 127.69, 159.77, 174.47, 177.50, 197.41, 213.32, 219.22]))/(1+z)\n #f = np.array([5.49, 19.03, 22.15, 23.19, 23.65, 22.42, 22.04, 21.52])\n #dt_all = np.append(dt_all, t)\n #nufnu_all = np.append(nufnu_all, f*nu_plt)\n\n # adding the set from Zauderer2013\n # they also say it's relative to March 25.5...\n # so I think I need to subtract 3.04 days from here too\n t = (np.array([245.23, 302.95, 383.92, 453.66, 582.31]))/(1+z)\n f = np.array([12.17, 12.05, 12.24, 11.12, 8.90])\n dt_all = np.append(dt_all, t)\n nufnu_all = np.append(nufnu_all, f*nu_plt)\n\n # adding the set from Eftekhari 2018\n t = np.array([645, 651.1, 787.6, 1032, 1105, 1373, 1894])\n f = np.array([8.24, 8.63, 6.23, 4.21, 3.52, 2.34, 1.47])\n dt_all = np.append(dt_all, t)\n nufnu_all = np.append(nufnu_all, f*nu_plt)\n\n order = np.argsort(dt_all)\n lum = plot_line(\n ax, d, dt_all[order], nufnu_all[order], \n 'SwiftJ1644+57', 'TDE', col, legend)\n ax.text(dt_all[order][10], lum[10]*1.1, 'Swift J1644+57', fontsize=11,\n verticalalignment='bottom',\n horizontalalignment='left')", "def TM_fluid(layer, kx, om):\n\n h = layer.d\n rho = layer.medium.rho\n K = layer.medium.K\n k = om*np.sqrt(rho/K)\n ky = np.sqrt(k**2-kx**2)\n T = np.zeros((2, 2), dtype=complex)\n T[0, 0] = np.cos(ky*h)\n T[1, 0] = (om**2*rho/ky)*np.sin(ky*h)\n T[0, 1] = -(ky/(om**2*rho))*np.sin(ky*h)\n T[1, 1] = np.cos(ky*h)\n return T", "def initialize_variables(self):\n super(D2Model, self).initialize_variables()\n\n s = \"::: initializing 2D variables :::\"\n print_text(s, cls=self)\n\n # Depth below sea level :\n class Depth(Expression):\n def eval(self, values, x):\n values[0] = abs(min(0, x[2]))\n self.D = Depth(element=self.Q.ufl_element())\n \n # Enthalpy model\n self.theta_surface = Function(self.Q, name='theta_surface')\n self.theta_float = Function(self.Q, name='theta_float')\n self.theta_app = Function(self.Q, name='theta_app')\n self.theta = Function(self.Q, name='theta')\n self.theta0 = Function(self.Q, name='theta0')\n self.W0 = Function(self.Q, name='W0')\n self.thetahat = Function(self.Q, name='thetahat')\n self.uhat = Function(self.Q, name='uhat')\n self.vhat = Function(self.Q, name='vhat')\n self.what = Function(self.Q, name='what')\n self.mhat = Function(self.Q, name='mhat')\n self.rho_b = Function(self.Q, name='rho_b')\n\n # Age model \n self.age = Function(self.Q, name='age')\n self.a0 = Function(self.Q, name='a0')\n\n # Surface climate model\n self.precip = Function(self.Q, name='precip')\n\n # Stokes-balance model :\n self.u_s = Function(self.Q, name='u_s')\n self.u_t = Function(self.Q, name='u_t')\n self.F_id = Function(self.Q, name='F_id')\n self.F_jd = Function(self.Q, name='F_jd')\n self.F_ib = Function(self.Q, name='F_ib')\n 
self.F_jb = Function(self.Q, name='F_jb')\n self.F_ip = Function(self.Q, name='F_ip')\n self.F_jp = Function(self.Q, name='F_jp')\n self.F_ii = Function(self.Q, name='F_ii')\n self.F_ij = Function(self.Q, name='F_ij')\n self.F_iz = Function(self.Q, name='F_iz')\n self.F_ji = Function(self.Q, name='F_ji')\n self.F_jj = Function(self.Q, name='F_jj')\n self.F_jz = Function(self.Q, name='F_jz')\n self.tau_iz = Function(self.Q, name='tau_iz')\n self.tau_jz = Function(self.Q, name='tau_jz')", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def __init__(self):\n # Set constants\n self.fromHztoeV = 6.58e-16\n self.gramstoeV = 1 / ( 1.78 * 1e-33)\n self.mtoev = 1/(1.97 * 1e-7) \n self.H0 = cosmo.H(0).value * 1e3 / (1e3 * const.kpc.value) #expressed in 1/s\n self.rhocritical = cosmo.critical_density(0).value * self.gramstoeV /(1e-2)**3 # eV/m**3\n self.Om0 = cosmo.Om0 #total matter \n self.OLambda0 = cosmo.Ode0 # cosmological constant\n self.DM0 = self.Om0 - cosmo.Ob0 # dark matter\n self.evtonJoule = 1.60218 * 1e-10 # from eV to nJ\n self.evtoJoule = 1.60218 * 1e-19 # from eV to J\n PSgal1h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_1h.dat\")\n PSgal2h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_2h.dat\")\n self.Mpc = 1e3 * const.kpc.value\n self.zmin = 0.001\n self.zmax = 30.001\n self.zbins = 301\n self.h = cosmo.h\n self.z_vect = np.linspace(self.zmin, self.zmax, self.zbins)\n self.k_vect = PSgal1h[:,0]* self.h\n self.Power1h = PSgal1h[:,1:]/(self.h**3)\n self.Power2h = PSgal2h[:,1:]/(self.h**3)\n self.Power = self.Power1h + self.Power2h\n self.Praw_prova1h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power1h))\n self.Praw_prova2h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power2h))\n self.Praw_prova = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power))", "def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. #kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. 
#days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None", "def solve_amps(self, h, a, g):\n\n # Symmetrize T3 RHS\n g3 = ((+ g.t3\n + g.t3.transpose([1, 2, 0, 4, 5, 3])\n + g.t3.transpose([2, 0, 1, 5, 3, 4])\n + g.t3.transpose([0, 2, 1, 3, 5, 4])\n + g.t3.transpose([2, 1, 0, 5, 4, 3])\n + g.t3.transpose([1, 0, 2, 4, 3, 5])\n ) / 12)\n\n # Symmetrize T2 RHS\n g2 = 1 / 2 * (g.t2 + g.t2.transpose([1, 0, 3, 2]))\n\n # Solve\n t2 = g2 * (- cc_denom(h.f, g.t2.ndim, 'dir', 'full'))\n t3 = g3 * (- cc_denom(h.f, g.t3.ndim, 'dir', 'full'))\n\n # Symmetrize amplitudes\n t2 = 1 / 2 * (t2 + t2.transpose([1, 0, 3, 2]))\n t3 = ((+ t3\n + t3.transpose([1, 2, 0, 4, 5, 3])\n + t3.transpose([2, 0, 1, 5, 3, 4])\n + t3.transpose([0, 2, 1, 3, 5, 4])\n + t3.transpose([2, 1, 0, 5, 4, 3])\n + t3.transpose([1, 0, 2, 4, 3, 5])) / 6)\n\n return Tensors(\n t1=g.t1 * (- cc_denom(h.f, g.t1.ndim, 'dir', 'full')),\n t2=t2,\n t3=t3)", "def variable_costs(dh: DataHandler):\n print(\"PtHydrogen not implemented\")\n\n scen_hor_map = dh.scenarios.horizon\n\n cost_var = dh.get(\"i_cost\").xs(\"varcost\", level=\"par_cost\")\n cost_var = cost_var.groupby([\"alltec\"]).apply(\n extract_horizon_specific_cost, scen_hor_map\n )\n cost_var = add_dimension(cost_var, dh.merge_stored_sets(\"r\"), \"r\")\n cost_var = cost_var.reorder_levels([\"alltec\", \"r\"])\n\n h2_price = dh.get(\"o_h2price_buy\")\n h2_price = add_dimension(h2_price, dh.merge_stored_sets(\"tec_h2g\"), \"alltec\")\n\n elec_price = dh.get(\"o_prices\")\n\n cost_fuel = dh.get(\"cost_fuel\")\n cost_fuel = add_dimension(cost_fuel, dh.merge_stored_sets(\"r\"), \"r\")\n cost_fuel = cost_fuel.reorder_levels([\"alltec\", \"r\"])\n\n cost_fuel.loc[h2_price.index, :] = h2_price\n\n eff = dh.get(\"eff\")\n\n co2_int = dh.get(\"co2_int\").div(1000)\n\n co2_price = dh.get(\"o_co2price\")\n\n co2_costs = co2_int * co2_price\n co2_costs.index.names = [\"alltec\", \"r\"]\n\n var_cost = (\n cost_fuel.add(co2_costs, fill_value=0).div(eff).add(cost_var, fill_value=0)\n )\n\n return var_cost", "def vorticity(self):\n \n ux,_ = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n _,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n # self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y'])\n self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y','t'])\n \n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append('1/dt')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = ('1/dt')\n\n\n return self._obj", "def velocityVerlet(XY, yh, yt, h, n):\n for l in range(n):\n yt_temp = yt + (0.5 * h * XY.grad_log_density(yh))\n yhp1 = yh 
+ (h * yt_temp)\n ytp1 = yt_temp + (0.5 * h * XY.grad_log_density(yhp1))\n yh = yhp1\n yt = ytp1\n\n return [yhp1, ytp1]", "def ADT_QCD_LEPTON():\n\n # As input for the quark-mass ratios, we use the quark masses at MZ and the lepton masses\n ip = Num_input()\n\n mu = ip.mu_at_MZ\n md = ip.md_at_MZ\n ms = ip.ms_at_MZ\n me = ip.me\n mmu = ip.mmu\n mtau = ip.mtau\n\n # Create the ADT:\n\n gamma_hat_P63eu_Q81u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P63muu_Q81u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P63tauu_Q81u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P63ed_Q81d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P63mud_Q81d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P63taud_Q81d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P63es_Q81s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P63mus_Q81s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P63taus_Q81s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P63eu_Q82u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P63muu_Q82u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P63tauu_Q82u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P63ed_Q82d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P63mud_Q82d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P63taud_Q82d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P63es_Q82s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P63mus_Q82s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P63taus_Q82s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_P62ue_Q83u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P62umu_Q83u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P62utau_Q83u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P62de_Q83d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P62dmu_Q83d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P62dtau_Q83d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P62se_Q83s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P62smu_Q83s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P62stau_Q83s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P62ue_Q84u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P62umu_Q84u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P62utau_Q84u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P62de_Q84d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P62dmu_Q84d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P62dtau_Q84d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P62se_Q84s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P62smu_Q84s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P62stau_Q84s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_Q81u = np.vstack((gamma_hat_P63eu_Q81u, gamma_hat_P63muu_Q81u, gamma_hat_P63tauu_Q81u, np.zeros((15,6))))\n gamma_hat_Q81d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q81d, gamma_hat_P63mud_Q81d, gamma_hat_P63taud_Q81d, np.zeros((12,6))))\n 
gamma_hat_Q81s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q81s, gamma_hat_P63mus_Q81s, gamma_hat_P63taus_Q81s, np.zeros((9,6))))\n\n gamma_hat_Q82u = np.vstack((gamma_hat_P63eu_Q82u, gamma_hat_P63muu_Q82u, gamma_hat_P63tauu_Q82u, np.zeros((15,6))))\n gamma_hat_Q82d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q82d, gamma_hat_P63mud_Q82d, gamma_hat_P63taud_Q82d, np.zeros((12,6))))\n gamma_hat_Q82s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q82s, gamma_hat_P63mus_Q82s, gamma_hat_P63taus_Q82s, np.zeros((9,6))))\n\n gamma_hat_Q83u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q83u, gamma_hat_P62umu_Q83u, gamma_hat_P62utau_Q83u, np.zeros((6,6))))\n gamma_hat_Q83d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q83d, gamma_hat_P62dmu_Q83d, gamma_hat_P62dtau_Q83d, np.zeros((3,6))))\n gamma_hat_Q83s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q83s, gamma_hat_P62smu_Q83s, gamma_hat_P62stau_Q83s))\n\n gamma_hat_Q84u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q84u, gamma_hat_P62umu_Q84u, gamma_hat_P62utau_Q84u, np.zeros((6,6))))\n gamma_hat_Q84d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q84d, gamma_hat_P62dmu_Q84d, gamma_hat_P62dtau_Q84d, np.zeros((3,6))))\n gamma_hat_Q84s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q84s, gamma_hat_P62smu_Q84s, gamma_hat_P62stau_Q84s))\n\n\n\n\n gamma_hat = np.array([gamma_hat_Q81u, gamma_hat_Q81d, gamma_hat_Q81s, gamma_hat_Q82u, gamma_hat_Q82d, gamma_hat_Q82s,\n gamma_hat_Q83u, gamma_hat_Q83d, gamma_hat_Q83s, gamma_hat_Q84u, gamma_hat_Q84d, gamma_hat_Q84s])\n\n\n # Return the tensor\n\n # tensor, zeile, spalte\n\n return gamma_hat", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - 
z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def _step(self, t, y, h):\n # We must use solvers / implicit form\n f_pn1 = lambda a_n1: (y + h*self.v + (h**2 / 2.0) * \\\n ((1.0 - 2.*self.beta)*self.a + 2.*self.beta*a_n1))\n f_vn1 = lambda a_n1: (self.v + h*((1.0-self.gamma)*self.a + self.gamma*a_n1))\n def f_an1(a_n1):\n f_n1 = self.f(t+h,f_pn1(a_n1),f_vn1(a_n1))\n f_n = self.f(t,y,self.v,)\n return a_n1 - ((1.0+self.alpha)*f_n1 - self.alpha*f_n)\n\n a = self.solver(f_an1, self.a)\n y = f_pn1(a) # Calculate and store new variables. 
\n self.v = f_vn1(a)\n self.a = a\n return t+h, y", "def prepare_rhs(self, simulation):\n\n nv = simulation.container.nv\n sorder = simulation.container.sorder\n nspace = [1] * (len(sorder) - 1)\n v = self.stencil.get_all_velocities()\n\n gpu_support = simulation.container.gpu_support\n\n for key, value in self.value_bc.items():\n if value is not None:\n indices = np.where(self.ilabel == key)\n # TODO: check the index in sorder to be the most contiguous\n nspace[0] = indices[0].size\n k = self.istore[0, indices]\n\n s = 1 - self.distance[indices]\n coords = tuple()\n for i in range(simulation.domain.dim):\n x = simulation.domain.coords_halo[i][self.istore[i + 1, indices]]\n x += s * v[k, i] * simulation.domain.dx\n x = x.ravel()\n for j in range(\n 1, simulation.domain.dim\n ): # pylint: disable=unused-variable\n x = x[:, np.newaxis]\n coords += (x,)\n\n m = Array(nv, nspace, 0, sorder, gpu_support=gpu_support)\n m.set_conserved_moments(simulation.scheme.consm)\n\n f = Array(nv, nspace, 0, sorder, gpu_support=gpu_support)\n f.set_conserved_moments(simulation.scheme.consm)\n\n args = coords\n if isinstance(value, types.FunctionType):\n func = value\n elif isinstance(value, tuple):\n func = value[0]\n args += value[1]\n\n if self.time_bc[key]:\n func(f, m, 0, *args)\n else:\n func(f, m, *args)\n\n simulation.equilibrium(m)\n simulation.m2f(m, f)\n\n if self.generator.backend.upper() == \"LOOPY\":\n f.array_cpu[...] = f.array.get()\n\n self.feq[:, indices[0]] = f.swaparray.reshape((nv, indices[0].size))\n\n if self.time_bc[key]:\n self.func.append(func)\n self.args.append(args)\n self.f.append(f)\n self.m.append(m)\n self.indices.append(indices[0])", "def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def trc_fgen_prefb(self,trc,dt,nspad=200,hwin=150,vlen=51):\n output=np.zeros((len(trc),((11*(vlen))+1)))\n pad=np.random.rand(nspad)/100\n trc_norm=trc/np.amax(np.abs(trc))\n trc_norm_padded=np.hstack((pad,trc_norm))\n trc_entropy=self.entropy(trc_norm_padded,50)\n trc_fdm=self.fdm(trc_norm_padded,50,np.arange(1,4),15)\n trc_slta=trigger.classic_sta_lta(trc_norm_padded,2,100)\n trc_fq_win_sum=self.fq_win_sum(trc_norm_padded,hwin,dt)\n hwin2=50\n trc_kurtosis_skew=self.kurtosis_skewness(trc_norm_padded,hwin2)\n for i,j in enumerate(trc):\n ftrc=[]\n fb=i*dt\n ftrc=np.append(ftrc,trc_norm_padded[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(np.abs(trc_norm_padded)))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_entropy)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_entropy))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_fdm)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fdm))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1]) \n ftrc=np.append(ftrc,self.norm(trc_slta)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_fq_win_sum)[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(np.gradient(trc_fq_win_sum))[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[0])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n ftrc=np.append(ftrc,self.norm(trc_kurtosis_skew[1])[np.int(nspad+fb/dt)-vlen+1:np.int(nspad+fb/dt)+1])\n 
ftrc=np.append(ftrc,1)\n output[i,:]=ftrc\n return output", "def _calc_var(self, f, a, b, z):\n\n c_1 = (t.exp(a[...,0] * f[...,0] + b[...,0]) *\n (a[...,0]**2 * f[...,0]**2\n - 2 * a[...,0] * f[...,0] + 2)\n ) / a[...,0]**3\n c_2 = ((t.exp(a[...,1:-1] * f[...,1:] + b[...,1:-1]) *\n (a[...,1:-1]**2 * f[...,1:]**2 - 2 * a[...,1:-1] * f[...,1:] + 2) -\n t.exp(a[...,1:-1] * f[...,:-1] + b[...,1:-1]) *\n (a[...,1:-1]**2 * f[...,:-1]**2 - 2 * a[...,1:-1] * f[...,:-1] + 2)\n ) / a[...,1:-1]**3).sum(-1)\n c_3 = (t.exp(a[...,-1] * f[...,-1] + b[...,-1]) *\n (a[...,-1]**2 * f[...,-1]**2 - 2 * a[...,-1] * f[...,-1] + 2)\n ) / a[...,-1]**3\n\n return 1/z * (c_1 + c_2 + c_3)", "def _diff_wep(a,t,dh,wair,entr,pres):\n ph = _eq_pressure(0,0,0,a,t,dh)\n gi = _ice_g(0,0,t,pres)\n gv = _eq_vappot(0,0,0,a,t,dh)\n sh = -_air_f(0,1,0,a,t,dh)\n si = -_ice_g(1,0,t,pres)\n stot = wair/a*sh + (1-wair/a)*si\n lhs = numpy.array([pres, gi, entr])\n rhs = numpy.array([ph, gv, stot])\n \n ph_a = _eq_pressure(1,0,0,a,t,dh)\n ph_t = _eq_pressure(0,1,0,a,t,dh)\n ph_d = _eq_pressure(0,0,1,a,t,dh)\n gi_t = _ice_g(1,0,t,pres)\n gv_a = _eq_vappot(1,0,0,a,t,dh)\n gv_t = _eq_vappot(0,1,0,a,t,dh)\n gv_d = _eq_vappot(0,0,1,a,t,dh)\n sh_a = -_air_f(1,1,0,a,t,dh)\n sh_t = -_air_f(0,2,0,a,t,dh)\n sh_d = -_air_f(0,1,1,a,t,dh)\n si_t = -_ice_g(2,0,t,pres)\n s_a = -wair/a**2 * (sh - a*sh_a - si)\n s_t = wair/a*sh_t + (1-wair/a)*si_t\n s_d = wair/a*sh_d\n dlhs = numpy.array([[0.,0.,0.], [0.,gi_t,0.], [0.,0.,0.]])\n drhs = numpy.array([[ph_a,ph_t,ph_d], [gv_a,gv_t,gv_d], [s_a,s_t,s_d]])\n return lhs, rhs, dlhs, drhs", "def init_amplitudes(self, ham):\n e_ai = cc_denom(ham.f, 2, 'dir', 'full')\n e_abij = cc_denom(ham.f, 4, 'dir', 'full')\n nocc = self.mos.nocc\n nvir = self.mos.nvir\n\n t1 = ham.f.ov.transpose().conj() * (- e_ai)\n v_vovo = einsum(\"pia,pjb->aibj\", ham.l.pov, ham.l.pov).conj()\n\n t2 = v_vovo.transpose([0, 2, 1, 3]) * (- e_abij)\n t3 = np.zeros((nvir,) * 3 + (nocc,) * 3)\n\n return Tensors(t1=t1, t2=t2, t3=t3)", "def init_amplitudes(self, ham):\n e_ai = cc_denom(ham.f, 2, 'dir', 'full')\n e_abij = cc_denom(ham.f, 4, 'dir', 'full')\n nocc = self.mos.nocc\n nvir = self.mos.nvir\n\n t1 = ham.f.ov.transpose().conj() * (- e_ai)\n v_vovo = einsum(\"pia,pjb->aibj\", ham.l.pov, ham.l.pov).conj()\n\n t2 = v_vovo.transpose([0, 2, 1, 3]) * (- e_abij)\n t3 = np.zeros((nvir,) * 3 + (nocc,) * 3)\n\n return Tensors(t1=t1, t2=t2, t3=t3)" ]
[ "0.6345493", "0.6289429", "0.6155934", "0.6074104", "0.60056967", "0.5953673", "0.59231526", "0.5861008", "0.5852607", "0.5842901", "0.58290935", "0.58032346", "0.57665646", "0.576412", "0.57412446", "0.57161164", "0.5708992", "0.5680603", "0.56741995", "0.5667289", "0.5651036", "0.5636091", "0.56326026", "0.5630486", "0.56118804", "0.56004745", "0.560004", "0.55921143", "0.5578695", "0.55510914", "0.5546969", "0.554642", "0.554642", "0.55370384", "0.5535146", "0.55279464", "0.5515511", "0.55076474", "0.54960614", "0.5494221", "0.54864234", "0.5484784", "0.54822946", "0.5481848", "0.54786783", "0.54724216", "0.546047", "0.54581267", "0.54486054", "0.5440239", "0.5433578", "0.5427424", "0.54194367", "0.54087836", "0.5398851", "0.5396966", "0.53947043", "0.5378321", "0.5367236", "0.53665245", "0.5356612", "0.5348703", "0.5345249", "0.533599", "0.53338295", "0.5328355", "0.53263193", "0.532572", "0.5323944", "0.5320357", "0.531552", "0.53151983", "0.53145117", "0.53136134", "0.530624", "0.5304685", "0.530468", "0.5303275", "0.53006417", "0.5298339", "0.52974635", "0.5294232", "0.5280574", "0.5277707", "0.52765334", "0.52763486", "0.5274063", "0.52702457", "0.5269565", "0.5269049", "0.52677584", "0.5265517", "0.5263313", "0.5261754", "0.52597827", "0.52592355", "0.52541506", "0.5251", "0.52484465", "0.52484465" ]
0.7249938
0
Nicholls-Turton entrainment parameterization. the_vars and coeffs are inputs into dmixed_vars; deltheta, F0, Fqv0 are calculated in dmixed_vars
def calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0):
    thetal_m = the_vars[0]
    qt_m = the_vars[2]
    zi = the_vars[1]
    dth = deltheta

    thetal_ft = thetal_m + dth
    qt_ft = coeffs.ft_qv
    dqt = qt_ft - qt_m

    # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)
    gamma = 6e-3
    thetal_3000 = thetal_ft + gamma*(3000-zi)
    LTS = thetal_3000 - coeffs.sst  # lower tropospheric stability

    # calculate coefficients
    press=tf.find_press(zi)
    Ad,Bd,issat = tf.calc_ABcoeffs(thetal_ft,qt_ft,press)
    Aw,Bw,issat = tf.calc_ABcoeffs(thetal_m,qt_m,press)

    invert= tf.t_uos_thetal(thetal_m,qt_m,press)
    T_0 = invert.temp
    lv=tf.L_t(invert.temp)
    Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)
    del_thv_dry = Ad * dth + Bd * dqt
    del_thv_sat = Aw * dth + Bw * dqt

    # account for evaporative cooling (increases we)
    ql_max = invert.ql
    Cl = (Ad*lv/tc.CPD - T_0/tc.EPS)
    Del_thv = del_thv_dry - Cl * ql_max

    # calculate buoyancy integral terms
    rho = 1.
    lcl_press=tf.LCL_thetal(thetal_m,qt_m)
    zb=tf.find_height(lcl_press)

    T1 = zb/zi
    T2 = 0.5 * zb**2 / zi**2
    T3 = (zi-zb)/zi
    T4 = 0.5 * (zi**2 - zb**2) / zi**2

    # calculate delta_Fr
    delta_Frstar = 82.0  # Wm^-2
    Frlambda = 7.9  # Wm^-2, using with CTL from Gesso
    delta_Fr = delta_Frstar - Frlambda*qt_ft*1000  # convert qt_ft to g kg^-1

    wtl_0=F0
    wqt_0=Fqv0
    Del_F = delta_Fr/(tc.CPD*rho)  # use sensitivity to radiation a la Gesso Fig. 3

    term1 = wtl_0 * (Ad * (T1-T2) + Aw * (T3-T4))
    term2 = wqt_0 * (Bd * (T1-T2) + Bw * (T3-T4))
    term3 = Del_F * (Ad * T2 + Aw * T4)

    Theta_NE = term1 + term2 + term3

    # calculate w*
    wstar=(2.5*9.8/T_0*zi*Theta_NE)**(1/3.)

    # calculate chi*
    chi_star = Cl * ql_max / (del_thv_dry - del_thv_sat)

    # calculate del_m
    Del_m = del_thv_dry + chi_star * (2. - chi_star) * (del_thv_sat - del_thv_dry)

    # calculate we
    a2=15.
    Del_thv_NT = Del_thv / (1. + a2 * (1. - Del_m/Del_thv))

    A_NT = 0.2
    fac_NT = 2.5

    term4 = Del_thv_NT
    term5 = A_NT * fac_NT * (T2 * del_thv_dry + T4 * del_thv_sat)

    denominator = term4 + term5

    we = A_NT * fac_NT * Theta_NE / denominator

    return we
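For context, the companion snippets in the negatives below (dmixed_vars and run_main) show how this function is driven: the mixed-layer state vector, a coefficient tuple built with make_tuple, and the inversion jump plus surface fluxes computed in dmixed_vars are passed straight through to calc_went_NT. The sketch below is a minimal restatement of that call pattern; the the_tup values and the helpers theta_ft, make_tuple, and the tf/tc thermodynamics modules are taken from those snippets and are assumed to be importable alongside calc_went_NT, so treat this as illustrative rather than a standalone script.

# Illustrative call pattern only, mirroring run_main/dmixed_vars from the negatives below.
# the_tup values and helper names (make_tuple, theta_ft, tf.qs_tp) are copied from those
# snippets; tf/tc are the same thermodynamics helper modules calc_went_NT itself uses.
sst, psfc, ft_qv = 297., 100., 2.e-3
the_tup = dict(D=5.e-6, U=7, sst=sst, ft_intercept=292, ft_gamma=6.e-3,
               qsfc=tf.qs_tp(sst, psfc), ft_qv=ft_qv, k=0.2, Cd=1.e-3,
               radcool=30., use_NT=True)
coeffs = make_tuple(the_tup, 'coeffs')

the_vars = [285., 400., 8.e-3]   # [theta (K), inversion height h (m), qv (kg/kg)]
deltheta = theta_ft(the_vars[1], coeffs.ft_intercept, coeffs.ft_gamma) - the_vars[0]
F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0])     # surface heat flux
Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2])  # surface vapor flux

went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0)  # entrainment velocity we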
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dmixed_vars(the_vars,tstep,coeffs):\n\n deltheta = theta_ft(the_vars[1],coeffs.ft_intercept,coeffs.ft_gamma) - the_vars[0]\n F0 = coeffs.U*coeffs.Cd*(coeffs.sst - the_vars[0]) #surface heat flux\n Fqv0 = coeffs.U*coeffs.Cd*(coeffs.qsfc - the_vars[2]) #surface vapor flux\n Fint = -coeffs.k*F0 #entrainment heat flux\n \n if coeffs.use_NT: # use NT parameterization by calculating we using function\n went = calc_went_NT(the_vars, coeffs, deltheta, F0, Fqv0) # Nicholls-Turton parameterization\n \n else: # use simple we parameterization\n went = -Fint/deltheta #simple entrainment parameterization\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*coeffs.ft_qv*1000 # convert qt_ft to g kg^-1\n \n Fqvent = -went*( coeffs.ft_qv - the_vars[2])\n wsubs = -coeffs.D*the_vars[1]\n rho=1.\n cp=1004.\n \n derivs=np.empty_like(the_vars)\n \n # higher delta_Fr from drier air at mixed-layer top...hence cloudy air results in less radiative cooling\n derivs[0]=(F0 - Fint)/(the_vars[1]*rho) - delta_Fr/1004./the_vars[1] \n derivs[1] = went + wsubs\n derivs[2] = (Fqv0 - Fqvent)/the_vars[1]\n return derivs", "def update_model_parameters(phi, T, nz, coord, SWVD, form=\"Calonne\"):\r\n D_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n D_eff = phi * (1 - phi) * D0 + D0\r\n elif form == \"Calonne\": # Calonne et al. (2014)\r\n x = 2 / 3 - phi\r\n b = np.heaviside(x, 1)\r\n D_eff = D0 * (1 - 3 / 2 * phi) * b\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective thermal conductivity W/m/K\r\n k_eff = np.ones(nz)\r\n\r\n if form == \"Hansen\": # Hansen and Foslien (2015)\r\n k_eff = phi * ((1 - phi) * k_a + phi * k_i) + k_a\r\n elif form == \"Calonne\": # Calonne et al. (2011)\r\n k_eff = ka0 + ka1 * (rho_i * phi) + ka2 * (rho_i * phi) ** 2\r\n else:\r\n print(\"requested method not available, check input\")\r\n\r\n ## effective heat capacity - similar forumla in Hansen and Foslien (2015) and Löwe et al. 
(2019)\r\n rhoC_eff = np.zeros(nz)\r\n rhoC_eff = phi * rho_i * C_i + (np.ones(nz) - phi) * rho_a * C_a\r\n\r\n ## Water Vapor density rho_v and its derivative rho_v_dT:\r\n [rho_v, rho_v_dT] = sat_vap_dens(nz, T, SWVD)\r\n\r\n return D_eff, k_eff, rhoC_eff, rho_v, rho_v_dT", "def solveFluNet(T,Ntime,a,b0,b1,g,k,w,y0,P,N,RHS=3):\n #I have added the variables P the transport matrix \n #and N the network size because they are needed\n #in the RHS.\n #I have added the variable RHS to be able to \n #choose which RHS method we want to use when running\n #solveFluNet\n \n #add input variables to RHS functions if needed\n def RHSnet(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\"\"\"\n S = y[:N]\n E = y[N:2*N]\n C = y[2*N:3*N]\n b = b0 + b1*(1+np.cos(2*np.pi*t))\n dy = np.zeros(3*N)\n dy[:N]= k*(1-S)-b*C*S+w*np.dot(P,S)-w*S\n dy[N:2*N]= b*C*S-(k+a)*E+w*np.dot(P,E)-w*E\n dy[2*N:3*N]= a*E-(g+k)*C+w*np.dot(P,C)-w*C\n return dy\n \n def RHSnetF(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\"\n Calculations carried out by fn.rhs\n \"\"\"\n dy = fn.rhs(P,y,t,a,b0,b1,g,k,w)\n return dy\n \n def RHSnetFomp(y,t,a,b0,b1,g,k,w):\n \"\"\"RHS used by odeint to solve Flu model\n Calculations carried out by fn.rhs_omp\n \"\"\"\n dy = fn.rhs_omp(P,y,t,a,b0,b1,g,k,w,2)\n return dy\n\n #Add code here and to RHS functions above to simulate network flu model\n t = np.linspace(0,T,Ntime)\n if (RHS==1):\n sol = odeint(RHSnet,y0,t,args=(a,b0,b1,g,k,w))\n if (RHS==2):\n sol = odeint(RHSnetF,y0,t,args=(a,b0,b1,g,k,w))\n if (RHS==3):\n sol = odeint(RHSnetFomp,y0,t,args=(a,b0,b1,g,k,w))\n S = sol[:,:N]\n E = sol[:,N:2*N]\n C = sol[:,2*N:3*N]\n return t,S,E,C", "def taylor_expansion(self,g_temp,g_step,var):\n A=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n A[self.n][i]=1\n for j in range(self.n):\n if(i==j): A[i][j]=2.*var[i]+2.+g_temp*np.sum([self.XXZ.Z(k,i) for k in range(self.n) if k!=i])\n else: A[i][j]=-g_temp*self.XXZ.Z(j,i)\n #First derivative\n B1=np.zeros(self.n+1)\n for i in range(self.n): \n B1[i]=self.gamma*2.*g_temp*self.N*(self.n-self.N)+np.sum([self.XXZ.Z(k,i)*(var[k]-var[i]) for k in range(self.n) if k!=i])\n Ainv=np.linalg.pinv(A)\n der1=np.dot(Ainv,B1)\n #Second derivative\n B2=np.zeros(self.n+1)\n for k in range(self.n):\n B2[k]=self.gamma*2.*self.N*(self.n-self.N) -2.*der1[k]**2+2.*np.sum([self.XXZ.Z(l,k)*(der1[l]-der1[k]) for l in range(self.n) if k!=l])\n der2=np.dot(Ainv,B2)\n #Third derivative\n B3=np.zeros(self.n+1)\n for k in range(self.n):\n B3[k]=-6*der1[k]*der2[k]+3.*np.sum([self.XXZ.Z(l,k)*(der2[l]-der2[k]) for l in range(self.n) if k!=l])\n der3=np.dot(Ainv,B3)\n #Fourth derivative\n B4=np.zeros(self.n+1)\n for k in range(self.n):\n B4[k]=-8.*der3[k]*der1[k]-6.*der2[k]*der2[k]+4.*np.sum([self.XXZ.Z(l,k)*(der3[l]-der3[k]) for l in range(self.n) if k!=l])\n der4=np.dot(Ainv,B4)\n \n return var+g_step*der1+g_step**2*der2/2.+g_step**3*der3/6.+g_step**4*der4/24.", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position 
kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def niv_variable_selection(x, y, t, max_vars):\n y1_t = (y == 1) & (t == 1)\n y0_t = (y == 0) & (t == 1)\n y1_c = (y == 1) & (t == 0)\n y0_c = (y == 0) & (t == 0)\n\n sum_y1_t = sum(y1_t)\n sum_y0_t = sum(y0_t)\n sum_y1_c = sum(y1_c)\n sum_y0_c = sum(y0_c)\n\n niv_dict = {}\n for col in x.columns:\n df = pd.concat([x[col].rename(col), y1_t.rename('y1_t'), y0_t.rename('y0_t'),\n y1_c.rename('y1_c'), y0_c.rename('y0_c')], axis=1)\n x_group = df.groupby(x[col])\n x_sum = x_group.sum()\n\n if sum_y0_t == 0 or sum_y1_t == 0:\n woe_t = 0\n else:\n woe_t = x_sum.apply(lambda r: np.log((r['y1_t'] * sum_y0_t) / (r['y0_t'] * sum_y1_t))\n if r['y1_t'] > 0 and r['y0_t'] > 0 else 0, axis=1)\n\n if sum_y0_c == 0 or sum_y1_c == 0:\n woe_c = 0\n else:\n woe_c = x_sum.apply(lambda r: np.log((r['y1_c'] * sum_y0_c) / (r['y0_c'] * sum_y1_c))\n if r['y1_c'] > 0 and r['y0_c'] > 0 else 0, axis=1)\n\n nwoe = woe_t - woe_c\n\n p_x_y1_t = x_sum['y1_t'] / sum_y1_t if sum_y1_t > 0 else 0\n p_x_y0_t = x_sum['y0_t'] / sum_y0_t if sum_y0_t > 0 else 0\n p_x_y1_c = x_sum['y1_c'] / sum_y1_c if sum_y1_c > 0 else 0\n p_x_y0_c = x_sum['y0_c'] / sum_y0_c if sum_y0_c > 0 else 0\n niv_weight = (p_x_y1_t * p_x_y0_c - p_x_y0_t * p_x_y1_c)\n\n niv_row = 100 * nwoe * niv_weight\n niv = niv_row.sum()\n niv_dict[col] = niv\n\n s_niv = pd.Series(niv_dict)\n s_selected_niv = s_niv.sort_values(ascending=False)[: max_vars]\n\n return s_selected_niv.index", "def undetermined_coefficients(gensols: List[Symbol], func_coeffs: List[Symbol], gt: Symbol, t: Symbol = t) -> Tuple[Symbol, Procedure]:\n\n Y = Function('Y', real=True)(t)\n\n coeffs = numbered_symbols('A', cls=Dummy)\n coefflist = []\n\n trialset = _undetermined_coefficients_match(gt, t)['trialset']\n\n notneedset = set()\n\n mult = 0\n for i, sol in enumerate(gensols):\n check = sol\n if check in trialset:\n # If an element of the trial function is already part of the\n # homogeneous solution, we need to multiply by sufficient x to\n # make it linearly independent. 
We also don't need to bother\n # checking for the coefficients on those elements, since we\n # already know it will be 0.\n while True:\n if check*t**mult in trialset:\n mult += 1\n else:\n break\n trialset.add(check*t**mult)\n notneedset.add(check)\n\n newtrialset = trialset - notneedset\n\n # while True:\n # dependent = False\n # for trial in newtrialset:\n # if trial in gensols:\n # dependent = True\n # break\n # if not dependent:\n # break\n # newtrialset = set([t*trial for trial in trialset])\n\n # trialset = trialset.union(newtrialset)\n\n trialfunc = sympy.Number(0)\n for i in newtrialset:\n c = next(coeffs)\n coefflist.append(c)\n trialfunc += c*i\n\n derivatives = []\n\n eqs = 0\n for order, coeff in enumerate(func_coeffs[::-1]):\n deriv = simplify(trialfunc.diff(t, order))\n derivatives.append(\n Eq(Derivative(Y, t, order), deriv, evaluate=False))\n eqs += coeff * deriv\n\n coeffsdict = dict(list(zip(trialset, [0]*(len(trialset) + 1))))\n\n eqs_lhs = eqs\n\n eqs = _mexpand(simplify(eqs - gt).expand())\n\n for i in Add.make_args(eqs):\n s = separatevars(i, dict=True, symbols=[t])\n coeffsdict[s[t]] += s['coeff']\n\n coeffvals = solve(list(coeffsdict.values()), coefflist)\n\n if not coeffvals:\n print(\n \"Could not solve `%s` using the \"\n \"method of undetermined coefficients \"\n \"(unable to solve for coefficients).\" % eqs)\n\n psol = trialfunc.subs(coeffvals)\n\n procedure = Procedure()\n procedure\\\n .text('Find ').latex('Y(t)').text(' that mimics the form of ').latex('g(t)', nl=True)\\\n .eq(Eq(Y, trialfunc, evaluate=False))\\\n .text('Compute successive derivatives of ').latex('Y(t)', nl=True)\\\n .equlist(derivatives)\\\n .text('Plug the derivatives into the LHS and equate coefficients', nl=True)\\\n .equlist([Eq(eqs_lhs, gt, evaluate=False),\n Eq(simplify(eqs_lhs).expand().collect(t), gt, evaluate=False)])\\\n .equarr([Eq(a, 0, evaluate=False) for a in coeffsdict.values()])\\\n .text('Solve for the undetermined coefficients', nl=True)\\\n .equarr([Eq(k, v, evaluate=False)\n for k, v in coeffvals.items() if k != 0] if len(coeffvals) > 0 else [])\\\n .text('Substitute the coefficients to get the particular solution', nl=True)\\\n .eq(Eq(Dummy('y_p'), psol, evaluate=False))\n\n return psol, procedure", "def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) \r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)", "def variational_distribution(self):\n activation = tf.nn.relu\n if self.linear:\n activation = 
None\n\n #q(z | x, s)\n if self.log_variational:\n x = tf.log(1 + self.expression)\n else:\n x = self.expression\n\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n for layer in range(2, self.n_layers + 1):\n h = dense(h, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n\n \n self.qz_m = dense(h, self.n_latent, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.qz_v = dense(h, self.n_latent, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n \n if self.scalings:\n # q(l | x, s)\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n self.ql_m = dense(h, 1, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.ql_v = dense(h, 1, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)", "def fluid_deriv(self):\n deriv = np.zeros((self.fluid_constraints['num_eq'],\n 2 * self.num_i + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n for j in range(self.num_nw_fluids):\n deriv[i * self.num_nw_fluids + j, i, j + 3] = 1\n deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1\n return deriv", "def variable_costs(dh: DataHandler):\n print(\"PtHydrogen not implemented\")\n\n scen_hor_map = dh.scenarios.horizon\n\n cost_var = dh.get(\"i_cost\").xs(\"varcost\", level=\"par_cost\")\n cost_var = cost_var.groupby([\"alltec\"]).apply(\n extract_horizon_specific_cost, scen_hor_map\n )\n cost_var = add_dimension(cost_var, dh.merge_stored_sets(\"r\"), \"r\")\n cost_var = cost_var.reorder_levels([\"alltec\", \"r\"])\n\n h2_price = dh.get(\"o_h2price_buy\")\n h2_price = add_dimension(h2_price, dh.merge_stored_sets(\"tec_h2g\"), \"alltec\")\n\n elec_price = dh.get(\"o_prices\")\n\n cost_fuel = dh.get(\"cost_fuel\")\n cost_fuel = add_dimension(cost_fuel, dh.merge_stored_sets(\"r\"), \"r\")\n cost_fuel = cost_fuel.reorder_levels([\"alltec\", \"r\"])\n\n cost_fuel.loc[h2_price.index, :] = h2_price\n\n eff = dh.get(\"eff\")\n\n co2_int = dh.get(\"co2_int\").div(1000)\n\n co2_price = dh.get(\"o_co2price\")\n\n co2_costs = co2_int * co2_price\n co2_costs.index.names = [\"alltec\", \"r\"]\n\n var_cost = (\n cost_fuel.add(co2_costs, fill_value=0).div(eff).add(cost_var, fill_value=0)\n )\n\n return var_cost", "def taylor_exp_3(y0, t, f, jac, hess, df_dt=None, d2f_dt2=None, d2f_dtdu=None, verbose=True, krylov_subspace_dim=None,\n **_):\n try:\n n, d = len(t), len(y0)\n y = np.zeros((n, d))\n except TypeError:\n n, d = len(t), 1\n y = np.zeros((n,))\n if verbose is False:\n count = Counter('', 0)\n elif verbose is True:\n count = Counter('Taylor Exp 3', n)\n else:\n count = Counter(verbose, n)\n if df_dt is None:\n def df_dt(*_): return np.zeros((d,))\n if d2f_dt2 is None:\n def d2f_dt2(*_): return np.zeros((d,))\n if d2f_dtdu is None:\n def d2f_dtdu(*_): return np.zeros((d, d))\n y[0] = y0\n j = jac(y[0], t[0])\n w = np.zeros((d, 3))\n expanded_vector = np.zeros((d + 3,))\n expanded_vector[-1] = 1\n expanded_matrix = np.zeros((d + 3, d + 3))\n expanded_matrix[-3:-1, -2:] = np.eye(2)\n expanded_matrix[:d, :d] = j\n for i in range(n - 1):\n h = t[i + 1] - t[i]\n w[:, -1] = f(y[i], t[i]) - np.dot(j, y[i])\n w[:, -2] = np.dot(jac(y[i], t[i]) - j, f(y[i], t[i])) + df_dt(y[i], t[i])\n w[:, -3] = np.dot(np.dot(hess(y[i], t[i]), f(y[i], t[i])), f(y[i], t[i])) \\\n + np.dot(jac(y[i], t[i]) - j, 
np.dot(jac(y[i], t[i]), y[i])) \\\n + np.dot(jac(y[i], t[i]) - j, df_dt(y[i], t[i])) \\\n + 2 * np.dot(d2f_dtdu(y[i], t[i]), f(y[i], t[i])) \\\n + d2f_dt2(y[i], t[i])\n expanded_vector[:d] = y[i]\n expanded_matrix[:d, -3:] = w\n if krylov_subspace_dim is None:\n y[i + 1] = np.dot(expm_sp(h * expanded_matrix), expanded_vector)[:d]\n else:\n y[i + 1] = expm_krylov(h * expanded_matrix, expanded_vector, krylov_subspace_dim)[:d]\n count(i + 1)\n return y", "def f1d(t,y,float_params,sigmaI): #sigmastep is an array\n \n ## y is Ntot0 ##\n\n # unpack parameters\n Nbar, Nstar, sigma0, nu_kin_mlyperus, DoverdeltaX2 = float_params \n\n # Ntot is passed in, Fqll calculated from Ntot\n Ntot0 = np.ascontiguousarray(y)\n Nqll0 = Nbar - Nstar * np.sin(2*np.pi*(Ntot0))\n\n # Calc surface deposition, dNtot_dt before diffusion\n m = (Nqll0 - (Nbar - Nstar))/(2*Nstar)\n sigmaM = (sigmaI - m * sigma0)/(1+m*sigma0)\n depsurf = nu_kin_mlyperus * sigmaM\n dNtot_dt = depsurf\n\n # Diffusion\n dy = diffuse_1d(Nqll0,DoverdeltaX2)\n dNtot_dt += dy \n\n # Package for output, only values of dNtot\n derivs = dNtot_dt\n return derivs", "def variation_of_parameters(y: List[Symbol], gt: Symbol, t: Symbol = t, do_integral=True) -> Tuple[Symbol, Procedure]:\n W, w = Wronskian(y, t)\n goW = simplify(gt / W)\n\n yp = 0\n\n Wdets = []\n integrals = []\n\n col = [0] * len(y)\n col[-1] = 1\n for i in range(len(y)):\n Wi = w.copy()\n Wi[:, i] = col.copy()\n\n # reduce cos^2 t + sin^2 t to 1\n Wi_det = trigsimp(simplify(Wi.det()), deep=True, recursive=True)\n\n integrand = (Wi_det * goW).expand()\n integral = integrate(\n integrand, t) if do_integral else Integral(integrand, t)\n yp += y[i] * integral\n\n if do_integral:\n integrals.append(\n Eq(Dummy('mu_{}'.format(i + 1)),\n Eq(Integral(integrand, t), integral, evaluate=False), evaluate=False)\n )\n else:\n integrals.append(Eq(Dummy('mu_{}'.format(i)),\n Integral(integrand, t), evaluate=False))\n\n Wdets.append(\n Eq(Symbol('W{}'.format(i+1)), Eq(Determinant(Wi), Wi_det, evaluate=False), evaluate=False))\n\n yps = logcombine(simplify(yp))\n\n procedure = Procedure()\n procedure\\\n .text('Compute the Wronskian determinant', nl=True)\\\n .eq(Eq(Dummy('W'), Eq(Determinant(w), W, evaluate=False), evaluate=False))\\\n .text('Compute ').latex('W_i', nl=True)\\\n .equlist(Wdets)\\\n .text('Calculate and simplify ').latex('\\\\frac{g(t)}{W(t)}', nl=True)\\\n .eq(Eq(sympy.Mul(gt, sympy.Pow(W, -1, evaluate=False), evaluate=False), goW, evaluate=False))\\\n .text('Compute ').latex('\\\\mu_i = \\\\int \\\\frac{g(t)W_i(t)}{W(t)} dt', nl=True)\\\n .equlist(integrals)\\\n .text('Compute the sum ').latex('\\\\sum_{i=1}^{k} y_i \\\\int \\\\frac{g(t)W_i(t)}{W(t)} dt', nl=True)\\\n .equlist([\n Eq(Dummy('y_p'), yp, evaluate=False),\n Eq(Dummy('y_p'), yps, evaluate=False)\n ])\\\n .text('Complementray + particular = general', nl=True)\\\n .eq(Eq(Dummy('y'), to_general(y, yps)[0], evaluate=False))\n\n return yps, procedure", "def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to 
hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)", "def fwd_model(Ti_samples,To_samples, dw_samples, kw_samples,hi_samples,ho_samples,TA_samples):\n\t#Determine number of samples (totquat)\n\ttotquat=len(Ti_samples)\n\t# List to store values of Q (assuming no radiative heat transfer) calculated from\n\t# the random samples of the parameters\n\tQ_samples_4PCE=[]\n\t# List to store values of Q assuming radiative heat transfer occurs\n\t#Q_r_samples_4PCE=[]\n\t# Calculate values of heat flux Q (assuming no radiative heat transfer)\n\t# for the different sample values and append to the list\n\tfor i in range(totquat):\n\t\t(Q,T1,T2)=compute_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i],\\\n\t\t\tkw_samples[i], hi_samples[i], ho_samples[i])\n\t\tQ_samples_4PCE.append(Q)\n\t\t# Calculate values of heat flux Q assuming radiative heat transfer to atmosphere and append to list\n\t\t# For the required estimates of Q,T1, and T2 needed to solve the nonlinear system,\n\t\t# we use the values obtained by solving the system assuming no radiative heat transfer\n\t\t\"\"\"Q2=r_heat_flux(Ti_samples[i], To_samples[i], dw_samples[i], kw_samples[i],\\\n\t\t\thi_samples[i], ho_samples[i], TA_samples[i], (Q,T1,T2))\n\t\tQ_r_samples_4PCE.append(Q2)\n\t# Convert Q_r_samples_4PCE to numpy array\n\tQ_evals = np.array(Q_r_samples_4PCE)\n\treturn Q_evals\"\"\"\n\t\tConvert Q_samples_4PCE to numpy array\n\t\tQ_evals = np.array(Q_samples_4PCE)\n\t\treturn Q_evals\"\"\"\n\n\ndef KDE(fcn_evals):\n\t\"\"\"\n\tPerforms kernel density estimation\n\tInput:\n\t\tfcn_evals: numpy array of evaluations of the forward model (values of heat flux Q)\n\tOutput:\n\t\txpts_pce: numpy array of points at which the PDF is estimated.\n\t\tPDF_data_pce: numpy array of estimated PDF values.\n\t\"\"\"\n\t# Perform KDE on fcn_evals\n\tkern_pce=stats.kde.gaussian_kde(fcn_evals)\n\t# Generate points at which to evaluate the PDF\n\txpts_pce=np.linspace(fcn_evals.min(),fcn_evals.max(),200)\n\t# Evaluate the estimated PDF at these points\n\tPDF_data_pce=kern_pce(xpts_pce)\n\treturn xpts_pce, PDF_data_pce", "def run_main(sst, ft_qv, use_NT):\n\n dtout=10. #minutes\n end_time=8*24. #hours\n del_time=dtout*60. #seconds\n end_time=end_time*3600. #seconds\n #sst=297\n D=5.e-6 #s-1\n U=7 #m/s\n psfc=100. 
#kPa\n qsfc=tf.qs_tp(sst,psfc)\n ft_intercept = 292 #K\n ft_gamma = 6.e-3 #K/m\n #ft_qv = 2.e-3\n k=0.2 #entrainment efficiency\n Cd = 1.e-3 #drag coefficient\n tspan = np.arange(0.,end_time,del_time)\n vars_init=[285.,400.,8.e-3] #theta (K), height (m) qv (kg/kg) to start\n the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT) # include use_NT\n the_tup=make_tuple(the_tup,'coeffs')\n output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n # save time/computation by only doing calculations for the last timestep (equilibrium)\n result['time']=tspan[-1]/3600./24. #days\n result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n result['delqv'] = ft_qv - result['qv'].iloc[-1]\n result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n \n # decide how to calculate entrainment\n the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n if use_NT:\n result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1], \n result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n else:\n result['went']=calc_went(result.iloc[-1],the_tup)\n\n result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n with open('dumpmodel.csv','w') as f:\n result.to_csv(f,index=False)\n \n return None", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n 
self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n 
self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def function_PLD_Ntransits(coeffs, t_list, Pns_list, lc_list,\n coeffs_dict_list,\n coeffs_tuple, fix_coeffs, fix_coeffs_channels,\n batman_params_list, PLD_params_list):\n\n residuals = []\n\n x1 = len([key for key in coeffs_tuple[0:9] if key not in fix_coeffs\n and key not in fix_coeffs_channels])\n x2 = len([key for key in coeffs_tuple[0:9] if key in fix_coeffs_channels])\n x3 = len([key for key in coeffs_tuple if key not in fix_coeffs\n and key not in fix_coeffs_channels])\n\n for i in range(len(coeffs_dict_list)):\n\n coeffs_fit = np.concatenate((coeffs[i*x3:i*x3 + x1],\n coeffs[-x2:],\n coeffs[i*x3 + x1 :(i+1)*x3 ]))\n\n new_flux = model_PLD(coeffs_fit, t_list[i], Pns_list[i], coeffs_dict_list[i],\n coeffs_tuple, fix_coeffs, batman_params_list[i], PLD_params_list[i])\n\n 
residuals.append(lc_list[i]-new_flux)\n\n return np.array(np.concatenate(residuals))", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids + 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 1, 2] = self.inl[0].m.val_SI\n # custom variable Q\n if self.Q.is_var:\n self.jacobian[k, 2 + self.Q.var_pos, 0] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio\n if self.pr.is_set:\n self.jacobian[k, 0, 1] = self.pr.val\n self.jacobian[k, 1, 1] = -1\n # custom variable pr\n if self.pr.is_var:\n self.jacobian[k, 2 + self.pr.var_pos, 0] = (\n self.inl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta\n if self.zeta.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 0, zeta='zeta')\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta')\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta')\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta')\n # custom variable zeta\n if self.zeta.is_var:\n self.jacobian[k, 2 + self.zeta.var_pos, 0] = (\n self.numeric_deriv(f, 'zeta', 2, zeta='zeta'))\n k += 1\n\n ######################################################################\n # derivatives for specified hydro-group parameters\n if self.hydro_group.is_set:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(func, 'm', 0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(func, 'p', 0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(func, 'h', 0)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(func, 'p', 1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(func, 'h', 1)\n # custom variables of hydro group\n for var in self.hydro_group.elements:\n if var.is_var:\n self.jacobian[k, 2 + var.var_pos, 0] = (\n self.numeric_deriv(func, self.vars[var], 2))\n k += 1\n\n ######################################################################\n # derivatives for additional equations\n self.additional_derivatives(increment_filter, k)", "def forward(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=0, eps=0, gamma=0, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # forward euler and forward/backward timestep\n beta = np.float32(beta)\n mu = np.float32(mu)\n \n du1, du0 = du[:2]\n dv1, dv0 = dv[:2]\n dn0 = dn[0]\n \n dndt_x(h, n, u, v, dx, dy, dn0) # calculate dndt and put it into dn0\n \n n1 = n + ( dn0 )*dt\n \n dudt_x(h, n, f, u, v, dx, dy, du0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n, 
f, u, v, dx, dy, dv0, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dudt_x(h, n1, f, u, v, dx, dy, du1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n dvdt_x(h, n1, f, u, v, dx, dy, dv1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n u1 = u + ( beta*du1 + (one-beta)*du0 )*dt\n v1 = v + ( beta*dv1 + (one-beta)*dv0 )*dt\n \n n, u, v = n1, u1, v1\n \n du = [du1, du0, du0, du0]\n dv = [dv1, dv0, dv0, dv0]\n dn = [dn0, dn0, dn0]\n return n1, u1, v1, du, dv, dn", "def dfegn17(mod96file, wavetype, nmod, freq, pertu = 0.05, tmpdir = None, cleanup = True, fig = None):\n \n #create the main temporary directory\n if tmpdir is None: \n tmpdir = \"/tmp/tmpdir_dfegn17_%10d\" % (np.random.rand() * 1e10)\n while os.path.isdir(tmpdir):\n tmpdir += \"_%10d\" % (np.random.rand() * 1e10)\n os.mkdir(tmpdir)\n\n #read starting model\n Z, H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS = readmod96(mod96file)\n model0 = np.concatenate((H, VP, VS, RHO))\n nlayer = len(H)\n IH = np.arange(nlayer) #index of thickness parameters\n IVP = np.arange(1 * nlayer, 2 * nlayer) #index of Vp parameters\n IVS = np.arange(2 * nlayer, 3 * nlayer) \n IRH = np.arange(3 * nlayer, 4 * nlayer) \n\n\n\n #compute eigenfunctions for starting model\n out0 = fegn17(mod96file, wavetype, nmod, freq, tmpdir = \"%s/%s\" % (tmpdir, \"startingmodel\"), fig = None, cleanup = False)\n\n #initiate kernels\n if wavetype == \"R\":\n DURDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DUZDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DTRDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DTZDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n elif wavetype == \"L\":\n DUTDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n DTTDN = np.zeros((nmod + 1, nlayer, len(model0)), float) * np.nan\n\n\n #perturbate each parameter subsequently\n for nparam in xrange(len(model0)):\n if nparam == nlayer - 1 :\n #thickness of the half space: meaningless\n continue\n\n #prepare the perturbated model and determine the sub-directory name\n modeln = model0.copy()\n modeln[nparam] *= (1.0 + pertu)\n tmpdir_n = \"%s/param%06d\" % (tmpdir, nparam)\n mod96file_n = \"%s/model_param%06d.mod96\" % (tmpdir, nparam) #write the perturbated file in the main temporary directory\n\n #write perturbated model\n writemod96(mod96file_n, \n H = modeln[IH], \n VP = modeln[IVP], \n VS = modeln[IVS], \n RHO = modeln[IRH], \n QP = QP, QS = QS, ETAP=ETAP, ETAS=ETAS, FREFP=FREFP, FREFS=FREFS) #keep attenuation untouched\n\n #call fegn17 with perturbated model\n out_n = fegn17(mod96file_n, wavetype, nmod, freq, tmpdir = tmpdir_n, fig = None, cleanup = False)\n dm = (modeln[nparam] - model0[nparam])\n\n for modnum in xrange(nmod+1):\n key = \"%s%d\" % (wavetype, modnum)\n\n if wavetype == \"R\":\n DURDN[modnum, :, nparam] = (out_n[key]['UR'] - out0[key]['UR']) / dm\n DUZDN[modnum, :, nparam] = (out_n[key]['UZ'] - out0[key]['UZ']) / dm\n DTRDN[modnum, :, nparam] = (out_n[key]['TR'] - out0[key]['TR']) / dm\n DTZDN[modnum, :, nparam] = (out_n[key]['TZ'] - out0[key]['TZ']) / dm\n elif wavetype == \"L\":\n\n DUTDN[modnum, :, nparam] = (out_n[key]['UT'] - out0[key]['UT']) / dm\n DTTDN[modnum, :, nparam] = (out_n[key]['TT'] - out0[key]['TT']) / dm\n\n\n #-------------------\n dout = {\"Z\" : out0['model'][\"Z\"], \"T\" : 1. 
/ freq}\n if wavetype == \"R\":\n dout['DURDH'] = DURDN[:, :, IH ]\n dout['DURDVP'] = DURDN[:, :, IVP]\n dout['DURDVS'] = DURDN[:, :, IVS]\n dout['DURDRH'] = DURDN[:, :, IRH]\n\n dout['DUZDH'] = DUZDN[:, :, IH ]\n dout['DUZDVP'] = DUZDN[:, :, IVP]\n dout['DUZDVS'] = DUZDN[:, :, IVS]\n dout['DUZDRH'] = DUZDN[:, :, IRH]\n\n dout['DTRDH'] = DTRDN[:, :, IH ]\n dout['DTRDVP'] = DTRDN[:, :, IVP]\n dout['DTRDVS'] = DTRDN[:, :, IVS]\n dout['DTRDRH'] = DTRDN[:, :, IRH]\n\n dout['DTZDH'] = DTZDN[:, :, IH ]\n dout['DTZDVP'] = DTZDN[:, :, IVP]\n dout['DTZDVS'] = DTZDN[:, :, IVS]\n dout['DTZDRH'] = DTZDN[:, :, IRH]\n \n elif wavetype == \"L\": \n dout['DUTDH'] = DUTDN[:, :, IH ]\n dout['DUTDVP'] = DUTDN[:, :, IVP]\n dout['DUTDVS'] = DUTDN[:, :, IVS]\n dout['DUTDRH'] = DUTDN[:, :, IRH]\n \n dout['DTTDH'] = DTTDN[:, :, IH ]\n dout['DTTDVP'] = DTTDN[:, :, IVP]\n dout['DTTDVS'] = DTTDN[:, :, IVS]\n dout['DTTDRH'] = DTTDN[:, :, IRH]\n\n if cleanup:\n #remove temporary directory\n execbash('rm -rf %s' % tmpdir, \".\")\n\n if fig is not None:\n\n ax2 = fig.add_subplot(224)\n ax1 = fig.add_subplot(223, sharey = ax2)\n ax3 = fig.add_subplot(222, sharex = ax2)\n\n #------------------\n ax1.invert_yaxis()\n #------------------\n z = np.concatenate((np.repeat(out0['model'][\"Z\"], 2)[1:], [sum(out0['model'][\"H\"]) * 1.1]))\n vp = np.repeat(out0['model'][\"Vp\"], 2)\n vs = np.repeat(out0['model'][\"Vs\"], 2)\n rh = np.repeat(out0['model'][\"Rh\"], 2)\n ax1.plot(vp, z, label = \"Vp\")\n ax1.plot(vs, z, label = \"Vs\")\n ax1.plot(rh, z, label = \"Rh\")\n ax1.legend()\n ax1.grid(True)\n ax1.set_ylabel('model depth (km)')\n\n #------------------\n if wavetype == \"R\":\n vmax = abs(dout['DUZDVS'][nmod, :, :]).max()\n ax3.plot(dout[\"Z\"], out0[\"%s%d\" % (wavetype, nmod)]['UZ'], label = \"UZ\")\n Y = dout['DUZDVS'][nmod, :, :]\n Y = np.ma.masked_where(np.isnan(Y), Y)\n ax2.pcolormesh(dout[\"Z\"], dout[\"Z\"], Y, vmin = -vmax, vmax = vmax)\n ax2.set_title(\"DUZ/DVS, T = %f, mode %d\" % (1. / freq, nmod))\n ax3.set_xlabel('eigenfunction depth (km)')\n elif wavetype == \"L\":\n vmax = abs(dout['DUTDVS'][nmod, :, :]).max()\n ax3.plot(dout[\"Z\"], out0[\"%s%d\" % (wavetype, nmod)]['UT'], label = \"UT\")\n Y = dout['DUTDVS'][nmod, :, :]\n Y = np.ma.masked_where(np.isnan(Y), Y)\n ax2.pcolormesh(dout[\"Z\"], dout[\"Z\"], Y, vmin = -vmax, vmax = vmax)\n ax2.set_title(\"DUT/DVS, T = %f, mode %d\" % (1. / freq, nmod))\n ax3.set_xlabel('eigenfunction depth (km)')\n\n ax3.xaxis.set_label_position(\"top\") \n ax3.legend()\n ax2.grid(True)\n ax3.grid(True)\n\n return dout", "def dynstall_mhh_dxdt_simple(t, x, U, U_dot, omega, alpha_34, p):\n # States\n x1=x[0] # Downwash memory term 1\n x2=x[1] # Downwash memory term 2\n x3=x[2] # Clp', Lift coefficient with a time lag to the attached lift coeff\n x4=x[3] # f'' , Final separation point function\n # Parameters\n alpha0 = p['alpha0']\n Cla = p['Cla']\n c = p['chord']\n A1 = p['A1']\n A2 = p['A2']\n b1 = p['b1']\n b2 = p['b2']\n F_st = p['F_st']\n # Variables derived from inputs\n U = max(U, 0.01)\n Tu = max(c/(2*U), 1e-4) # Eq. 23\n Tf = p['Tf0']*Tu # OLD was twice: Tf = p['Tf0']*c/U\n Tp = p['Tp0']*Tu # OLD was twice: Tp = p['Tp0']*c/U\n # Variables derived from states\n if p['alpha0_in_x1x2']:\n alphaE = alpha_34*(1-A1-A2)+ x1 + x2 # Eq. 12\n else:\n alphaE = (alpha_34-alpha0)*(1-A1-A2)+ x1 + x2 + alpha0 # Eq. 12\n\n# alphaE = u['alphaE'](t) # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< HACK HACK TODO TODO TODO TODO TODO\n\n Clp = Cla * (alphaE-alpha0) + np.pi * Tu * omega # Eq. 
13\n alphaF = x3/Cla+alpha0 # p. 13\n fs_aF = F_st(alphaF) # p. 13\n if(fs_aF<0):\n print('Problematic fs:',fs_aF)\n x4 = np.clip(x4, 1e-16, 1.0) # Constraining x4 between 0 and 1 increases numerical stability\n # State equation\n xdot = [0]*4\n if p['alpha0_in_x1x2']:\n xdot[0] = -1/Tu * (b1 + c * U_dot/(2*U**2)) * x1 + b1 * A1 / Tu * alpha_34\n xdot[1] = -1/Tu * (b2 + c * U_dot/(2*U**2)) * x2 + b2 * A2 / Tu * alpha_34\n else:\n xdot[0] = -1/Tu * (b1 + c * U_dot/(2*U**2)) * x1 + b1 * A1 / Tu * (alpha_34-alpha0)\n xdot[1] = -1/Tu * (b2 + c * U_dot/(2*U**2)) * x2 + b2 * A2 / Tu * (alpha_34-alpha0)\n xdot[2] = -1/Tp * x3 + 1/Tp * Clp\n xdot[3] = -1/Tf * x4 + 1/Tf * fs_aF\n return xdot", "def DyEvo(x, t, T0, r1, r2, K_co, K_ch, alpha, n):\n y=np.zeros([np.size(x)])\n D=np.zeros([2]) \n #define fitnss\n D[0]=dmax*x[0]**n/(K_co**n+x[0]**n) #cooperator\n D[1]=dmax*x[0]**n/(K_ch**n+x[0]**n) #cheater \n #degradation\n deg=fmax*x[1]/(x[1]+Kd) \n #ODE of eco-evo dynamics\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r1*(1-x[1]-x[2])-D[0]-alpha)#d Co/dt\n y[2]=x[2]*(r2*(1-x[1]-x[2])-D[1]-alpha) #d Ch/dt\n \n return y", "def dE_mdn(self, x, y, t, w1 = None, w2 = None):\n if w2 == None:\n w2 = self.w2\n M = int(self.M)\n # avoid underrun\n \n alpha, sigma, mu = self.getMixtureParams(y.T)\n #import pdb; pdb.set_trace()\n \n #T = t.T[None, None, :] # note: np.tile is slower than this notation\n T = t.T[None, :]\n \n phi = self._phi(T, mu, sigma)\n aphi = alpha*phi\n pi = aphi / np.sum(aphi, 0)\n \n # derivatives of E with respect to the output variables (s. Bishop 1995, chp. 6.4)\n dE_dy_alpha = alpha - pi\n dE_dy_sigma = - 0.5 * pi * ((np.sum((T-mu)**2 , 1) / sigma) - self.c)\n dE_dy_mu = pi[:,np.newaxis,:] * (mu - T) / sigma[:,np.newaxis,:]\n\n dk = np.zeros([self.ny, x.shape[0]])\n dk[0:M,:] = dE_dy_alpha\n dk[M:2*M,:] = dE_dy_sigma\n \n dk[2*M:] = np.reshape(dE_dy_mu, [M*self.c, x.shape[0]])\n \n # back-propagate the dks\n #t0=datetime.now()\n dEnw1, dEnw2 = self.backward(x, dk, None, w2)\n #print 'eval of dE_mdn:' + str((datetime.now()-t0))\n #dj = (1 - self.z[1:]**2) * np.dot(w2[:,1:].T, dk)\n # evaluate derivatives with respect to the weights\n #dEnw1 = (dj[:,:,np.newaxis]*x[np.newaxis,:,:]).transpose(1,0,2)\n #dEnw2 = (dk[:,:,np.newaxis]*self.z.T[np.newaxis,:,:]).transpose(1,0,2)\n return dEnw1, dEnw2", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n znew = z\n\n iter = 0\n eps = 1.0e99\n while iter < self.maxiter and abs(eps) > self.atol:\n z = znew\n znew = 4.0 - x*z\n\n eps = x*znew + znew - 4.0\n\n unknowns['z'] = znew\n unknowns['y'] = x + 2.0*znew\n\n resids['z'] = eps\n #print(unknowns['y'], unknowns['z'])", "def _func_pen(self, coeffs_ext):\n l_elastic_net = self.l_elastic_net\n eta = self.eta\n n_features = self.n_features\n coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]\n return l_elastic_net * ((1. 
- eta) * coeffs_ext.sum()\n + 0.5 * eta * np.linalg.norm(coeffs) ** 2)", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n c = self.c\n\n unknowns['y'] = a*x**2 + b*x + c", "def test_adding_three_variables():\n a = fwd.Variable()\n b = fwd.Variable()\n c = fwd.Variable()\n f = fwd.exp(a-b+c)\n assert equals(f.evaluation_at({a: 1.0, b: 2.0, c: 3.0}), np.exp(2.0))\n assert equals(f.derivative_at(b, {a: 1.0, b: 2.0, c: 3.0}), -np.exp(2.0))\n assert equals(f.derivative_at(a, {a: 1.0, b: 2.0, c: 3.0}), np.exp(2.0))", "def fluid_deriv(self, increment_filter, k):\n i = 0\n for fluid in self.nw_fluids:\n j = 0\n for o in self.outl:\n self.jacobian[k, j + 1, 0] = -o.fluid.val[fluid]\n self.jacobian[k, j + 1, i + 3] = -o.m.val_SI\n j += 1\n self.jacobian[k, 0, 0] = self.inl[0].fluid.val[fluid]\n self.jacobian[k, 0, i + 3] = self.inl[0].m.val_SI\n k += 1\n i += 1", "def nonlinear_model(t, q, u, p, calcOutput=False):\n # Compute variables useful to several other functions\n m = calcMisc(t, q, u, p)\n\n # Aerodynamic Force (length 3)\n Fa, aeroOut = Faero(t, q, u, p, calcOutput=calcOutput, m=m)\n\n # Inertial force (length 3)\n Fs = Fstruct(t, q, u, p, calcOutput=calcOutput, m=m)\n\n # Total force\n F = np.zeros(3)\n F[0] = Fa[0] + Fs[0]\n F[1] = Fa[1] + Fs[1]\n F[2] =-Fa[2] - Fs[2]\n\n # Derivative of states (structural and aero)\n dqxd = dxd_dt(t, m['qx'], m['qxd'], F[p['Ix']], p)\n dxa = dxa_dt(t, m['x'], m['xd'], m['qxa_ua'], m['qxa_di'], p, u)\n dq = np.concatenate((m['qxd'], dqxd, dxa)) # NOTE: assumed order here\n\n if calcOutput:\n dq_full, xd, xdd = inflate_q(dq, Iq=p['Iq'])\n d = dict()\n ## Structural states\n d['x'] = m['x'][0]\n d['y'] = m['x'][1]\n d['thetat'] = m['x'][2]\n d['xd'] = m['xd'][0]\n d['yd'] = m['xd'][1]\n d['thetad'] = m['xd'][2]\n d['xdd'] = xdd[0]\n d['ydd'] = xdd[1]\n d['thetadd'] = xdd[2]\n # Inputs\n # (NOTE: computed with \"calc='u')\n #m['Ux'], m['Uy'], m['theta_p'] = inputsAtTime(t, u)\n # Struct\n d['theta'] = m['theta']\n d['rho_x'] = m['rho_x']\n d['rho_y'] = m['rho_y']\n ## Aero\n d.update(aeroOut)\n if p['dynamicStallModel'] == 'oye':\n d['fs'] = m['qxa_ua'][0]\n d['dfs'] = dxa [0]\n elif p['dynamicStallModel'] == 'mhh':\n d['x1_ds'] = m['qxa_ua'][0]\n d['x2_ds'] = m['qxa_ua'][1]\n d['x3_ds'] = m['qxa_ua'][2]\n d['x4_ds'] = m['qxa_ua'][3]\n return pd.Series(d)\n else:\n return dq", "def _compute_GP_variables(self):\r\n Wi = 1.0/self.W\r\n self.Sigma_tilde = np.diagflat(Wi)\r\n\r\n Y_tilde = Wi*self.Ki_f + self.f_hat\r\n\r\n self.Wi_K_i = self.W12BiW12\r\n ln_det_Wi_K = pddet(self.Sigma_tilde + self.K)\r\n lik = self.noise_model.logpdf(self.f_hat, self.data, extra_data=self.extra_data)\r\n y_Wi_K_i_y = mdot(Y_tilde.T, self.Wi_K_i, Y_tilde)\r\n\r\n Z_tilde = (+ lik\r\n - 0.5*self.ln_B_det\r\n + 0.5*ln_det_Wi_K\r\n - 0.5*self.f_Ki_f\r\n + 0.5*y_Wi_K_i_y\r\n + self.NORMAL_CONST\r\n )\r\n\r\n #Convert to float as its (1, 1) and Z must be a scalar\r\n self.Z = np.float64(Z_tilde)\r\n self.Y = Y_tilde\r\n self.YYT = np.dot(self.Y, self.Y.T)\r\n self.covariance_matrix = self.Sigma_tilde\r\n self.precision = 1.0 / np.diag(self.covariance_matrix)[:, None]\r\n\r\n #Compute dZ_dK which is how the approximated distributions gradients differ from the dL_dK computed for other likelihoods\r\n self.dZ_dK = self._Kgradients()\r\n #+ 0.5*self.Wi_K_i - 0.5*np.dot(self.Ki_f, self.Ki_f.T) #since we are not adding the K gradients explicit part theres no need to compute this again\r", "def coeffs(f):\n return dmp_coeffs(f.rep, f.lev, 
f.dom)", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def rhs(y, t, l, m, g):\n # Unpack the states so you can use the variable names in the\n # sympy.physics.mechanics equations\n q1 = y[0]\n q2 = y[1]\n u1 = y[2]\n u2 = y[3]\n # or you can make use of python's tuple unpacking for a one liner\n # q1, q2, u1, u2 = y\n\n # Initialize a vector for the derivatives.\n dydt = zeros((len(y)))\n\n # Compute the derivatives, these are pasted in from the\n # sympy.physics.mechanics results.\n dydt[0] = u1\n dydt[1] = u2\n dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -\n g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -\n l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +\n l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -\n l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +\n 2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))\n dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -\n l**2*m*(-sin(q1)*cos(q2) +\n sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -\n l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -\n sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))\n - l**2*m)\n\n # Return the derivatives.\n return dydt", "def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids * 2] = self.fluid_func()\n k += self.num_nw_fluids * 2\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k:k + 2] = self.mass_flow_func()\n k += 2\n\n ######################################################################\n # equations for energy balance\n self.residual[k] = self.energy_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer\n if self.Q.is_set:\n self.residual[k] = (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val)\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient\n if self.kA.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient characteristic\n if self.kA_char.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_char_func()\n k += 1\n\n ######################################################################\n # equations for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n self.residual[k] = self.ttd_u_func()\n k += 1\n\n ######################################################################\n # equations for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n self.residual[k] = self.ttd_l_func()\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.residual[k] = (\n self.pr1.val * self.inl[0].p.val_SI - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.residual[k] = (\n 
self.pr2.val * self.inl[1].p.val_SI - self.outl[1].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta at hot side\n if self.zeta1.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # equations for specified zeta at cold side\n if self.zeta2.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):\n \n # Check inputs\n if X.dtype != \"float64\":\n raise ValueError(\"array X should have type float64.\")\n #if no_dims.__class__ != \"<type 'int'>\": # doesn't work yet!\n # print \"Error: number of dimensions should be an integer.\";\n # return -1;\n \n # Initialize variables\n if X.shape[1] > initial_dims:\n print \"Reducing the dimensionality to %d\" %initial_dims\n X = pca(X, initial_dims);\n (n, d) = X.shape;\n max_iter = 1000;\n initial_momentum = 0.5;\n final_momentum = 0.8;\n eta = 500;\n min_gain = 0.01;\n Y = Math.random.randn(n, no_dims);\n dY = Math.zeros((n, no_dims));\n iY = Math.zeros((n, no_dims));\n gains = Math.ones((n, no_dims));\n \n # Compute P-values\n P = x2p(X, 1e-5, perplexity);\n P = P + Math.transpose(P);\n P = P / Math.sum(P);\n P = P * 4; # early exaggeration\n P = Math.maximum(P, 1e-12);\n \n # Run iterations\n for iter in range(max_iter):\n \n # Compute pairwise affinities\n sum_Y = Math.sum(Math.square(Y), 1); \n num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));\n num[range(n), range(n)] = 0;\n Q = num / Math.sum(num);\n Q = Math.maximum(Q, 1e-12);\n \n # Compute gradient\n PQ = P - Q;\n for i in range(n):\n dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);\n \n # Perform the update\n if iter < 20:\n momentum = initial_momentum\n else:\n momentum = final_momentum\n gains = (gains + 0.2) * ((dY > 0) != (iY > 
0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));\n gains[gains < min_gain] = min_gain;\n iY = momentum * iY - eta * (gains * dY);\n Y = Y + iY;\n Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));\n \n # Compute current value of cost function\n if (iter + 1) % 10 == 0:\n C = Math.sum(P * Math.log(P / Q));\n print \"Iteration \", (iter + 1), \": error is \", C\n \n # Stop lying about P-values\n if iter == 100:\n P = P / 4;\n \n # Return solution\n return Y;", "def ti_deriv(self, increment_filter, k):\n fuel = self.fuel_alias.val\n pos = 3 + self.nw_fluids.index(fuel)\n\n for i in range(2):\n self.jacobian[k, i, 0] = -self.inl[i].fluid.val[fuel]\n self.jacobian[k, i, pos] = -self.inl[i].m.val_SI\n self.jacobian[k, 2, 0] = self.outl[0].fluid.val[fuel]\n self.jacobian[k, 2, pos] = self.outl[0].m.val_SI\n self.jacobian[k] *= self.lhv\n k += 1", "def run_optimization(file, sess, Hx_final, Hy_final, w_ij, ops):\n\n sub_patch = read_patch(file)\n # all_plot_params = []\n verbose = True\n\n # Prepare input, randomness in fv is achieved by adding a noise\n vi, Ti, fv_o, subTi = input_prep(sub_patch, do_show=False, avoid_triangle_flip=True)\n B_inv_new = None\n N = 8\n BATCH = 32\n type = 'NN'\n for type in ('NN', 'GD', 'BFGS'):\n\n fvi = fv_o.copy()\n print(\"*\" * 1000)\n print(fvi)\n print(\"&\" * 1000)\n all_plot_params = []\n\n def get_nn_hessians(fv, v, subt):\n inputs_b = pack_batch(fv, v, subt, BATCH)\n feed_dict = gen_feed_dict_inference(inputs_b, is_training=False, ops=ops)\n (Hx_final_out, Hy_final_out, w_ij_out) = sess.run((Hx_final, Hy_final, w_ij), feed_dict=feed_dict)\n (Hx, Hy) = extract_Hs(Hx_final_out, Hy_final_out)\n return (Hx, Hy)\n\n for i in range(N):\n\n params = dirichlet_sym_gd_bfgs_nn_weak_armijo_optimizer_step(line_search=line_search_armijio,\n get_nn_hessians=get_nn_hessians,\n source=vi,\n t=Ti,\n target=fvi,\n subt=subTi,\n verbose=verbose,\n # Back feed the next estimate of Hessian\n B_inv_new=B_inv_new,\n type=type)\n\n optimization_res = do_optimization_result_unpack(params=params, t=Ti, v=vi, verbose=verbose)\n\n (displacement,\n p_k, # optimization_res['step_fv'],\n p_k_n, # optimization_res['step_fv_n'],\n _, # optimization_res['subvertices'],\n eng_xnew_first_order_approx,\n eng_xnew_x0_diff_sub,\n source,\n t,\n source_dim,\n t_dim,\n eng_at_x0_sub,\n eng_at_xnew,\n eng_at_xnew_sub,\n deng_x_at_x0_sub,\n alpha,\n edges_sub,\n epsilon,\n sub_source_dim, # nv_sub\n edges_sub_dim,\n sv_at_x0,\n sv_at_x0_sub,\n sv_at_xnew,\n sv_at_xnew_sub,\n fv_at_xnew,\n eng_first_order_approx_xnew_x0_diff,\n t_flip,\n x0,\n eng_first_order_approx_xnew_x0_diff_sub,\n ray_deng_x_at_x0,\n ray_deng_x_at_xnew,\n eng_plot,\n ray_deng_x_at_x0_sub,\n ray_deng_x_at_xnew_sub,\n eng_plot_sub,\n span,\n type,\n B_inv_new,\n eng_xnew_x0_diff) = optim_res_unpack(optimization_res)\n\n plot_params = (ray_deng_x_at_x0,\n ray_deng_x_at_xnew,\n eng_plot,\n ray_deng_x_at_x0_sub,\n ray_deng_x_at_xnew_sub,\n eng_plot_sub,\n alpha,\n span,\n type)\n\n do_verbose(\"linear_diff\", eng_first_order_approx_xnew_x0_diff, verbose)\n all_plot_params.append(plot_params)\n fvi = fv_at_xnew\n\n sequence_params = concatinate_optimization_sequence_params(all_plot_params=all_plot_params, do_show=True)\n plot_optimization_sequence(sequence_params=sequence_params, do_show=True, type=type)\n\n plt.show()\n return 0", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def 
TDGradientFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]): \n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n grad2 = np.zeros(x2.shape) \n \n #N,dNdB,dNdT = HSRLDerivative(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol,dHmdB,dHmdT = HSRLDerivative(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)\n HSRL_comb,dHcdB,dHcdT = HSRLDerivative(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)\n \n # N,dNdB,dNdnWV,dNdT = WVDIALDerivative(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on,dWVndB,dWVndnWV,dWVndT = WVDIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)\n WV_off,dWVfdB,dWVfdnWV,dWVfdT = WVDIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0) \n \n # N,dNdB,dNdnWV,dNdT = O2DIALDerivative(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on,dO2ndB,dO2ndnWV,dO2ndT = O2DIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)\n O2_off,dO2fdB,dO2fdnWV,dO2fdT = O2DIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)\n \n# HSRLModel,dHSdB,dHSdT = HSRLProfileRatioDeriv(xS[:,0],P,xS[:,2], \\\n# Trx['HSRL Mol'],Trx['HSRL Comb'], \\\n# rb_spec['HSRL'],inu0['HSRL'],GainRatio=xK[0])\n#\n# WVModel,dWVdB,dWVdnWV,dWVdT = WaterVaporProfileRatioDeriv(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['WV'],\n# Trx['WV Online'], Trx['WV Offline'], \\\n# rb_spec['WV Online'],rb_spec['WV Offline'], \\\n# abs_spec['WV Online'],abs_spec['WV Offline'],dr, \\\n# inu0['WV Online'],inu0['WV Offline'],GainRatio=xK[1])\n# \n# O2Model,dO2dB,dO2dnWV,dO2dT = OxygenProfileRatioDeriv(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['O2'],\n# Trx['O2 Online'], Trx['O2 Offline'], \\\n# rb_spec['O2 Online'],rb_spec['O2 Offline'], \\\n# abs_spec['O2 Online'],abs_spec['O2 Offline'],dr, \\\n# inu0['O2 Online'],inu0['O2 Offline'],GainRatio=xK[2])\n \n HSRLmolBase = 1-(Prof['HSRL Mol'])/(HSRL_mol+Prof['HSRL Mol BG'])\n HSRLcombBase = 1-(Prof['HSRL Comb'])/(HSRL_comb+Prof['HSRL Comb BG'])\n WVonBase = 1-(Prof['WV Online'])/(WV_on+Prof['WV Online BG'])\n WVoffBase = 1-(Prof['WV Offline'])/(WV_off+Prof['WV Offline BG'])\n O2onBase = 1-(Prof['O2 Online'])/(O2_on+Prof['O2 Online BG'])\n O2offBase = 1-(Prof['O2 Offline'])/(O2_off+Prof['O2 Offline BG'])\n \n \n# HSRLbase = 2*(HSRLModel-Prof['HSRL'])/ProfVar['HSRL']\n# WVbase = 2*(WVModel-Prof['WV'])/ProfVar['WV']\n# O2base = 2*(O2Model-Prof['O2'])/ProfVar['O2']\n \n # temperature gradient\n grad2[1:,0] = np.nansum(HSRLmolBase[np.newaxis]*dHmdT,axis=1) \\\n + np.nansum(HSRLcombBase[np.newaxis]*dHcdT,axis=1) \\\n + np.nansum(WVonBase[np.newaxis]*dWVndT,axis=1) \\\n + np.nansum(WVoffBase[np.newaxis]*dWVfdT,axis=1) \\\n + np.nansum(O2onBase[np.newaxis]*dO2ndT,axis=1) \\\n + np.nansum(O2offBase[np.newaxis]*dO2fdT,axis=1)\n# # piece wise penalty function \n# gradpen = lam[0]*np.sign(np.diff(xS[:,0]))\n# 
gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,0] = grad2[2:,0] + gradpen\n# grad2[1:-1,0] = grad2[1:-1,0] - gradpen\n# piece wise slope penalty function \n gradpen = lam[0]*np.sign(np.diff(np.diff(xS[:,0])))\n gradpen[np.nonzero(np.isnan(gradpen))] = 0\n grad2[3:,0] = grad2[3:,0] + gradpen\n grad2[2:-1,0] = grad2[2:-1,0] - 2*gradpen\n grad2[1:-2,0] = grad2[1:-2,0] + gradpen\n \n # water vapor gradient\n grad2[1:,1] = np.nansum(WVonBase[np.newaxis]*dWVndnWV,axis=1) \\\n + np.nansum(WVoffBase[np.newaxis]*dWVfdnWV,axis=1) \\\n + np.nansum(O2onBase[np.newaxis]*dO2ndnWV,axis=1) \\\n + np.nansum(O2offBase[np.newaxis]*dO2fdnWV,axis=1)\n # piecewise penalty function\n gradpen = lam[1]*np.sign(np.diff(xS[:,1]))\n gradpen[np.nonzero(np.isnan(gradpen))] = 0\n grad2[2:,1] = grad2[2:,1] + gradpen\n grad2[1:-1,1] = grad2[1:-1,1] - gradpen\n \n # backscatter gradient\n grad2[1:,2] = np.nansum(HSRLmolBase[np.newaxis]*dHmdB,axis=1) \\\n + np.nansum(HSRLcombBase[np.newaxis]*dHcdB,axis=1) \\\n + np.nansum(WVonBase[np.newaxis]*dWVndB,axis=1) \\\n + np.nansum(WVoffBase[np.newaxis]*dWVfdB,axis=1) \\\n + np.nansum(O2onBase[np.newaxis]*dO2ndB,axis=1) \\\n + np.nansum(O2offBase[np.newaxis]*dO2fdB,axis=1) \n# #piecewise penalty function\n# gradpen = lam[2]*np.sign(np.diff(xS[:,2]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,2] = grad2[2:,2] + gradpen\n# grad2[1:-1,2] = grad2[1:-1,2] - gradpen\n \n\n # *bsrMult['WV']\n # *bsrMult['WV']\n # *bsrMult['O2']\n # *bsrMult['O2']\n\n # HSRL Common terms\n grad2[1:,3] = np.nansum(HSRLmolBase[np.newaxis]*HSRL_mol,axis=0) + np.nansum(HSRLcombBase[np.newaxis]*HSRL_comb,axis=0)\n# # piece wise penalty function \n# gradpen = lam[3]*np.sign(np.diff(xS[:,3]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,3] = grad2[2:,3] + gradpen\n# grad2[1:-1,3] = grad2[1:-1,3] - gradpen\n \n # WV Common terms\n grad2[1:,4] = np.nansum(WVonBase[np.newaxis]*WV_on,axis=0) + np.nansum(WVoffBase[np.newaxis]*WV_off,axis=0)\n# # piece wise penalty function \n# gradpen = lam[4]*np.sign(np.diff(xS[:,4]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,4] = grad2[2:,4] + gradpen\n# grad2[1:-1,4] = grad2[1:-1,4] - gradpen\n \n # O2 Common terms\n grad2[1:,5] = np.nansum(O2onBase[np.newaxis]*O2_on,axis=0) + np.nansum(O2offBase[np.newaxis]*O2_off,axis=0)\n# # piece wise penalty function \n# gradpen = lam[5]*np.sign(np.diff(xS[:,5]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,5] = grad2[2:,5] + gradpen\n# grad2[1:-1,5] = grad2[1:-1,5] - gradpen\n\n grad2[0,0] = np.nansum(HSRLmolBase*HSRL_mol/xK[0])\n grad2[0,1] = np.nansum(HSRLcombBase*HSRL_comb/xK[1])\n grad2[0,2] = np.nansum(WVonBase*WV_on/xK[2])\n grad2[0,3] = np.nansum(WVoffBase*WV_off/xK[3])\n grad2[0,4] = np.nansum(O2onBase*O2_on/xK[4])\n grad2[0,5] = np.nansum(O2offBase*O2_off/xK[5])\n \n# grad2[0,1] = np.nansum(WVbase*WVModel/xK[1])\n# grad2[0,2] = np.nansum(O2base*O2Model/xK[2])\n \n# OptError = np.nansum(2*(HSRLModel-Prof['HSRL'])/ProfVar['HSRL']*) \\\n# +np.nansum((WVModel-Prof['WV'])**2/ProfVar['WV']) \\\n# +np.sum((O2Model-Prof['O2'])**2/ProfVar['O2'])\n \n return grad2.flatten()", "def change_variables((a,b,c,d), (n,r,m)): \n return ( n*a**2 + r*a*b + m*b**2, 2*(n*a*c + m*b*d) + r*(a*d + c*b), \\\n n*c**2 + r*c*d + m*d**2 )", "def f(self,y,psi):\r\n\r\n #1. 
check that number of params is consistent\r\n # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n # assert psi.shape[1] == 4, 'inconsistent parameter dimensions'\r\n mpsi = psi.copy()\r\n d = psi[-1]\r\n mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3)\r\n\r\n #3. transform data\r\n z = d*y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])", "def independent_variables(self, indep_vars):\n self.set(independent_variables=indep_vars)", "def derivatives(self, increment_filter):\n ######################################################################\n # derivatives fluid and mass balance are static\n k = self.num_nw_fluids * 2 + 2\n\n ######################################################################\n # derivatives for energy balance equation\n for i in range(2):\n self.jacobian[k, i, 0] = (\n self.outl[i].h.val_SI - self.inl[i].h.val_SI)\n self.jacobian[k, i, 2] = -self.inl[i].m.val_SI\n\n self.jacobian[k, 2, 2] = self.inl[0].m.val_SI\n self.jacobian[k, 3, 2] = self.inl[1].m.val_SI\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer\n if self.Q.is_set:\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.jacobian[k, 0, 2] = -self.inl[0].m.val_SI\n self.jacobian[k, 2, 2] = self.inl[0].m.val_SI\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer coefficient\n if self.kA.is_set:\n f = self.kA_func\n self.jacobian[k, 0, 0] = (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n for i in range(4):\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified heat transfer coefficient\n if self.kA_char.is_set:\n f = self.kA_char_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(f, 'm', 0)\n if not increment_filter[1, 0]:\n self.jacobian[k, 1, 0] = self.numeric_deriv(f, 'm', 1)\n for i in range(4):\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n f = self.ttd_u_func\n for i in [0, 3]:\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = 
self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n f = self.ttd_l_func\n for i in [1, 2]:\n if not increment_filter[i, 1]:\n self.jacobian[k, i, 1] = self.numeric_deriv(f, 'p', i)\n if not increment_filter[i, 2]:\n self.jacobian[k, i, 2] = self.numeric_deriv(f, 'h', i)\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.jacobian[k, 0, 1] = self.pr1.val\n self.jacobian[k, 2, 1] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.jacobian[k, 1, 1] = self.pr2.val\n self.jacobian[k, 3, 1] = -1\n k += 1\n\n ######################################################################\n # derivatives for specified zeta at hot side\n if self.zeta1.is_set:\n f = self.zeta_func\n if not increment_filter[0, 0]:\n self.jacobian[k, 0, 0] = self.numeric_deriv(\n f, 'm', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[0, 1]:\n self.jacobian[k, 0, 1] = self.numeric_deriv(\n f, 'p', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[0, 2]:\n self.jacobian[k, 0, 2] = self.numeric_deriv(\n f, 'h', 0, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[2, 1]:\n self.jacobian[k, 2, 1] = self.numeric_deriv(\n f, 'p', 2, zeta='zeta1', inconn=0, outconn=0)\n if not increment_filter[2, 2]:\n self.jacobian[k, 2, 2] = self.numeric_deriv(\n f, 'h', 2, zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # derivatives for specified zeta at cold side\n if self.zeta2.is_set:\n f = self.zeta_func\n if not increment_filter[1, 0]:\n self.jacobian[k, 1, 0] = self.numeric_deriv(\n f, 'm', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[1, 1]:\n self.jacobian[k, 1, 1] = self.numeric_deriv(\n f, 'p', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[1, 2]:\n self.jacobian[k, 1, 2] = self.numeric_deriv(\n f, 'h', 1, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[3, 1]:\n self.jacobian[k, 3, 1] = self.numeric_deriv(\n f, 'p', 3, zeta='zeta2', inconn=1, outconn=1)\n if not increment_filter[3, 2]:\n self.jacobian[k, 3, 2] = self.numeric_deriv(\n f, 'h', 3, zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # derivatives for additional equations\n self.additional_derivatives(increment_filter, k)", "def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft 
+ gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array", "def DynamicsCo(x, t, T0, alpha, cost_co, cost_ch, K_co, K_ch, n, r):\n y=np.zeros([np.size(x)])\n D=np.zeros([2]) \n #define fitnss\n D[0]=dmax*x[0]**n/(K_co**n+x[0]**n) #cooperator\n D[1]=dmax*x[0]**n/(K_ch**n+x[0]**n) #cheater \n #degradation\n deg=fmax*x[1]/(x[1]+Kd) \n #ODE of eco-evo dynamics\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r*(1-cost_co)*(1-x[1]-x[2])-D[0]-alpha)#d Co/dt\n y[2]=x[2]*(r*(1-cost_ch)*(1-x[1]-x[2])-D[1]-alpha) #d Ch/dt\n \n return y", "def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0", "def nonhomo_system_variation_of_parameters(xc: List[Symbol], gt, t: Symbol = Symbol('t', real=True)):\n fund_matrix = eye(len(xc))\n for i, x in enumerate(xc):\n fund_matrix[:, i] = x\n\n procedure = Procedure()\n procedure.text('Fundamental matrix ').latex('\\\\Psi', nl=True)\\\n .eq(fund_matrix)\n\n gt = Matrix(gt)\n fund_inv = fund_matrix**(-1)\n procedure.text('Calculate the inverse of the fundamental matrix ').latex('\\\\Psi^{-1}', nl=True)\\\n .latex('\\\\Psi^{-1} = ').eq(fund_inv)\n\n fund_inv_gt = expand(simplify(fund_inv * gt))\n procedure.text('Compute ').latex('\\\\Psi^{-1} g(t)', nl=True)\\\n .latex('\\\\Psi^{-1} g(t) = ').eq(fund_inv_gt)\n\n procedure.text('Compute the integral', nl=True)\n fund_inv_gt_int = expand(simplify(integrate(fund_inv_gt)))\n procedure.latex('\\\\int \\\\Psi^{-1} g(t) =').eq(fund_inv_gt_int)\n\n procedure.text('Finally, ').latex(\n '\\\\vec{\\\\mathbf{x_p}} = \\\\Psi \\\\int \\\\Psi^{-1} g(t)', nl=True)\n sol = expand(fund_matrix * fund_inv_gt_int)\n procedure.latex('\\\\vec{\\\\mathbf{x_p}} =').eq(sol)\n return sol, procedure", "def temp_func(self,var):\n y = var['intercept']\n for p,c in zip(var['powers'],var['coef']):\n # Exp the 4 inputs to the power for that coef\n #to plug them into the equation, un-scale them:\n# a = np.multiply(self.ins,(1/self.scale_var))**p\n a = self.ins**p\n y += c* np.prod(a)\n\n #to fit this into the environment, re-scale:\n y = y #* self.scale_var\n\n # Noise is a random number (positive or negative), scaled by self.noise\n noise = random.randint(-1,1) * random.random() * self.noise * cfg['thresh1'] * self.scale_var #scales noise\n y += noise\n return y", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n 
self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def function_PLD(coeffs, t, Pns, lc, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, PLD_params, eclipse = False):\n new_flux = model_PLD(coeffs, t, Pns, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, PLD_params, eclipse = eclipse)\n return lc - new_flux", "def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd = True,\n movingDomain=False):\n #\n #set the objects describing the method and boundary conditions\n #\n self.movingDomain=movingDomain\n self.tLast_mesh=None\n #\n self.name=name\n self.sd=sd\n self.Hess=False\n self.lowmem=True\n 
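# --- Editor's note: illustrative aside, not part of the original snippet ---
# The function_PLD snippet just above follows the usual residual convention
# for curve fitting: it returns data - model(params). A residual written this
# way can be handed directly to a generic least-squares driver such as
# scipy.optimize.least_squares. The toy model below (a decaying exponential)
# and all variable names are hypothetical; they only illustrate the calling
# pattern, not the PLD photometry model itself.
import numpy as np
from scipy.optimize import least_squares

def toy_model(coeffs, t):
    a, tau = coeffs
    return a * np.exp(-t / tau)

def residuals(coeffs, t, data):
    return data - toy_model(coeffs, t)   # same data-minus-model shape as above

rng = np.random.default_rng(0)
t = np.linspace(0.0, 5.0, 200)
data = toy_model([2.0, 1.3], t) + 0.01 * rng.standard_normal(t.size)

fit = least_squares(residuals, x0=[1.0, 1.0], args=(t, data))
print(fit.x)  # close to the true coefficients [2.0, 1.3]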
self.timeTerm=True#allow turning off the time derivative\n #self.lowmem=False\n self.testIsTrial=True\n self.phiTrialIsTrial=True\n self.u = uDict\n self.ua = {}#analytical solutions\n self.phi = phiDict\n self.dphi={}\n for ck,phi in phiDict.iteritems():\n if coefficients.potential.has_key(ck):\n for cj in coefficients.potential[ck].keys():\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n else:\n self.dphi[(ck,ck)] = FiniteElementFunction(phi.femSpace)\n #check for nonlinearities in the diffusion coefficient that don't match the potential\n for ci,ckDict in coefficients.diffusion.iteritems():\n #for ck,cjDict in coefficients.diffusion.iteritems(): #cek: bug?\n for ck,cjDict in ckDict.iteritems():\n for cj in cjDict.keys():\n if not self.dphi.has_key((ck,cj)):\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n self.matType = matType\n #try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1,coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n ## Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now\n self.fluxBoundaryConditions=fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n #determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n #cek come back\n if self.stabilization != None:\n for ci in range(self.nc):\n if coefficients.mass.has_key(ci):\n for flag in coefficients.mass[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.advection.has_key(ci):\n for flag in coefficients.advection[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.diffusion.has_key(ci):\n for diffusionDict in coefficients.diffusion[ci].values():\n for flag in diffusionDict.values():\n if flag != 'constant':\n self.stabilizationIsNonlinear=True\n if coefficients.potential.has_key(ci):\n for flag in coefficients.potential[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.reaction.has_key(ci):\n for flag in coefficients.reaction[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.hamiltonian.has_key(ci):\n for flag in coefficients.hamiltonian[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n #determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or\n (numericalFluxType != None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n 
(self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n #calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self,self.nFreeVDOF_global)\n #\n #build the quadrature point dictionaries from the input (this\n #is just for convenience so that the input doesn't have to be\n #complete)\n #\n elementQuadratureDict={}\n elemQuadIsDict = isinstance(elementQuadrature,dict)\n if elemQuadIsDict: #set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization != None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature\n if self.shockCapturing != None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if elementQuadrature.has_key(('numDiff',ci,ci)):\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature\n if massLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n if reactionLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n elementBoundaryQuadratureDict={}\n if isinstance(elementBoundaryQuadrature,dict): #set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if elementBoundaryQuadrature.has_key(I):\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n #mwf include tag telling me which indices are which quadrature rule?\n 
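# --- Editor's note: illustrative aside, not part of the original snippet ---
# The dictionaries assembled above map each integral key to a quadrature
# rule, i.e. a set of reference-element points and weights, and the
# Quadrature.buildUnion call just below merges them into one union point set
# with per-integral weights. As a reminder of what such a rule is, this
# sketch integrates a polynomial exactly with a 1-D Gauss-Legendre rule; it
# is a stand-in for the simplex rules used in this snippet, not its API.
import numpy as np

points, weights = np.polynomial.legendre.leggauss(3)   # 3-point rule on [-1, 1]
f = lambda x: 5*x**4 + x**2 + 1.0                      # degree-4 integrand
approx = np.sum(weights * f(points))                   # exact for degree <= 5
exact = 2.0 + 2.0/3.0 + 2.0                            # int_{-1}^{1} f dx
print(approx, exact)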
(self.elementQuadraturePoints,self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global\n #\n #Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*\n self.mesh.nElementBoundaries_element*\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n\n #\n #storage dictionaries\n self.scalars_element = set()\n #\n #simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q={}\n self.ebq={}\n self.ebq_global={}\n self.ebqe={}\n self.phi_ip={}\n #mesh\n self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')\n self.q['det(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['abs(det(J))'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['J'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.q['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['g'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n self.ebqe['inverse(J)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['hat(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['bar(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['sqrt(det(g))'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.ebqe[('n')] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n #shape\n self.q[('v',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w',0)] = self.q[('v',0)]\n self.q[('grad(v)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)',0)] = self.q[('grad(v)',0)]\n self.q[('grad(w)*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)*dV_f',0)] = self.q[('grad(w)*dV',0)]\n #todo get rid of dV_{f,a}, etc\n self.q[('w*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w*dV_m',0)] = 
numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','grad(w)*dV','grad(w)*dV_f','w*dV','w*dV_m']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.q[key_ci] = self.q[key_0]\n #ELLAM weights stiffness, body integrals by dt\n for ci in range(self.nc):\n self.q[('dt*grad(w)*dV',ci)]= numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[ci],self.nSpace_global),'d')\n #\n self.ebqe[('v',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n self.ebqe[('w',0)] = self.ebqe[('v',0)]\n self.ebqe[('grad(v)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.ebqe[('w*dS_f',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','w*dS_f']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.ebqe[key_ci] = self.ebqe[key_0]\n\n for ci in range(self.nc):\n self.q[('u',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('grad(u)',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n #f\n for ci in self.coefficients.advection.keys():\n self.q[('f',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.q[('df',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n self.ebqe[('f',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.ebqe[('df',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n\n #a, linear dispersion single component\n\n for ci,ckDict in self.coefficients.diffusion.iteritems():\n for ck,cjDict in ckDict.iteritems():\n for flag in cjDict.values():\n assert flag == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n\n if self.coefficients.sdInfo != None and (ci,ck) in self.coefficients.sdInfo.keys():\n self.q[('a',ci,ck)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 
self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n\n else:\n self.q[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n #dense storage\n self.q[('grad(w)*dV_a',ci,ck)] = self.q[('grad(w)*dV_f',ci)]\n self.q[('dt*grad(w)*dV_a',ci,ck)]= self.q[('dt*grad(w)*dV',ci)]\n #ci,ckDict\n #linear potential only for now, need to change for e.g., Buckley Leverett\n for ck in self.phi.keys():\n self.phi[ck].dof[:]=self.u[ck].dof\n self.q[('grad(phi)',ck)] = self.q[('grad(u)',ck)]\n for key in self.dphi.keys():\n self.dphi[key].dof.fill(1.0)\n self.q[('dphi',key[0],key[1])] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n\n\n# if self.coefficients.diffusion.has_key(0):\n# for ck,flag in self.coefficients.diffusion[0][0].iteritems():\n# assert self.coefficients.diffusion[0][0][ck] == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n# if self.coefficients.sdInfo != None and (0,0) in self.coefficients.sdInfo.keys():\n# self.q[('a',0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.q[('da',0,0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n\n# else:\n# self.q[('a',0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.q[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# #\n# self.phi[0].dof[:]=self.u[0].dof\n# self.dphi[(0,0)].dof.fill(1.0)\n# self.q[('grad(phi)',0)] = self.q[('grad(u)',0)]\n# self.q[('dphi',0,0)] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n# self.q[('grad(w)*dV_a',0,0)] = self.q[('grad(w)*dV_f',0)]\n# self.q[('dt*grad(w)*dV_a',0,0)]= 
self.q[('dt*grad(w)*dV',0)]\n\n #r 'constant' ie not a function of solution but go ahead and include dr for now\n for ci,cjDict in self.coefficients.reaction.iteritems():\n self.q[('r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dr',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('w*dV_r',ci)] = self.q[('w*dV',ci)]\n self.q[('dt*w*dV_r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.ebqe[('r',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #m\n for ci,cjDict in self.coefficients.mass.iteritems():\n self.q[('m',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dm',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('mt',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_last',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp',ci)] = self.q[('m',ci)]\n self.q[('cfl',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('numDiff',ci,ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.ebqe[('m',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n for cj in cjDict.keys():\n self.ebqe[('dm',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n\n ###ellam specific options with defauls here\n self.ellamDiscretization = ELLAMtools.ELLAMdiscretization(self,options)\n\n #\n self.needEBQ = options.needEBQ #could need for analytical velocity evaluation with RT0,BDM\n\n #beg normal stuff allocating things\n self.points_elementBoundaryQuadrature= set()\n self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature= set()\n self.tensors_elementBoundaryQuadrature= set()\n\n if self.needEBQ:\n for k in ['x','hat(x)']:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq['n'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n self.ebq['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),'d')\n #allocate the metric tensor\n self.ebq['g'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n log(memory(\"element boundary quadrature\",\"LADRellam\"),level=4)\n ebq_keys = ['sqrt(det(g))']\n ebq_keys.extend([('u',ci) for ci in range(self.nc)])\n for k in ebq_keys:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #test and trial info\n self.ebq[('w',0)] = numpy.zeros((self.mesh.nElements_global,\n 
self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[0]),'d')\n for ci in range(1,self.nc):\n self.ebq[('w',ci)] = self.ebq[('w',0)]\n for ci in range(self.nc):\n self.ebq[('v',ci)] = self.ebq[('w',0)]\n\n #ebq_global info\n self.ebq_global['x'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq_global['n'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n #\n # allocate residual and Jacobian storage\n #\n self.elementResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementSpatialResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementJacobian = {}\n for ci in range(self.nc):\n self.elementJacobian[ci]={}\n for cj in range(self.nc):\n if cj in self.coefficients.stencil[ci]:\n self.elementJacobian[ci][cj] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci],\n self.nDOF_trial_element[cj]),\n 'd')\n #\n self.fluxJacobian_exterior = {}\n for ci in range(self.nc):\n self.fluxJacobian_exterior[ci]={}\n for cj in self.coefficients.stencil[ci]:\n self.fluxJacobian_exterior[ci][cj] = numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[cj]),\n 'd')\n\n #\n #\n #\n #\n log(memory(\"element and element boundary Jacobians\",\"OneLevelTransport\"),level=4)\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')\n self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')\n self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n #identify the internal nodes this is ought to be in mesh\n ##\\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global,i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')\n for nI,n in enumerate(self.internalNodes):\n self.internalNodesArray[nI]=n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\",2)\n self.updateLocal2Global()\n log(\"Building time integration object\",2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\"OneLevelTransport\"),level=4)\n #mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options != None:\n self.timeIntegration.setFromOptions(options)\n 
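# --- Editor's note: illustrative aside, not part of the original snippet ---
# The preceding allocation block builds plain dictionaries (self.q, self.ebq,
# self.ebqe, self.elementJacobian, ...) keyed by strings or tuples such as
# ('u', ci) or ('grad(u)', ci), each holding a NumPy array whose leading axes
# are (element, quadrature point) or (element, test DOF, trial DOF). The
# sketch below reproduces that storage convention with made-up sizes, purely
# to make the indexing explicit; it is not part of the original class.
import numpy as np

n_elements, n_quad, n_space = 10, 4, 2
n_dof_test = n_dof_trial = 3

q = {}
q[('u', 0)] = np.zeros((n_elements, n_quad), 'd')
q[('grad(u)', 0)] = np.zeros((n_elements, n_quad, n_space), 'd')
element_jacobian = np.zeros((n_elements, n_dof_test, n_dof_trial), 'd')

q[('u', 0)][5, 2] = 1.0                    # component 0, element 5, quad point 2
q[('grad(u)', 0)][5, 2, :] = [0.5, -0.25]  # its spatial gradient
element_jacobian[5, 1, 2] += 0.1           # local (test, trial) coupling entry
print(q[('grad(u)', 0)].shape, element_jacobian.shape)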
log(memory(\"TimeIntegration\",\"OneLevelTransport\"),level=4)\n log(\"Calculating numerical quadrature formulas\",2)\n self.calculateQuadrature()\n #lay out components/equations contiguously for now\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]\n self.stride = [1 for ci in range(self.nc)]\n #use contiguous layout of components for parallel, requires weak DBC's\n comm = Comm.get()\n self.comm=comm\n if comm.size() > 1:\n assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,\"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [ci]\n self.stride = [self.nc for ci in range(self.nc)]\n #\n log(memory(\"stride+offset\",\"OneLevelTransport\"),level=4)\n if numericalFluxType != None:\n if options == None or options.periodicDirichletConditions == None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n #set penalty terms\n #cek todo move into numerical flux initialization\n if self.ebq_global.has_key('penalty'):\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n #penalty term\n #cek move to Numerical flux initialization\n if self.ebqe.has_key('penalty'):\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power\n log(memory(\"numericalFlux\",\"OneLevelTransport\"),level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n #use post processing tools to get conservative fluxes, None by default\n import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)\n log(memory(\"velocity postprocessor\",\"OneLevelTransport\"),level=4)\n #helper for writing out data storage\n import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n #TODO get rid of this\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n if self.coefficients.advection.has_key(ci):\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux,'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux,'isDOFBoundary'):\n 
self.numericalFlux.isDOFBoundary = {}\n for ci in range(self.nc):\n self.numericalFlux.isDOFBoundary[ci]= numpy.zeros(self.ebqe[('u',ci)].shape,'i')\n if not hasattr(self.numericalFlux,'ebqe'):\n self.numericalFlux.ebqe = {}\n for ci in range(self.nc):\n self.numericalFlux.ebqe[('u',ci)]= numpy.zeros(self.ebqe[('u',ci)].shape,'d')", "def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()", "def fluid_reynolds(uu, param, grid, lnrho=list(), shock=list(), nghost=3,\n lmix=True):\n #viscous forces\n th2 = 2./3\n th1 = 1./3\n fvisc = np.zeros_like(uu)\n #molecular viscosity contribution\n ldel2, lshock, lhyper3 = False, False, False\n for ivisc in param.ivisc:\n if not 'shock' in ivisc and not 'hyper' in ivisc\\\n and not '\\n' in ivisc:\n ldel2 = True\n if 'shock' in ivisc:\n lshock = True\n if 'hyper3' in ivisc:\n lhyper3 = True\n \n if ldel2:\n if lhyper3:\n lhyper3 = lhyper3==lmix\n del2u = np.zeros_like(uu)\n for j in range(0,3):\n del2u[j] = del2(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n del2u[j, : nghost, nghost:-nghost, nghost:-nghost] = del2u[j,-2*nghost: -nghost, nghost: -nghost, nghost: -nghost]\n del2u[j,-nghost: , nghost:-nghost, nghost:-nghost] = del2u[j, nghost:2*nghost, nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, : nghost, nghost:-nghost] = del2u[j, nghost: -nghost,-2*nghost: -nghost, nghost: -nghost]\n del2u[j, nghost:-nghost,-nghost: , nghost:-nghost] = del2u[j, nghost: -nghost, nghost:2*nghost, nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost, : nghost] = del2u[j, nghost: -nghost, nghost: -nghost,-2*nghost: -nghost]\n del2u[j, nghost:-nghost, nghost:-nghost,-nghost: ] = del2u[j, nghost: -nghost, nghost: -nghost, nghost:2*nghost]\n for ivisc in param.ivisc:\n ivisc = str.strip(ivisc,'\\n')\n if 'nu-const' not in ivisc and 'shock' not in ivisc\\\n and 'hyper' not in ivisc and len(ivisc) > 0:\n print('fluid_reynolds WARNING: '+ivisc+' not implemented\\n'+\n 'terms may be missing from the standard rate of strain tensor')\n fvisc = fvisc + 
param.nu*del2u\n del(del2u)\n tmp0 = grad(uu[0],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp0[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp0[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp0[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp0[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp0[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp0[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp0[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp0[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n tmp1 = grad(uu[1],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp1[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp1[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp1[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp1[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp1[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp1[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp1[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp1[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n tmp2 = grad(uu[2],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n tmp2[j, :nghost,nghost:-nghost,nghost:-nghost] = tmp2[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,-nghost:,nghost:-nghost,nghost:-nghost] = tmp2[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost, :nghost,nghost:-nghost] = tmp2[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,-nghost:,nghost:-nghost] = tmp2[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost, :nghost] = tmp2[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n tmp2[j,nghost:-nghost,nghost:-nghost,-nghost:] = tmp2[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #effect of compressibility \n if len(lnrho) > 0:\n divu = div(uu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n divu[ :nghost,nghost:-nghost,nghost:-nghost] = divu[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n divu[-nghost:,nghost:-nghost,nghost:-nghost] = divu[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost, :nghost,nghost:-nghost] = divu[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n divu[nghost:-nghost,-nghost:,nghost:-nghost] = divu[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost, :nghost] = divu[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n divu[nghost:-nghost,nghost:-nghost,-nghost:] = divu[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n gradlnrho = grad(lnrho,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradlnrho[j, :nghost,nghost:-nghost,nghost:-nghost] = gradlnrho[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradlnrho[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost, :nghost,nghost:-nghost] = 
gradlnrho[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradlnrho[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost, :nghost] = gradlnrho[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradlnrho[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradlnrho[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n Sglnrho = np.zeros_like(uu)\n Sglnrho[0] = dot(tmp0,gradlnrho) +\\\n (tmp0[0]+tmp1[0]+tmp2[0]-th2*divu)*gradlnrho[0] \n Sglnrho[1] = dot(tmp1,gradlnrho) +\\\n (tmp0[1]+tmp1[1]+tmp2[1]-th2*divu)*gradlnrho[1]\n Sglnrho[2] = dot(tmp2,gradlnrho) +\\\n (tmp0[2]+tmp1[2]+tmp2[2]-th2*divu)*gradlnrho[2]\n graddivu = grad(divu,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n graddivu[j, :nghost,nghost:-nghost,nghost:-nghost] = graddivu[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,-nghost:,nghost:-nghost,nghost:-nghost] = graddivu[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost, :nghost,nghost:-nghost] = graddivu[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,-nghost:,nghost:-nghost] = graddivu[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost, :nghost] = graddivu[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n graddivu[j,nghost:-nghost,nghost:-nghost,-nghost:] = graddivu[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n fvisc = fvisc + param.nu*(th1*graddivu+Sglnrho)\n del(Sglnrho)\n elif param.ldensity:\n print('fluid_reynolds WARNING: no lnrho provided\\n'+\n 'rate of strain tensor likely incomplete')\n #shock contribution\n if lshock:\n if len(shock) == 0:\n print('fluid_reynolds WARNING: no shock provided\\n'+\n 'rate of strain tensor likely incomplete')\n else:\n shock[ :nghost,nghost:-nghost,nghost:-nghost] = shock[-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n shock[-nghost:,nghost:-nghost,nghost:-nghost] = shock[ nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost, :nghost,nghost:-nghost] = shock[nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n shock[nghost:-nghost,-nghost:,nghost:-nghost] = shock[nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost, :nghost] = shock[nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n shock[nghost:-nghost,nghost:-nghost,-nghost:] = shock[nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n divugradlnrho = np.zeros_like(uu)\n gradshock = grad(shock,grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n coordinate_system=param.coord_system)\n for j in range(0,3):\n gradshock[j, :nghost,nghost:-nghost,nghost:-nghost] = gradshock[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,-nghost:,nghost:-nghost,nghost:-nghost] = gradshock[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost, :nghost,nghost:-nghost] = gradshock[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,-nghost:,nghost:-nghost] = gradshock[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost, :nghost] = gradshock[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n gradshock[j,nghost:-nghost,nghost:-nghost,-nghost:] = gradshock[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n for j in range(0,3):\n divugradlnrho[j] = param.nu_shock*divu*gradshock[j] +\\\n param.nu_shock*shock*(divu*gradlnrho[j] + graddivu[j])\n 
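# --- Editor's note: illustrative aside, not part of the original snippet ---
# Every derivative computed in fluid_reynolds is followed by the same
# boilerplate seen above: the nghost ghost cells on each face are refilled
# from the interior edge of the opposite face, i.e. periodic boundary
# conditions. The helper below captures that slicing pattern once for a
# single axis; it is an editorial sketch of the idiom, not a function from
# the original module.
import numpy as np

def wrap_ghosts(arr, nghost, axis):
    """Copy periodic ghost zones along one axis, as done inline above."""
    src_lo = [slice(None)] * arr.ndim
    src_hi = [slice(None)] * arr.ndim
    dst_lo = [slice(None)] * arr.ndim
    dst_hi = [slice(None)] * arr.ndim
    dst_lo[axis] = slice(None, nghost)          # arr[:nghost]
    src_lo[axis] = slice(-2*nghost, -nghost)    # arr[-2*nghost:-nghost]
    dst_hi[axis] = slice(-nghost, None)         # arr[-nghost:]
    src_hi[axis] = slice(nghost, 2*nghost)      # arr[nghost:2*nghost]
    arr[tuple(dst_lo)] = arr[tuple(src_lo)]
    arr[tuple(dst_hi)] = arr[tuple(src_hi)]
    return arr

a = np.arange(10.0)
wrap_ghosts(a, nghost=3, axis=0)
print(a)   # first 3 and last 3 entries now mirror the opposite interior edge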
del(divu,gradshock,gradlnrho,graddivu)\n fvisc = fvisc + divugradlnrho\n del(divugradlnrho)\n if lhyper3:\n #deluij5 = np.zeros_like([uu,uu,uu])\n #uij5glnrho to be included\n del6u = np.zeros_like(uu)\n for j in range(0,3):\n del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz)\n del6u[j, :nghost,nghost:-nghost,nghost:-nghost] = del6u[j,-2*nghost:-nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,-nghost:,nghost:-nghost,nghost:-nghost] = del6u[j, nghost: 2*nghost,nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost, :nghost,nghost:-nghost] = del6u[j,nghost:-nghost,-2*nghost:-nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,-nghost:,nghost:-nghost] = del6u[j,nghost:-nghost, nghost: 2*nghost,nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost, :nghost] = del6u[j,nghost:-nghost,nghost:-nghost,-2*nghost:-nghost]\n del6u[j,nghost:-nghost,nghost:-nghost,-nghost:] = del6u[j,nghost:-nghost,nghost:-nghost, nghost: 2*nghost]\n #del6 for non-cartesian tba\n #del6u[j] = del6(uu[j],grid.dx,grid.dy,grid.dz,x=grid.x,y=grid.y,\n # coordinate_system=param.coord_system)\n fvisc = fvisc + param.nu_hyper3*del6u\n del(del6u)\n fvisc2 = np.sqrt(dot2(fvisc))\n #advective forces\n advec = np.zeros_like(uu)\n advec[0] = dot(uu,tmp0)\n advec[1] = dot(uu,tmp1)\n advec[0] = dot(uu,tmp2)\n del(tmp0,tmp1,tmp2)\n advec2 = np.sqrt(dot2(advec))\n del(advec)\n #avoid division by zero\n if fvisc2.max() > 0:\n fvisc2[np.where(fvisc2==0)] = fvisc2[np.where(fvisc2>0)].min()\n Re = advec2/fvisc2\n #set minimum floor to exclude zero-valued Re \n Re[np.where(Re==0)] = Re[np.where(Re>0)].min()\n else:\n Re = advec2\n print('Re undefined')\n return Re", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot", "def exocytosis_model(rate,twin,N_syn,\n pars,N_var,\n recompile=0,\n model='spiking',\n solver_options=None,\n stimulus='poisson',spikes_pre=None):\n\n # First check that N_var is compatible\n assert N_var<3 and N_var>=1, \"Number of variables of exocytosis model must be either 1 (x) or 2 (x,u)\"\n\n # Assures that twin is Numpy array for later handling\n twin = np.asarray(twin,dtype=float)\n\n # Also convert make sure to recast N_eq in a way that is suitable for C\n N_var = np.intc(N_var)\n if not N_syn: N_syn = 0 # Dummy value of N_syn in the case of the mean-field model\n N_syn = np.intc(N_syn)\n\n if model=='spiking':\n # Create input_spikes\n if twin.size == 2:\n # One rate or multiple ones in the same interval\n spikes = spg.input_spikes(N_syn, twin[1], rate, 0, stimulus=stimulus, spikes_pre=spikes_pre)\n else:\n # Multiple rates in different intervals\n spikes = spg.input_spikes(N_syn, twin, rate, 0, stimulus=stimulus, spikes_pre=spikes_pre)\n twin = np.r_[0, np.sum(twin)]\n N_spk = int(np.shape(spikes)[1])\n\n # Check that ICs are of size N_var x N_syn\n # NOTE: you will have to pass the whole vector of ICs to compute solutions from last point\n if pars['ICs'].size != (N_var+1)*N_syn:\n pars['ICs'] = np.tile(pars['ICs'][:N_var+1],(1,N_syn))[0]\n\n support_code = \"\"\"\n #include \"gliotransmission_models.h\"\n \"\"\"\n source_files = 
[os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]\n code = \"\"\"\n // Version\n double version = 1.0;\n \n // Define astrocyte model\n release synapse(N_var,N_syn,N_spk,pars);\n synapse.set_ics(pars);\n \n // Declare output structure\n out_release out(N_spk,N_var);\n \n // Simulator\n out = synapse.simulate(spikes.data());\n \n //Output \n return_val = out.make_PyDict();\n \"\"\"\n libs = ['gsl', 'gslcblas', 'm']\n dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]\n vars = ['pars', 'spikes','N_spk','N_var','N_syn']\n otm = weave.inline(code,\n vars,\n support_code=support_code,\n sources=source_files,\n libraries=libs,\n library_dirs=dirs,\n include_dirs=dirs,\n runtime_library_dirs=dirs,\n type_converters=converters.blitz,\n compiler='gcc',\n extra_compile_args=['-std=c++11'],\n force=recompile)\n # Post-stimulus processing\n otm['spk'] = spikes[0] # Spike instants\n otm['is'] = spikes[-1] # Synapse indexes in the spike train\n otm['ICs'] = pars['ICs']\n if N_var>1:\n u_ = otm['u']\n else:\n u_ = None\n otm['LCs'] = last_point(pars,otm['spk'],twin[-1],otm['x'],otm['y'],otm['is'],uval=u_)\n elif model=='average':\n # Check that rate is scalar\n assert isscalar(rate), \"Mean-field rate must be scalar\"\n\n support_code = \"\"\"\n #include \"gliotransmission_models.h\"\n \"\"\"\n source_files = [os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/pycapi_utils.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/solver_options.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers/stochastic_solvers.cpp'),\n os.path.join(os.path.expanduser('~'), base_dir + '/code/gliotransmission_models.cpp')]\n code = \"\"\"\n // Version\n double version = 0.0;\n\n // Define astrocyte model\n release_ave synapse(N_var);\n\n // Declare output structure\n out_release out;\n\n // Simulator\n out = synapse.simulate(rate,pars,solver_options);\n\n //Output \n return_val = out.make_PyDict();\n \"\"\"\n libs = ['gsl', 'gslcblas', 'm']\n dirs = [os.path.join(os.path.expanduser('~'), base_dir + '/code/'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules'),\n os.path.join(os.path.expanduser('~'), base_dir + '/pycustommodules/solvers')]\n vars = ['rate', 'pars', 'solver_options', 'N_var']\n otm = weave.inline(code,\n vars,\n support_code=support_code,\n sources=source_files,\n libraries=libs,\n library_dirs=dirs,\n include_dirs=dirs,\n runtime_library_dirs=dirs,\n type_converters=converters.blitz,\n compiler='gcc',\n extra_compile_args=['-std=c++11'],\n force=recompile)\n\n # Post-stimulus processing\n otm['twin'] = twin # Simulate interval\n # Add released resources\n if (N_var < 2):\n otm['r'] = pars['u0'] * otm['x']\n else:\n otm['r'] = np.multiply(otm['x'], otm['u'])\n return otm", "def apply_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n resids['z'] = x*z + z - 4.0\n\n # Output equations need to evaluate a residual just like an explicit comp.\n resids['y'] = x + 2.0*z - unknowns['y']\n 
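# --- Editor's note: illustrative aside, not part of the original snippet ---
# apply_nonlinear above only *evaluates* residuals; a surrounding Newton-type
# solver is expected to drive them to zero. For this particular component the
# algebra is simple enough to check by hand: x*z + z - 4 = 0 gives
# z = 4/(x + 1), and the output equation then gives y = x + 2*z. The sketch
# below verifies that with a generic root finder; it is not the framework's
# own solver loop, and the names here are hypothetical.
import numpy as np
from scipy.optimize import fsolve

def residuals(u, x):
    z, y = u
    return [x*z + z - 4.0,        # resids['z']
            x + 2.0*z - y]        # resids['y']

x = 1.0
z, y = fsolve(residuals, x0=[1.0, 1.0], args=(x,))
print(z, 4.0/(x + 1.0))   # both 2.0
print(y, x + 2.0*z)       # both 5.0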
#print(x, unknowns['y'], z, resids['z'], resids['y'])", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0,0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda t: np.interp(t, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def pt_operators(N, L, J, theta, f, phi, band_idxs, deltaE, exact=False, qs=None, verbose=False, **kwargs):\n if qs is None:\n qs = range(N)\n BandIdxs = band_idxs\n DeltaE = deltaE\n if 'theta0' in kwargs:\n theta0 = kwargs['theta0']\n else:\n theta0 = theta\n H0Op = pf.ParafermionicChainOp(N, L, J, theta0, 0.0, 0.0, q=0) # we get MPO object for full unperturbed Hamiltonian (note the same in each sector)\n HfOps = []\n for q in qs:\n if 'exclude_side' in kwargs:\n if kwargs['exclude_side'] == 'left':\n fs = np.ones(L)*f\n fs[0] = 0.0\n Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)\n elif kwargs['exclude_side'] == 'right':\n fs = np.ones(L)*f\n fs[-1] = 0.0\n Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)\n elif kwargs['exclude_side'] == 'neither':\n fs = np.ones(L)*f\n fs[0] = 0.0\n fs[-1] = 0.0\n Hf = pf.ParafermionicChainOp(N, L, J, theta, fs, phi, q=q)\n else:\n raise Exception('\\'exlude_side\\' argument should be either left or right')\n else:\n Hf = pf.ParafermionicChainOp(N, L, J, theta, f, phi, q=q)\n Hf.add(H0Op, c1=1.0, c2=-1.0, inplace=True, compress=False)\n HfOps.append(Hf)\n [Partitions, H0Energies] = H0Op.get_bands_and_energies() # get all the partitions and energies of each\n BandEnergy = H0Energies[BandIdxs[0]] # get the energy of the band we start from, this is E0\n BandPartitions = list(map(lambda x: Partitions[x], BandIdxs)) # get the\n FullBand = np.vstack(list(map(lambda x: pfcy.GetFullBandDW(BandPartitions[x]), range(len(BandIdxs)))))\n FullBandDim = len(FullBand)\n [NeighbouringBands,] = np.where(np.abs(H0Energies - BandEnergy) < DeltaE) # find other bands within deltaE in energy\n FullSubspace = np.copy(FullBand)\n for NeighbouringBand in NeighbouringBands:\n if NeighbouringBand not in BandIdxs:\n FullSubspace = np.vstack((FullSubspace, pfcy.GetFullBandDW(Partitions[NeighbouringBand])))\n FullSubspaceDim = FullSubspace.shape[0]\n if verbose: print('Full subspace dim: ' + str(FullSubspaceDim) + '.')\n x = np.arange(FullSubspaceDim)\n I = sps.diags(np.ones(FullSubspaceDim), 0)\n P0 = sps.diags(np.piecewise(x, [x < FullBandDim, x >= FullBandDim], [1.0, 0.0]), 0)\n Q0 = sps.diags(np.piecewise(x, [x < FullBandDim, x >= FullBandDim], [0.0, 1.0]), 0)\n s = time.time()\n H0 = H0Op.mats_subspace(FullSubspace)\n e = time.time()\n if verbose: 
print('Time taken to calculate H0 matrix: ' + str(e-s) + ' seconds.')\n\n s = time.time()\n Hfs = list(map(lambda x : HfOps[x].mats_subspace(FullSubspace), qs))\n e = time.time()\n if verbose: print('Time taken to calculate V matrices: ' + str(e-s) + ' seconds.')\n denominators = (BandEnergy - H0.diagonal()[FullBandDim:])\n if len(np.where(denominators == 0)[0]) > 0:\n return None\n Q = sps.diags(np.hstack([np.zeros(FullBandDim),np.ones(FullSubspaceDim-FullBandDim)/denominators]), 0)\n\n if exact:\n Offset = np.sum(map(lambda x: len(pfcy.GetFullBandDW(Partitions[x])), range(min(BandIdxs))))\n # for debugging purposes, calculate some of full spectrum exactly, can be time consuming\n FullEs = list(map(lambda x: pf.Diagonalise(N, L, J, theta, f, phi, q=x, k=Offset + FullBandDim), qs))\n FullEs = list(map(lambda x: FullEs[x][0][Offset:(Offset+FullBandDim)], qs))\n else:\n FullEs = None\n\n return [P0, Q, H0, Hfs, FullBandDim, BandEnergy, FullEs]", "def variables(self, *args, **kwargs):\n return self._optimizer.variables(*args, **kwargs)", "def f(self,y,psi):\r\n\r\n #1. check that number of params is consistent\r\n assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'\r\n assert psi.shape[1] == 3, 'inconsistent parameter dimensions'\r\n\r\n #2. exponentiate the a and b (positive!)\r\n mpsi = psi.copy()\r\n\r\n #3. transform data\r\n z = y.copy()\r\n for i in range(len(mpsi)):\r\n a,b,c = mpsi[i]\r\n z += a*np.tanh(b*(y+c))\r\n return z", "def set_coeffs(self, sol):\n # TODO: look for bugs here!\n self.log_debug(\"Set spline coefficients\")\n\n # task: find which of the free parameters (coeffs) belong to which spline object\n sol_bak = sol.copy()\n subs = dict()\n\n # iterate over the OrderedDict {'x1': [cx1_..., ...], 'u1': [cu1_...]}\n for k, v in list(self.indep_vars.items()):\n i = len(v)\n # TODO: improve comment\n subs[k] = sol[:i] # set numerical value to symbolical value\n sol = sol[i:] ##:: sol = []\n \n if self._parameters['use_chains']:\n for var in self.sys.states + self.sys.inputs:\n for ic in self._chains:\n if var in ic: ##:: ('x1','x2','u1') and ('x3','x4')\n subs[var] = subs[ic.upper] ##:: elements in the same chain have the same coefficients (number, not symbol).\n \n # set numerical coefficients for each spline and derivative\n # TODO: handle `!!`-comments after talking to yx \n ##!! spline_key_plus_k = self.splines.keys().append('k')\n for k in list(self.splines.keys()): ##:: ['x1','x3']\n self.splines[k].set_coefficients(free_coeffs=subs[k])\n ##:: self._indep_vars = free_coeffs (self.splines[k]._indep_coeffs=free_coeffs) makes symbols changing into numbers. {'x1': <Spline object>, 'x3': <Spline object>}, Spline._P[k] saves the polynomial.\n \n # yet another dictionary for solution and coeffs\n# ##!! indep_vars['z_par'] = np.array([sp.symbols('k')])\n# ##!! self.indep_vars = indep_vars\n\n coeffs_sol = OrderedDict()\n\n # used for indexing\n i = 0\n j = 0\n\n for k, v in list(self.indep_vars.items()):\n ##:: ['x1': array([0.12,0.13,...,]), 'x3':...] symbols change into numbers\n j += len(v)\n coeffs_sol[k] = sol_bak[i:j]\n i = j\n\n self.coeffs_sol = coeffs_sol\n ##:: {'x1': array([ 25.94485709, 16.38313857, -35.65010072, ..., 2.28427004, 2.82974712, 1.88490863]), 'x3': array([-34.33884269, 45.13959025, 1.3272378 , -4.15546318,# 5.3863866 , -5.39286006, -8.86559812, -6.11620983, -2.95630206])}\n\n ##!! 
return self.coeffs_sol['z_par'].tolist()", "def compute_plume(self, full_nondim=False):\n def compute_nondimensional(x):\n \"\"\" both nondim melt rate and circulation \"\"\"\n M = self.nondim_M(x)\n M.attrs = {'long_name':'dimensionless meltrate; eqn. (26)'}\n phi0 = self.phi0(x)\n phi0.attrs = {'long_name':'dimensionless circulation; eqn. (25)'}\n return M, phi0\n\n # calculations\n self.dp['M'], self.dp['phi0'] = compute_nondimensional(self.dp.dgrl_)\n\n self.dp['m'] = self.dim_M().where(self.dp.mask==3)*3600*24*365 # [m/s] -> [m/yr]\n self.dp.m.attrs = {'long_name':'dimensional meltrates; eqn. (28a)', 'units':'m/yr'}\n \n self.dp['Phi'] = self.Phi().where(self.dp.mask==3)\n self.dp.Phi.attrs = {'long_name':'dimensional circulation; eqn. (29)', 'units':'m^3/s'}\n\n if full_nondim: # compute non-dimensional 1D melt curve for full [0,1] interval\n self.dp = self.dp.assign_coords({'x_':np.linspace(0,1,51)})\n self.dp.x_.attrs['long_name'] = 'non-dimensional coordinates in [0,1]'\n self.dp['M_full'], self.dp['phi0_full'] = compute_nondimensional(self.dp.coords['x_'])\n \n return self.dp", "def dynstall_oye_dxdt(t,fs,u,p):\n alpha = u['alpha'](t)\n f_st = p['F_st'](alpha)\n return 1/p['tau'] * (f_st - fs)", "def __init__(self, sim, A_phi, V0_frac, t, Ndec_response=4):\n self.A0 = A_phi[0]\n self.phi0 = A_phi[1]\n self.sim = sim\n self.V0_frac = V0_frac\n self.t = t\n self.x_eq0 = self.sim.x_eq([0, 0, V0_frac*self.sim.V(t[0])*self.sim.C], t[0]) # Given the initial charge...\n self.sol = integrate.odeint(sim, self.x0, t=t)\n self.z = sim.zLI(self.sol, t)\n self.phi = np.unwrap(np.angle(self.z))\n self.t_filt = t_filt = t[15:] \n self.i0 = i0 = np.argmin(abs(self.t_filt)) \n self.ip = ip = np.argmin(abs(self.t_filt-self.sim.V.tp))\n self.phi_filt = phi_filt = np.convolve(self.phi, np.ones(16)/16.0, 'valid') # Dependent on using 16 samples / period\n self.df_filt = df_filt = np.gradient(self.phi_filt)/np.gradient(self.t_filt)\n self.t_wide = t_filt[::Ndec_response]\n self.respRePts = self.sim.responseReVec(self.t_wide)\n self.Ht = lambda tt: np.interp(tt, self.t_wide, self.respRePts)\n \n \n \n self.dphi_act = (phi_filt[ip] - phi_filt[i0])/ (2*np.pi)*1000\n self.phi_filt_mcyc = (phi_filt - phi_filt[0])*1e3/(2*np.pi)\n self.phi_est, self.dphi_est = estimate_dphi(self.df_python, self.i0, self.ip)\n self.error = (self.dphi_est - self.dphi_act)/self.dphi_act", "def _redef_sp1_vars(self):\r\n\r\n if len(self.fq_list) == 0:\r\n no_rad = True\r\n lst_tmp = np.matrix(np.reshape(self.lst_tmp, \r\n (self.lst_tmp.size, 1)))\r\n else: no_rad = False\r\n # The practically constants...\r\n # Big Epsilon:\r\n if self.cond == True:\r\n self.Epsilon = self.d_T * self.thermal_conductivity\r\n else:\r\n self.Epsilon = (self.diff_scale ** 2) / \\\r\n (3.0 * self.absorb_coeffs[self.rad] ** 2)\r\n # Beta:\r\n if self.cond == True:\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff)\r\n else:\r\n self.Beta = (1.0 + 3.0 * self.r2) * (2.0 * self.diff_scale) / \\\r\n ((1.0 - 2.0 * self.r1) * (\r\n 3.0 * self.absorb_coeffs[self.rad]))\r\n\r\n # The feild solutions at the last timestep.\r\n # The integral vF:\r\n if self.cond == True:\r\n # The horrifically complicated F:\r\n def F_func_cond(elem, eta):\r\n F = 0.0\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F += Tn\r\n for k in range(0, len(self.fq_list)):\r\n vk = self.fq_list[k]\r\n try:\r\n vk_m = self.fq_list[k - 1]\r\n except:\r\n vk_m = self.v0_frequency\r\n absorbtion = self.absorb_coeffs[k]\r\n phi = 
elem.eval_elem(self.node_map, self.lst_rad[k],\r\n [eta])[0]\r\n inter1 = phi - 4.0 * sconst.pi * \\\r\n self.B_int_function(Tn, self.refr_idx_vol,\r\n vk, vk_m)\r\n inter2 = absorbtion * self.d_T / (self.diff_scale ** 2)\r\n F += inter2 * inter1\r\n return elem.funcs(eta) * F\r\n if not no_rad:\r\n # We're integrating something non-linear for SP1\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_cond,\r\n self.node_map)\r\n else:\r\n # Or something easier if we're only looking at heat.\r\n self.vF_vect_vol = np.array(self.uv_vol * lst_tmp).reshape(-1)\r\n else:\r\n def F_func_radiative(elem, eta):\r\n T = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n F = 4.0 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func_radiative,\r\n self.node_map)\r\n # The path integral vf:\r\n if self.cond == True:\r\n def f_func_cond(elem, eta):\r\n Tb = self.background_temperature\r\n Tn = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n n = self.refr_idx_background\r\n vk = self.v0_frequency\r\n vk_minus = 0\r\n Bb0 = self.B_int_function(Tb, n, vk, vk_minus)\r\n Bn0 = self.B_int_function(Tn, n, vk, vk_minus)\r\n B_coeff = (self.alpha * sconst.pi) / self.convect_coeff\r\n f = Tb + B_coeff * (Bb0 - Bn0)\r\n return elem.funcs(eta) * f\r\n if not no_rad:\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_cond,\r\n self.node_map)\r\n else:\r\n try:\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n except AttributeError:\r\n def elem_functor(elem, eta): return elem.funcs(eta)\r\n self.cache_tb_integral_array = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n elem_functor,\r\n self.node_map)\r\n self.cache_tb_integral_array *= self.background_temperature\r\n self.vf_vect_bound = self.cache_tb_integral_array\r\n \r\n else:\r\n # Radiation f = 4*pi*B^{(k)}(T_b, n_g)\r\n def f_func_radiative(elem, eta):\r\n T = self.background_temperature\r\n vk = self.fq_list[self.rad]\r\n try:\r\n vk_minus = self.fq_list[self.rad - 1]\r\n except:\r\n vk_minus = self.v0_frequency\r\n n = self.refr_idx_vol\r\n f = 4 * sconst.pi * self.B_int_function(T, n, vk, vk_minus)\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func_radiative,\r\n self.node_map)\r\n assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])\r\n assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])\r\n assert (self.vf_vect_bound.shape[0] == \\\r\n self.vF_vect_vol.shape[0])", "def solve(self):\n \n # Definition of the parameters\n Q_pc = self.parameters.getParam(\"Q_pc\")\n V_c = self.parameters.getParam(\"V_c\")\n V_p = self.parameters.getParam(\"V_p\")\n CL = self.parameters.getParam(\"CL\")\n initial_conditions = [\n self.parameters.getParam(\"q_c0\"),\n self.parameters.getParam(\"q_p0\"),\n ]\n t_eval = np.linspace(0, self.timespan, self.nsteps)\n\n # Definition of the model ODEs\n def pk_iv_model(t, y, Q_pc, V_c, V_p, CL):\n \"\"\"Defines the differential equations for the PK IV model.\n\n Parameters:\n :param t: time (h)\n :param y: list of the state variables of the ODEs system, in the\n form [q_c, q_p]\n :param Q_pc: transition rate between central and peripheral\n compartments (mL/h)\n :param V_c: volume of central compartment (mL)\n :param V_p: volume of peripheral 
compartment (mL)\n :param CL: clearance/elimination rate from the central\n compartment (mL/h)\n\n The parameters (except for t and y) are extracted from the\n Parameter class, using getParam method.\n\n Returns list containing the differential equations, in the form:\n [dqc_dt, dqp_dt]\n \"\"\"\n q_c, q_p = y\n transfer = Q_pc * (q_c / V_c - q_p / V_p)\n dqc_dt = self.dosefunction(t) - q_c / V_c * CL - transfer\n dqp_dt = transfer\n return [dqc_dt, dqp_dt]\n\n # Solving the model\n sol = scipy.integrate.solve_ivp(\n fun=lambda t, y: pk_iv_model(t, y, Q_pc, V_c, V_p, CL),\n t_span=[t_eval[0], t_eval[-1]],\n y0=initial_conditions,\n t_eval=t_eval,\n )\n\n # Feeding the solution line by line to solution class\n t = sol.t\n y = sol.y\n N = t.shape[0]\n columnNames = [\"t\", \"dose\", \"q_c\", \"q_p\"]\n self.solution.begin(columnNames, N)\n for i in range(N):\n arr = np.zeros((len(columnNames), 1))\n arr[0] = t[i]\n arr[1] = self.dosefunction(t[i])\n arr[2:, 0] = y[:, i]\n self.solution.report(arr)", "def variational_expectations(self, Fmu, Fvar, Y):\n\n gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)\n gh_x = gh_x.reshape(1, -1)\n gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)\n shape = tf.shape(Fmu)\n Fmu, Fvar, Y = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar, Y)]\n X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu\n Y = tf.tile(Y, [1, self.num_gauss_hermite_points]) # broadcast Y to match X\n\n logp = self.logp(X, Y)\n return tf.reshape(tf.matmul(logp, gh_w), shape)", "def test_var_exp(self):\n with self.test_context() as session:\n test_setups, F, feed = self.prepare()\n for test_setup in test_setups:\n l = test_setup.likelihood\n y = test_setup.Y\n l.compile()\n r1 = session.run(l.logp(F, y), feed_dict=feed)\n zero = F * 0.\n r2 = session.run(\n l.variational_expectations(F, zero, test_setup.Y), feed_dict=feed)\n assert_allclose(r1, r2, atol=test_setup.tolerance, rtol=test_setup.tolerance)", "def f_obs(x, *args):\n sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args\n # flag\n init_mult = 1000\n\n T = len(x)\n val = 0\n term1 = 0\n term2 = 0\n\n # term 3 and 4 for DIM\n term3 = 0\n term4 = 0\n\n sslm.obs[word] = x\n sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)\n\n mean = sslm.mean[word]\n variance = sslm.variance[word]\n\n # only used for DIM mode\n # w_phi_l = sslm.w_phi_l[word]\n # m_update_coeff = sslm.m_update_coeff[word]\n\n for t in range(1, T + 1):\n mean_t = mean[t]\n mean_t_prev = mean[t - 1]\n\n val = mean_t - mean_t_prev\n term1 += val * val\n term2 += word_counts[t - 1] * mean_t - totals[t - 1] * np.exp(mean_t + variance[t] / 2) / sslm.zeta[t - 1]\n\n model = \"DTM\"\n if model == \"DIM\":\n # stuff happens\n pass\n\n if sslm.chain_variance > 0.0:\n\n term1 = - (term1 / (2 * sslm.chain_variance))\n term1 = term1 - mean[0] * mean[0] / (2 * init_mult * sslm.chain_variance)\n else:\n term1 = 0.0\n\n final = -(term1 + term2 + term3 + term4)\n\n return final", "def tsne(x, no_dims=2, perplexity=30.0, max_iter=1000):\n\n # Check inputs\n if isinstance(no_dims, float):\n print(\"Error: array x should have type float.\")\n return -1\n\n (n, d) = x.shape\n\n # 动量\n initial_momentum = 0.5\n final_momentum = 0.8\n eta = 500\n min_gain = 0.01\n # 随机初始化Y\n y = np.random.randn(n, no_dims)\n # dy梯度\n dy = np.zeros((n, no_dims))\n # iy是什么\n iy = np.zeros((n, no_dims))\n\n gains = np.ones((n, no_dims))\n\n # 对称化\n P = seach_prob(x, 1e-5, perplexity)\n P = P + np.transpose(P)\n P = P / np.sum(P) #pij\n # early exaggeration\n # pi\\j,提前夸大\n print 
(\"T-SNE DURING:%s\" % time.clock())\n P = P * 4\n P = np.maximum(P, 1e-12)\n\n # Run iterations\n for iter in range(max_iter):\n # Compute pairwise affinities\n sum_y = np.sum(np.square(y), 1)\n num = 1 / (1 + np.add(np.add(-2 * np.dot(y, y.T), sum_y).T, sum_y))\n num[range(n), range(n)] = 0\n Q = num / np.sum(num) #qij\n Q = np.maximum(Q, 1e-12) #X与Y逐位比较取其大者\n\n # Compute gradient\n # np.tile(A,N) 重复数组AN次 [1],5 [1,1,1,1,1]\n # pij-qij\n PQ = P - Q\n # 梯度dy\n for i in range(n):\n dy[i,:] = np.sum(np.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (y[i,:] - y), 0)\n\n # Perform the update\n if iter < 20:\n momentum = initial_momentum\n else:\n momentum = final_momentum\n\n gains = (gains + 0.2) * ((dy > 0) != (iy > 0)) + (gains * 0.8) * ((dy > 0) == (iy > 0))\n gains[gains < min_gain] = min_gain\n # 迭代\n iy = momentum * iy - eta * (gains * dy)\n y = y + iy\n y = y - np.tile(np.mean(y, 0), (n, 1))\n # Compute current value of cost function\\\n if (iter + 1) % 100 == 0:\n C = np.sum(P * np.log(P / Q))\n print(\"Iteration \", (iter + 1), \": error is \", C)\n if (iter+1) != 100:\n ratio = C/oldC\n print(\"ratio \", ratio)\n if ratio >= 0.95:\n break\n oldC = C\n # Stop lying about P-values\n if iter == 100:\n P = P / 4\n print(\"finished training!\")\n return y", "def forward(self,y,xt,D,h,param):\n \n \n h_flip=h.flip(2,3)\n a=conv(h,xt)-y \n b=xt-self.learning_rate*conv(h_flip,a)\n u=tor.matmul(D,tor.matmul(b,D.transpose(2,3)))\n u_=rho(u,param)\n xt1=tor.matmul(D.transpose(2,3),tor.matmul(u_,D))\n \n return xt1", "def den_evolve(self, delt, txp, src):\n self.ne += (-txp.dfluxe + src.se)*delt\n self.ni += (-txp.dfluxi + src.si)*delt", "def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids] = self.fluid_func()\n k += self.num_nw_fluids\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k] = self.mass_flow_func()\n k += 1\n\n ######################################################################\n # equations for specified heta transfer\n if self.Q.is_set:\n self.residual[k] = self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio\n if self.pr.is_set:\n self.residual[k] = (\n self.inl[0].p.val_SI * self.pr.val - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta\n if self.zeta.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(zeta='zeta')\n k += 1\n\n ######################################################################\n # equation for specified hydro-group paremeters\n if self.hydro_group.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n self.residual[k] = func()\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)", "def cubic_evolve(self,nt=1):\n #loop through time steps\n for l in range(nt):\n # temporary array\n y_temp = np.zeros(self.y.shape[0])\n # loop through array\n for i in range(self.y.shape[0]):\n # idx left to departure point\n 
x_dep = self.x[i]-self.u[i]*self.dt\n j = int(np.floor(x_dep/self.dx))\n # alpha\n a = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n # calculate next time step\n f = lambda x: x % self.y.shape[0] if x >= self.y.shape[0] else x\n y_temp[i] = - a * (1-a)*(2-a)/6 * self.y[f(j-1)]\n y_temp[i] += (1-a**2)*(2-a)/2 * self.y[f(j)]\n y_temp[i] += a*(1+a)*(2-a)/2 * self.y[f(j+1)]\n y_temp[i] -= a*(1-a**2)/6 * self.y[f(j+2)]\n self.y = np.copy(y_temp)\n return self.y", "def forward(self, inputs):\n inputs = inputs.view(-1, self.nVar)\n x, y, z = inputs[:, 0], inputs[:, 1], inputs[:, 2]\n output = torch.zeros_like(inputs)\n s = self.sigma\n r = self.r\n b = self.b\n output[:, 0] = s * (y-x)\n output[:, 1] = x * (r-z) - y\n output[:, 2] = x*y - b*z\n return output", "def _sesolve_func_td(H_func, psi0, tlist, e_ops, args, opt, progress_bar):\n if debug:\n print(inspect.stack()[0][3])\n\n #\n # check initial state or oper\n #\n if psi0.isket:\n oper_evo = False\n elif psi0.isunitary:\n oper_evo = True\n else:\n raise TypeError(\"The unitary solver requires psi0 to be\"\n \" a ket as initial state\"\n \" or a unitary as initial operator.\")\n\n if opt.moving_mode_indices is None:\n n_modes = len(psi0.dims[0])\n opt.moving_mode_indices = np.arange(n_modes)\n\n #\n # setup integrator\n #\n new_args = None\n\n if type(args) is dict:\n new_args = {}\n for key in args:\n if isinstance(args[key], Qobj):\n new_args[key] = args[key].data\n else:\n new_args[key] = args[key]\n\n elif type(args) is list or type(args) is tuple:\n new_args = []\n for arg in args:\n if isinstance(arg, Qobj):\n new_args.append(arg.data)\n else:\n new_args.append(arg)\n\n if type(args) is tuple:\n new_args = tuple(new_args)\n else:\n if isinstance(args, Qobj):\n new_args = args.data\n else:\n new_args = args\n\n if oper_evo:\n initial_vector = operator_to_vector(psi0).full().ravel()\n # Check that function returns superoperator\n if H_func(0, args).issuper:\n L_func = H_func\n else:\n L_func = lambda t, args: spre(H_func(t, args))\n\n else:\n new_psi0 = psi0\n initial_vector = new_psi0.full().ravel()\n L_func = H_func\n if opt.moving_basis:\n initial_mode_displacements = np.zeros(len(opt.moving_mode_indices), dtype=complex)\n for mode_idx in opt.moving_mode_indices:\n identities = [qeye(dim) for dim in psi0.dims[0]]\n a_op_components = identities\n a_op_components[mode_idx] = destroy(psi0.dims[0][mode_idx])\n a_op = tensor(a_op_components)\n a_expect = expect(a_op, psi0)\n initial_mode_displacements[mode_idx] = a_expect\n displacement_components = identities\n displacement_components[mode_idx] = displace(psi0.dims[0][mode_idx], -a_expect)\n displacement_op = tensor(displacement_components)\n new_psi0 = displacement_op * new_psi0\n initial_vector = np.hstack([initial_vector, initial_mode_displacements])\n\n if not opt.rhs_with_state:\n print('Using cython function.')\n r = scipy.integrate.ode(cy_ode_psi_func_td)\n else:\n if opt.moving_basis:\n r = scipy.integrate.ode(_ode_psi_func_td_with_state_moving_basis)\n else:\n r = scipy.integrate.ode(_ode_psi_func_td_with_state)\n\n r.set_integrator('zvode', method=opt.method, order=opt.order,\n atol=opt.atol, rtol=opt.rtol, nsteps=opt.nsteps,\n first_step=opt.first_step, min_step=opt.min_step,\n max_step=opt.max_step)\n r.set_initial_value(initial_vector, tlist[0])\n if opt.moving_basis:\n r.set_f_params(L_func, new_args, opt, psi0.dims)\n else:\n r.set_f_params(L_func, new_args)\n\n #\n # call generic ODE code\n #\n return _generic_ode_solve(r, psi0, tlist, e_ops, opt, progress_bar,\n 
dims=psi0.dims)", "def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot", "def obj_u_opt_N_opt(u, T, alpha, B, N, num_tumor_voxels, Td = 2):\n x = T.dot(u)\n alpha_tilde = alpha #np.repeat(N, num_tumor_voxels)*alpha\n B_tilde = B #np.repeat(N, num_tumor_voxels)*B\n #Note that all modalities must have the same number of tumor voxels:\n return alpha_tilde.T.dot(x) - x.T.dot(B_tilde*x) + num_tumor_voxels*(np.sum(N)-1)*(np.log(2)/Td)", "def du(self):\n du = setup_nonlinear_model_du() # Ux, Uy, theta_p\n return du", "def compute_hessian(self, dw, trn_X, trn_y, epsilon: float = 0.01):\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = epsilon / norm\n\n dalpha_pos = self.finite_difference(dw, trn_X, trn_y, eps, wrt='alpha')\n dalpha_neg = self.finite_difference(dw, trn_X, trn_y, -eps, wrt='alpha')\n hessian = [(p - n) / (2. * eps) for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian", "def J(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n df1du = 2*u*g3**2 - 2*g3*u0 + 2*g3*coeffs[3]*(g1*u1-u0) + 2*g3*coeffs[4]*(g2*u2-u0)\n df1dv = -2*v*g3**2 + 2*g3*v0 - 2*g3*coeffs[3]*(g1*v1-v0) - 2*g3*coeffs[4]*(g2*v2-v0)\n df1dg1 = 2*g1*coeffs[0]*(u1**2-v1**2) + 2*(v1*v0-u1*u0)*(coeffs[0]+coeffs[1]+coeffs[3]) + 2*g2*coeffs[1]*(u1*u2-v1*v2) + 2*g3*coeffs[3]*(u1*u-v1*v)\n df1dg2 = 2*g2*coeffs[2]*(u2**2-v2**2) + 2*(v2*v0-u2*u0)*(coeffs[1]+coeffs[2]+coeffs[4]) + 2*g1*coeffs[1]*(u1*u2-v1*v2) + 2*g3*coeffs[4]*(u2*u-v2*v)\n df1dg3 = 2*g3*(u**2-v**2) + 2*(v*v0-u*u0)*(coeffs[3]+coeffs[4]+1) + 2*g1*coeffs[3]*(u1*u-v1*v) + 2*g2*coeffs[4]*(u2*u-v2*v)\n\n df2du = 0\n df2dv = 2*v*g3**2 + 2*g3*(-v0 + coeffs[3]*(g1*v1-v0) + coeffs[4]*(g2*v2-v0))\n df2dg1 = 2*g1*coeffs[0]*(v1**2-1) + 2*(1-v1*v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + 2*g2*coeffs[1]*(v1*v2-1) + 2*g3*coeffs[3]*(v1*v-1)\n df2dg2 = 2*g2*coeffs[2]*(v2**2-1) + 2*(1-v2*v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + 2*g1*coeffs[1]*(v1*v2-1) + 2*g3*coeffs[4]*(v2*v-1)\n df2dg3 = 2*g3*(v**2-1) + 2*(1-v*v0)*(coeffs[3]+coeffs[4]+1) + 2*g1*coeffs[3]*(v1*v-1) + 2*g2*coeffs[4]*(v2*v-1)\n\n df3du = g3*coeffs[3]*(g1*v1-v0) + g3*coeffs[4]*(g2*v2-v0) + g3*(g3*v-v0)\n df3dv = g3*coeffs[3]*(g1*u1-u0) + g3*coeffs[4]*(g2*u2-u0) + g3*(g3*u-u0)\n df3dg1 = 2*g1*coeffs[0]*u1*v1 - (v1*u0+u1*v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(u1*v2+v1*u2) + g3*coeffs[3]*(v1*u+u1*v)\n df3dg2 = 2*g2*coeffs[2]*u2*v2 - (v2*u0+u2*v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(u1*v2+v1*u2) + g3*coeffs[4]*(v2*u+u2*v)\n df3dg3 = 2*g3*u*v - (u*v0+v*u0)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(v1*u+u1*v) + g2*coeffs[4]*(v2*u+u2*v)\n\n df4du = g3*coeffs[3]*(g1-1) + g3*coeffs[4]*(g2-1) + g3*(g3-1)\n df4dv = 0\n df4dg1 = 2*g1*coeffs[0]*u1 - (u0+u1)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(u1+u2) + g3*coeffs[3]*(u+u1)\n df4dg2 = 2*g2*coeffs[2]*u2 - (u0+u2)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(u1+u2) + g3*coeffs[4]*(u+u2)\n df4dg3 = 2*g3*u - (u+u0)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(u+u1) + g2*coeffs[4]*(u+u2)\n\n df5du = 0\n df5dv = g3*coeffs[3]*(g1-1) + g3*coeffs[4]*(g2-1) + g3*(g3-1)\n df5dg1 = 2*g1*coeffs[0]*v1 - (v1+v0)*(coeffs[0]+coeffs[1]+coeffs[3]) + g2*coeffs[1]*(v2+v1) + g3*coeffs[3]*(v1+v)\n df5dg2 = 
2*g2*coeffs[2]*v2 - (v2+v0)*(coeffs[1]+coeffs[2]+coeffs[4]) + g1*coeffs[1]*(v2+v1) + g3*coeffs[4]*(v2+v)\n df5dg3 = 2*g3*v - (v0+v)*(coeffs[3]+coeffs[4]+1) + g1*coeffs[3]*(v1+v) + g2*coeffs[4]*(v2+v)\n\n return np.array([\n [df1du, df1dv, df1dg1, df1dg2, df1dg3],\n [df2du, df2dv, df2dg1, df2dg2, df2dg3],\n [df3du, df3dv, df3dg1, df3dg2, df3dg3],\n [df4du, df4dv, df4dg1, df4dg2, df4dg3],\n [df5du, df5dv, df5dg1, df5dg2, df5dg3],\n ])", "def LiftCoeff(h,Vc,Temp_m,W,S):\n T,p,rho = isa(h)\n return W*g0/(0.5*rho*VTrue(h,Vc,p,Temp_m)**2*S)", "def newtonian_profile(PSI):\n\n U = dot(MDX, PSI)\n V = - dot(MDY, PSI)\n VGRAD = dot(U,MDX) + dot(V,MDY)\n\n BPFEQNS = zeros((3*vecLen, 3*vecLen), dtype='D')\n # Cxx eqn\n # Cxx\n BPFEQNS[0:vecLen, 0:vecLen] = Nu*MDX - VGRAD \\\n + 2*tsm.c_prod_mat(dot(MDX,U)) - oneOverWi*II\n # Cyy\n BPFEQNS[0:vecLen, vecLen:2*vecLen] = 0\n # Cxy\n BPFEQNS[0:vecLen, 2*vecLen:3*vecLen] = 2*tsm.c_prod_mat(dot(MDY, U))\n # Cyy eqn\n # Cxx\n BPFEQNS[vecLen:2*vecLen, 0:vecLen] = 0\n # Cyy\n BPFEQNS[vecLen:2*vecLen, vecLen:2*vecLen] = Nu*MDX - VGRAD - oneOverWi*II\\\n + 2.*tsm.c_prod_mat(dot(MDY, V))\n # Cxy\n BPFEQNS[vecLen:2*vecLen, 2*vecLen:3*vecLen] = 2.*tsm.c_prod_mat(dot(MDX, V))\n #Cxy eqn\n # Cxx\n BPFEQNS[2*vecLen:3*vecLen, 0:vecLen] = tsm.c_prod_mat(dot(MDX, V))\n # Cyy \n BPFEQNS[2*vecLen:3*vecLen, vecLen:2*vecLen] = tsm.c_prod_mat(dot(MDY, U))\n # Cxy\n BPFEQNS[2*vecLen:3*vecLen, 2*vecLen:3*vecLen] = Nu*MDX - VGRAD - oneOverWi*II \n\n RHS = zeros(3*vecLen, dtype='D')\n RHS[0] = -oneOverWi\n RHS[vecLen] = -oneOverWi\n RHS[2*vecLen:3*vecLen] = 0\n\n soln = linalg.solve(BPFEQNS, RHS)\n\n Cxx = soln[0:vecLen]\n Cyy = soln[vecLen:2*vecLen]\n Cxy = soln[2*vecLen:3*vecLen]\n\n return Cxx, Cyy, Cxy", "def variational_objective(params, t, num_samples, beta=1.):\n\n # 1. draw samples from the variational posterior, eps ~ N(0,I)\n zs, ldet_sums = draw_variational_samples(params, num_samples)\n\n # 1.5 negative entropy of z0 --- likely we need this for KL though\n # not needed for optimization\n\n # 2. compute expected value of the sum of jacobian terms\n E_ldet_sum = np.mean(ldet_sums)\n\n # 3. compute data term\n lls = logprob(zs, t)\n E_logprob = np.mean(lls)\n\n if debug_print:\n print \"entropy term: \", E_ldet_sum\n print \"data term : \", E_logprob, \" (+/- \", np.std(lls), \")\", \" min = \", np.min(lls)\n\n # return lower bound\n beta = 1. 
if t >= len(beta_schedule) else beta_schedule[t]\n lower_bound = beta * E_logprob + E_ldet_sum\n return -lower_bound", "def dewT_2_q_magnus(ds, var):\n A1, B1, C1 = 17.625, 243.04, 610.94\n vpsl = C1 * np.exp(A1 * (ds[var['dew']] - 273.15) / (B1 + (ds[var['dew']] - 273.15)))\n wsl = eps0 * vpsl / (ds[var['pressure']] - vpsl)\n ds[var['spec_h']] = wsl / (1 + wsl)\n return ds", "def ev(knotvec, coeffs, u):\n assert len(coeffs) == knotvec.numdofs, 'Wrong size of coefficient vector'\n return scipy.interpolate.splev(u, (knotvec.kv, coeffs, knotvec.p))", "def linear_evolve(self,nt=1):\n for l in range(nt):\n y_temp = np.empty(self.y.shape[0])\n for i in range(self.y.shape[0]):\n \n # idx left to the departure point\n j = int(np.floor((self.x[i]-self.u[i]*self.dt)/self.dx))\n # idx right to the departure point\n k = j+1\n print i, j, k\n # linear interpolation\n alpha = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n y_temp[i] = (1-alpha)*self.y[j] + alpha*self.y[k]\n # copy array to current time\n self.y = np.copy(y_temp)\n stop\n #return current varibale\n return self.y", "def V_phi(self, x):\n\n x = self.featureExtractor.getFeatures(x)\n\n x = torch.tensor(x).float()\n\n x = F.relu(self.linear(x))\n\n v = self.linear_v(x)\n\n return v", "def none_model_vars():\n obj_fun = None\n time_sol = None\n gap = None\n s_pos = None\n b_target = None\n threads = None\n\n return obj_fun, time_sol, gap, s_pos, b_target, threads", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def get_dphi_dx_fast(self, sess, x_train):\n dphi_dx = tf.gradients(self.featurizations, self.x)\n feed_dict = {self.x: x_train}\n dphi_dx_np = sess.run(dphi_dx, feed_dict = feed_dict)\n return dphi_dx_np", "def get_dphi_dx(self, sess, x_test):\n self.logger.debug(\"Finding dphi dx\")\n n, d = x_test.shape\n\n #TODO: Remove the hardcoding from here\n featurization_sliced = 
self.slice_tensor_one_d(self.featurizations, 32)\n num_hidden_last_layer = len(featurization_sliced)\n self.logger.debug(\"Completed slicing\")\n\n run_list = []\n for i in range(len(featurization_sliced)):\n dphi_i_dx = tf.gradients(featurization_sliced[i], self.x)\n run_list.append(dphi_i_dx)\n\n self.logger.debug(\"Found run list\")\n feed_dict = {self.x: x_test}\n dphi_dx_np = sess.run(run_list, feed_dict = feed_dict)\n dphi_dx_np = np.array(dphi_dx_np).squeeze()\n\n self.logger.debug(\"Found grads\")\n #dphi_dx_np = dphi_dx_np.reshape((n, num_hidden_last_layer, d))\n dphi_dx_np = dphi_dx_np.swapaxes(0, 1)\n return dphi_dx_np", "def malthusiens(nb_init, t0, tf, eps, methode, gamma ) :\n\n f=lambda y, t : gamma*y\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def additional_derivatives(self, increment_filter, k):\n ######################################################################\n # derivatives for saturated liquid at hot side outlet equation\n if self.subcooling.val is False:\n o1 = self.outl[0].to_flow()\n self.jacobian[k, 2, 1] = -dh_mix_dpQ(o1, 0)\n self.jacobian[k, 2, 2] = 1\n k += 1", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. * np.pi)", "def set_solver_parameters(m, gamma, horizon, my_vars, timeout=30 * 60, pre_solve=-1):\n h = ext.get_set_time(horizon)\n beta = get_var(my_vars, 'beta')\n m.setObjective(quicksum((gamma ** t) * beta[0, t] for t in h), GRB.MAXIMIZE)\n\n m.setParam('TimeLimit', timeout)\n m.setParam('Threads', 8)\n m.setParam('Presolve', pre_solve)\n m.setParam('OutputFlag', 0)" ]
[ "0.80656624", "0.5865987", "0.5693037", "0.5615564", "0.55575585", "0.55294347", "0.55230105", "0.5521183", "0.5503675", "0.5450836", "0.540424", "0.53514105", "0.5325137", "0.53189623", "0.5317447", "0.5312849", "0.5308044", "0.5304588", "0.530239", "0.5301694", "0.5296928", "0.5287328", "0.52784234", "0.5275083", "0.52576554", "0.5250537", "0.5234691", "0.5217848", "0.5201104", "0.5196678", "0.5185753", "0.5178583", "0.51640326", "0.5152025", "0.5151244", "0.51207054", "0.5116701", "0.51075786", "0.5098229", "0.5094568", "0.509096", "0.50907564", "0.50852835", "0.508479", "0.5083289", "0.50821745", "0.5081304", "0.5077086", "0.50754094", "0.5074996", "0.5074658", "0.5070279", "0.50655884", "0.50642943", "0.5061261", "0.5057435", "0.50556505", "0.50451666", "0.50442165", "0.5028064", "0.5025042", "0.5021877", "0.50210303", "0.50141925", "0.5013818", "0.50130904", "0.50090134", "0.5006547", "0.50057006", "0.50003946", "0.49994954", "0.49975103", "0.49951848", "0.4994092", "0.49927926", "0.49904364", "0.4987641", "0.49866462", "0.49824142", "0.49808532", "0.4973733", "0.4969949", "0.4965457", "0.49650878", "0.49636203", "0.4962349", "0.4961448", "0.49608457", "0.4960218", "0.4957515", "0.49566132", "0.49551055", "0.4952623", "0.49514017", "0.49489293", "0.4947147", "0.4944185", "0.4943071", "0.49403766", "0.4939456" ]
0.73483014
1
find the lcl (in m) for a row in the dataframe
def calc_lcl(row,psfc): Tdew = tf.tmr(row['qv'],psfc) LCL = tf.LCL(Tdew,row['theta'],psfc) #kPa # # rough approximation: 10 kPa = 1 km # delp=psfc - LCL lcl_h = delp*100. return lcl_h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_index_lm(l, m):\n return (l+1)**2 -1 -l + m", "def compute_kl(self, df):\n value_counts = [df[col].value_counts() for col in self.hist_cols]\n next_hists = self.value_counts_to_hists(value_counts)\n\n if self.prev_hists is None:\n self.prev_hists = next_hists\n return None\n\n output = []\n for prev_h, curr_h in zip(self.prev_hists, next_hists):\n for i in range(len(prev_h)):\n prev_h[i] = prev_h[i] if prev_h[i] != 0 else 1\n curr_h[i] = curr_h[i] if curr_h[i] != 0 else 1\n kl = entropy(prev_h, curr_h)\n output.append(kl)\n\n self.prev_hists = next_hists\n return output", "def lfindwithin (data):\r\n\r\n numfact = len(data[0])-1\r\n withinvec = 0\r\n for col in range(1,numfact):\r\n examplelevel = pstats.unique(pstats.colex(data,col))[0]\r\n rows = pstats.linexand(data,col,examplelevel) # get 1 level of this factor\r\n factsubjs = pstats.unique(pstats.colex(rows,0))\r\n allsubjs = pstats.unique(pstats.colex(data,0))\r\n if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?\r\n withinvec = withinvec + (1 << col)\r\n return withinvec", "def cal_L(self):\n # calculate the l matrix\n self.point_matrixs = self.point_matrix.reshape(\n self.point_matrix.shape[0], 1, self.point_matrix.shape[1])\n self.point_matrixs = np.tile(self.point_matrixs,\n (self.attach_points.shape[0], 1))\n self.attach_points_matrix = np.matlib.repmat(\n self.attach_points[:, 0:3], self.point_matrix.shape[0], 1)\n self.attach_points_matrix = self.attach_points_matrix.reshape(\n self.point_matrix.shape[0], self.attach_points.shape[0], 3)\n self.L = np.subtract(self.attach_points_matrix,\n self.point_matrixs)\n # self.L[:,self.attach_points[:,3]==1,:] = \\\n # - self.L[:,self.attach_points[:,3]==1,:]\n # print(self.L)", "def calculate_recovery_clifford(cl_in, desired_cl=0):\n row = list(clifford_lookuptable[cl_in])\n return row.index(desired_cl)", "def calc_Lr(rho,mld,f,g=9.8,po=1027.):\n n2ml=np.ndarray(len(rho[1,:-1]))\n for i in range(len(rho[1,:-1])):\n n2ml[i]=-(g/po)*((rho[15,i]-rho[np.int8(mld[i])+15,i])/mld[i])\n Lr=(np.sqrt(n2ml)*mld[:-1])/f\n\n return Lr", "def find_lcs(l1: str, l2: str, length1: int, length2: int):\n \"\"\" Theorem:{\n Initialize matrix with 0 for first row and colm\n If s1[i] = s2[j], update matrix[i][j] with value\n of matrix[i-1][j-1]+1\n Else update matrix[i][j] with max of value among\n matrix[i][j-1],matrix[i-1][j]\n Matrix[n][m] will be lcs\n }\n \"\"\"\n matrix = [[None]*(length1+1) for i in range(0, length2+1)]\n for i in range(0, length2+1):\n for j in range(0, length1+1):\n if i == 0 or j == 0:\n matrix[i][j] = 0\n elif l1[j-1] == l2[i-1]:\n matrix[i][j] = matrix[i-1][j-1] + 1\n else:\n matrix[i][j] = max(matrix[i][j-1], matrix[i-1][j])\n lcs = [None for i in range(0, matrix[length2][length1])]\n index = matrix[length2][length1]\n m = length2 \n n = length1\n while(m > -1 and n > -1):\n if l2[m-1] == l1[n-1]:\n lcs[index-1] = l2[m-1]\n index -= 1\n m -= 1\n n -= 1\n elif matrix[m-1][n] > matrix[m][n-1]:\n m -= 1\n else:\n n -= 1\n return lcs", "def get_lims(data):\n return data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1", "def _compute_lcs(source, target):\n table = _lcs_table(source, target)\n return _backtrack(table, source, target, len(source), len(target))", "def getCL(self):\r\n return self.cL;", "def calculate_LC(read):\n k_values = []\n for i in range(1,len(read)+1):\n k_values.append(i)\n observed_kmers = []\n for i in k_values:\n observed_kmers.append((count_kmers_observed(read, i)))\n possible_kmers = []\n 
for i in k_values:\n possible_kmers.append(count_kmers_possible(read, i))\n df = pd.DataFrame(list(zip(k_values, observed_kmers, possible_kmers)), columns = ['k','observed kmers','possible kmers'])\n df.at['Total', 'observed kmers'] = df['observed kmers'].sum()\n df.at['Total', 'possible kmers'] = df['possible kmers'].sum()\n x = int(df.at['Total', 'observed kmers'])\n y = int(df.at['Total', 'possible kmers'])\n LC = (x/y)\n return(LC)", "def getLPos(self):\n c = 0\n while c <= ALIENS_IN_ROW-1:\n i = 0\n for a in range(ALIEN_ROWS):\n if self._aliens[a][c] != None:\n return self._aliens[a][c].x - ALIEN_WIDTH/2\n else:\n i +=1\n if i == ALIEN_ROWS:\n c +=1", "def get_L(self, tolerance=None):\r\n index = self.data.index\r\n columns = self.data.columns\r\n\r\n # Obtain the eigenvalues and eigenvectors\r\n E, V = sandy.CategoryCov(self.data).get_eig(tolerance=tolerance)\r\n\r\n # need sparse because much faster for large matrices (2kx2k from J33 Pu9)\r\n # with a lot of zero eigs\r\n # this is exactly equivalent to V.values @ np.diag(np.sqrt(E.values))\r\n A = (sparse.csr_matrix(V.values) @ sparse.csr_matrix(np.diag(np.sqrt(E.values)))).todense()\r\n \r\n# u, s, vh = self.get_svd()\r\n# A = (sparse.csr_matrix(u) @ sparse.csr_matrix(np.diag(np.sqrt(s)))).todense()\r\n\r\n # QR decomposition\r\n Q, R = scipy.linalg.qr(A.T)\r\n L = R.T\r\n\r\n return pd.DataFrame(L, index=index, columns=columns)", "def get_l(m):\n L = m.copy()\n for i in range(L.shape[0]):\n L[i, i] = 1\n L[i, i+1:] = 0\n return np.matrix(L)", "def calcLnL(self, tree):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.calcLnL(self, tree)", "def lll(self, delta=0.75):\n if self.domain != ZZ:\n raise DMDomainError(\"ZZ expected, got %s\" % self.domain)\n elif self.rows > self.cols:\n raise DMShapeError(\"Matrix must not have more rows than columns.\")\n\n rep = self._lll(delta=delta)\n return self._new_rep(rep)", "def generate_LLL_matrix(self, matrix):\n LLL_matrix = matrix.transpose().LLL().transpose()\n return LLL_matrix", "def extract_hillslope_profile(node_matrix):\n ncols = numpy.size(node_matrix, 1)\n z = numpy.zeros(ncols)\n for col in range(ncols):\n dirt = numpy.where(node_matrix[:,col]!=0)[0]\n if len(dirt)>0:\n z[col] = numpy.amax(dirt)\n return z", "def log_likelihoods(self):\n return self.__data_frame.loc[:, \"ll\":\"ll\"].values[:-1]", "def recommend_L(self):\n\n min_eigenvalue = torch.min(torch.linalg.eigvalsh(self.Wc))\n lamBda = 1 / (1 + 4*torch.abs(min_eigenvalue - self.vars['q_init']))\n return lamBda", "def getL(self):\r\n return self.L", "def _getPMI(self, df, targetColname):\n pmi = 0\n search_term = df[targetColname]\n noofterms = len(search_term)\n startindex = 0\n pmiAccumulate = 0\n if(noofterms>1):\n for i in range(0,noofterms-1):\n pmi = self.computePMI(search_term[i],search_term[i+1])\n pmiAccumulate = pmiAccumulate+pmi\n pmiAccumulate = pmiAccumulate/noofterms\n pmi = pmiAccumulate\n return pmi", "def Findlt(l,sp,rhs):\n m = sp.M(l)\n return (m / l**3) - rhs", "def calcLnLFromNode(self, nd):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return TreeLikelihoodBase.calcLnLFromNode(self, nd)", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def _lcs(x, y):\n n, m = len(x), len(y)\n 
table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def extract_ltri( m, context = FloatContext ):\n rows, cols = shape_mat(m)\n return [ row[:i+1] + [context.zero]*(cols - i - 1) \n for i, row in enumerate(m) ]", "def get_number_of_rows_and_columns(m):\n\n r = int(np.sqrt(m))\n c = m // r if np.mod(m, r) == 0 else m // r + 1\n return r, c", "def magma_cgels(trans, m, n, nrhs, A, lda, B, ldb, hwork, lwork):\n info = c_int_type()\n trans = _trans_conversion[trans]\n status = _libmagma.magma_cgels(trans, m, n, nrhs, int(A), lda,\n int(B), ldb, int(hwork), lwork,\n ctypes.byref(info))\n magmaCheckStatus(status)", "def KL2kL(NL, KL, BL):\n # cycle through BL, finding matching inds in NL and thus KL\n # for row in BL, get KL value in row BL[i,0] and col where(NL[BL[i,0],:]==BL[i,1])[0]\n if (BL < 0).any():\n aBL = np.abs(BL)\n kL = np.array([KL[aBL[i, 0], np.where(NL[aBL[i, 0], :] == aBL[i, 1])[0]][0] for i in range(len(aBL))])\n kL2 = np.array([KL[aBL[i, 1], np.where(NL[aBL[i, 1], :] == aBL[i, 0])[0]][0] for i in range(len(aBL))])\n if np.abs(kL - kL2).any() > 1e-6:\n raise RuntimeError('KL is not properly symmetric! KL[i, j neighbor] != KL[j neighbor, i]')\n else:\n kL = np.array([KL[BL[i, 0], np.where(NL[BL[i, 0], :] == BL[i, 1])[0]][0] for i in range(len(BL))])\n return kL", "def get_data_lcc(data):\n data_lcc = deepcopy(data)\n edge_index_sparse = edge_index_2_csr(data.edge_index, data.num_nodes)\n lcc = largest_connected_components(edge_index_sparse)\n edge_index_lcc_sparse = edge_index_sparse[lcc][:, lcc].tocoo()\n data_lcc.edge_index = torch.stack(list(to_Variable(edge_index_lcc_sparse.row, edge_index_lcc_sparse.col))).long()\n\n data_lcc.x = data.x[lcc]\n data_lcc.y = data.y[lcc]\n data_lcc.train_mask = data.train_mask[lcc]\n data_lcc.val_mask = data.val_mask[lcc]\n data_lcc.test_mask = data.test_mask[lcc]\n return data_lcc", "def cov_l2cov_lmin(C_l) -> np.array:\n def isDiag(M):\n i, j = M.shape\n assert i == j \n test = M.reshape(-1)[:-1].reshape(i-1, j+1)\n return ~np.any(test[:, 1:])\n\n def invdiagmat(C):\n import copy\n ret = copy.deepcopy(C)\n row, col = np.diag_indices(ret.shape[0])\n ret[row, col] = 1/np.diag(ret)\n return ret\n\n elaw = np.ones(C_l.shape[-1])\n if isDiag(C_l):\n inv = invdiagmat(C_l)\n else:\n inv = np.linalg.inv(C_l)\n\n cov_minimal = elaw @ inv @ elaw\n return 1/cov_minimal", "def gev_ll(loc,c,scale):\n \n def gev_logp(value):\n scaled = (value - loc) / scale\n logp = -(scale\n + ((c + 1) / c) * tt.log1p(c * scaled)\n + (1 + c * scaled) ** (-1/c))\n alpha = loc - scale / c\n \n # If the value is greater than zero, then check to see if \n # it is greater than alpha. Otherwise, check to see if it \n # is less than alpha.\n bounds = tt.switch(value > 0, value > alpha, value < alpha)\n \n # The returned array will have entries of -inf if the bound\n # is not satisfied. 
This condition says \"if c is zero or\n # value is less than alpha, return -inf and blow up \n # the log-likelihood.\n return bound(logp, bounds, c != 0)\n return gev_logp", "def LC(f):\n return dmp_ground_LC(f.rep, f.lev, f.dom)", "def get_k(df):\n\n return int(np.log(len(df.columns)))", "def lcs_le(x: List, y: List) -> Tuple[Matrix, atrix]:\n m = len(x)\n n = len(y)\n lcs_matrix = [[None]*(n+1) for i in range(m+1)\n # each index is a optimal solution for each subproblem\n direction_matrix = [[None]*n for i in range(m)]\n # if either indecd is 0 then each element is 0\n for i n ranage(1, m+1):\n lcs_matrix[i][0] = 0\n for j in range(n+1):\n lcs_matrix[0][j] = 0\n for i in range(m):\n for j in range(n):\n if x[i] == y[j]:\n lcs_matrix[i+1][j+1] = lcs_matrix[i][j]+1\n direction_matrix[i][j] = Direction.UPPER_LEFT\n elif lcs_matrix[i][j+1] >= lcs_matrix[i+1][j]:\n lcs_matrix[i+1][j+1] = lcs_matrix[i][j+1]\n direction_matrix[i][j] = Direction.UP\n else:\n lcs_matrix[i+1][j+1] = lcs_matrix[i+1][j]\n direction_matrix[i][j] = Direction.LEFT\n return lcs_matrix, index_matrix", "def glcm_stat_dissimilarity(glcm_matrix):\n it = np.nditer(glcm_matrix, flags=['multi_index'])\n accum = 0\n while (not it.finished):\n accum += glcm_matrix[it.multi_index] * abs(np.diff(it.multi_index))\n it.iternext()\n return accum", "def llf(self):\n return self.model.loglike(self.params)", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def _lcs(x, y):\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table", "def get_l_interface(n_v,n_c, neighbours, vs, CV_matrix,L):\n h_j = np.empty((n_v, 3, 2))\n for i in range(3):\n h_j[:, i] = vs\n h_jp1 = np.dstack((roll_reverse(neighbours[:,:,0]),roll_reverse(neighbours[:,:,1])))\n l = np.mod(h_j - h_jp1 + L/2,L) - L/2\n l = np.sqrt(l[:,:,0]**2 + l[:,:,1]**2)\n LI = np.zeros((n_c,n_c),dtype=np.float32)\n for i in range(3):\n LI+= np.asfortranarray(l[:,i]*CV_matrix[:,:,i])@np.asfortranarray(CV_matrix[:,:,np.mod(i+2,3)].T)\n return LI", "def get_L(self):\n return self.get_par('XLAMBDA_0')", "def estimate_L(da):\n from statsmodels.tsa.stattools import acf\n \n def acf_lag1(x):\n if np.sum(~np.isnan(x)) == 0: # if all NaNs\n return np.nan\n else:\n x = x[~np.isnan(x)]\n return acf(x, nlags=1)[-1]\n \n n = len(da.time.values)\n \n # DataArray of lag1 ACF coefficients\n rho_da = xr.apply_ufunc(acf_lag1, da, input_core_dims=[['time']], output_core_dims=[[]], vectorize=True, dask='allowed')\n \n # DataArray of effective sample size\n n_eff_da = n * ((1 - rho_da) / (1 + rho_da))\n \n # Initialise guess for block length\n Ls_da = xr.full_like(rho_da, 1)\n for i in range(10): # iterate to get estimate of L\n L_da = (n - Ls_da + 1) ** ( (2/3) * (1 - n_eff_da / n) )\n Ls_da = L_da\n \n return np.ceil(L_da) # round up to get block length", "def lcr_matrix(H):\n if H.ndim != 2 or H.shape[0] != H.shape[1]:\n raise ValueError('H should be a square matrix')\n\n leverages = sqrt(1-H.diagonal())\n leverages = leverages[:, None]\n R = (eye(len(H)) - H) / leverages\n return R - R.mean(0)", "def rel_matrix(df_long: pd.DataFrame) -> None:\n pass", "def get_lthr_data(self):\n # FIXME one time point or whole data\n temp = self._data.copy()\n temp[temp < self._view_min] = 0\n return temp", "def _f_lcs(llcs, m, n):\n r_lcs = llcs / m\n p_lcs = 
llcs / n\n beta = p_lcs / (r_lcs + 1e-12)\n num = (1 + (beta ** 2)) * r_lcs * p_lcs\n denom = r_lcs + ((beta ** 2) * p_lcs)\n f_lcs = num / (denom + 1e-12)\n return f_lcs", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def _len_lcs(x, y):\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]", "def L(self) -> float:\n return self._L", "def point_in_mbr(df, min_lat, max_lat, min_lon, max_lon):\n df = df[(df[\"lat\"] >= min_lat) &\n (df[\"lat\"] <= max_lat) &\n (df[\"lon\"] >= min_lon) &\n (df[\"lon\"] <= max_lon)\n ]\n return df", "def test_lfc_pos_area_below_lcl():\n p = [902.1554, 897.9034, 893.6506, 889.4047, 883.063, 874.6284, 866.2387, 857.887,\n 849.5506, 841.2686, 833.0042, 824.7891, 812.5049, 796.2104, 776.0027, 751.9025,\n 727.9612, 704.1409, 680.4028, 656.7156, 629.077, 597.4286, 565.6315, 533.5961,\n 501.2452, 468.493, 435.2486, 401.4239, 366.9387, 331.7026, 295.6319, 258.6428,\n 220.9178, 182.9384, 144.959, 106.9778, 69.00213] * units.hPa\n t = [-3.039381, -3.703779, -4.15996, -4.562574, -5.131827, -5.856229, -6.568434,\n -7.276881, -7.985013, -8.670911, -8.958063, -7.631381, -6.05927, -5.083627,\n -5.11576, -5.687552, -5.453021, -4.981445, -5.236665, -6.324916, -8.434324,\n -11.58795, -14.99297, -18.45947, -21.92021, -25.40522, -28.914, -32.78637,\n -37.7179, -43.56836, -49.61077, -54.24449, -56.16666, -57.03775, -58.28041,\n -60.86264, -64.21677] * units.degC\n td = [-22.08774, -22.18181, -22.2508, -22.31323, -22.4024, -22.51582, -22.62526,\n -22.72919, -22.82095, -22.86173, -22.49489, -21.66936, -21.67332, -21.94054,\n -23.63561, -27.17466, -31.87395, -38.31725, -44.54717, -46.99218, -43.17544,\n -37.40019, -34.3351, -36.42896, -42.1396, -46.95909, -49.36232, -48.94634,\n -47.90178, -49.97902, -55.02753, -63.06276, -72.53742, -88.81377, -93.54573,\n -92.92464, -91.57479] * units.degC\n prof = parcel_profile(p, t[0], td[0]).to('degC')\n lfc_p, lfc_t = lfc(p, t, td, prof)\n assert_nan(lfc_p, p.units)\n assert_nan(lfc_t, t.units)", "def ohodnotL(row, col, znak, prevx, prevy, pocet_ciest, hlbka, mx): # vlavo\r\n\r\n susedia = getSusedia_ohodnot(row, col, znak)\r\n\r\n if (values[row][col] != \"W\" and col != 0):\r\n hlbka += 1\r\n\r\n if col == 0:\r\n if values[row][col] == \"W\" and hlbka != 0:\r\n hlbka -= 1\r\n dlzkyL.append(hlbka)\r\n\r\n if (col != 0 and hlbka < mx):\r\n for sused in susedia:\r\n if (sused[1] == col - 1 or (sused[1] == col and (sused[0] != prevx or sused[1] != prevy))):\r\n if sused[1] == 0:\r\n pocet_ciest += 1\r\n pocet_ciest += ohodnotL(sused[0], sused[1], znak, row, col, 0, hlbka, mx)\r\n if (values[sused[0]][sused[1]] == \"W\") and col == 1: # nema zmysel sem umiestnovat - radsej inde\r\n pocet_ciest = 0\r\n return pocet_ciest", "def correlate_rows(matrix):\n return np.dot(matrix, matrix.T) / (la.norm(matrix) ** 2)", "def matlabels(df, rowlabel_fn):\n return df.index.to_frame().apply(rowlabel_fn, axis=1)", "def lamPoint(numLams, lambdas, lam):\r\n \r\n help = [0.0 for i in range(numLams)]\r\n for i in range(numLams):\r\n\r\n help[i] = lambdas[i] - lam;\r\n help[i] = abs(help[i]);\r\n\r\n \r\n index = 0\r\n min = help[index]\r\n\r\n for i in range(1, numLams):\r\n\r\n if (help[i] < min): \r\n min = help[i]\r\n index = i\r\n\r\n return index", "def keltner_channel_lband_indicator(high, low, close, n=10, fillna=False):\n lband = keltner_channel_lband(high, low, close, n)\n kc_lband_ind = np.clip(np.sign(lband-close), 0, 1)\n\n if fillna:\n kc_lband_ind = kc_lband_ind.replace([np.inf, -np.inf], 
np.nan).fillna(0)\n return pd.Series(kc_lband_ind, name='kc_lband_ind')", "def lcms(argg: range) -> int:\n l = 1\n for arg in argg:\n l = lcm(l, arg)\n return l", "def matchlines(nlines, wl, z, eml):\n lbdas = np.array(list(eml.keys()))\n a = (wl[:, np.newaxis] / (1 + z) - lbdas[np.newaxis, :]) ** 2.0\n jfound = np.argmin(a, axis=1)\n error = np.diag(a[:, jfound]).sum()\n error = np.sqrt(error / nlines)\n if((nlines >= 2)and(jfound[0] == jfound[1])):\n error = 15.\n return(error, jfound)", "def Ls(GTn:torch.tensor, Mn:torch.tensor) -> torch.tensor:\n return (-(GTn * torch.log(Mn+1e-15) + (1- GTn) * torch.log((1- Mn)+1e-15))).sum()", "def kldiv_lsm_ctc(logits, ylens):\n bs, _, vocab = logits.size()\n log_uniform = logits.new_zeros(logits.size()).fill_(math.log(1 / (vocab - 1)))\n probs = torch.softmax(logits, dim=-1)\n log_probs = torch.log_softmax(logits, dim=-1)\n loss = torch.mul(probs, log_probs - log_uniform)\n loss_mean = sum([loss[b, :ylens[b], :].sum() for b in range(bs)]) / ylens.sum()\n return loss_mean", "def lam(freq):\n return C / freq", "def calculateDistances(df):\n return", "def calculate_L(self, n_t, n_n, l0, l1, dt, ctant):\n d_scl = np.sqrt((self.t_D.get_value() ** 2).sum(1).mean())\n\n a = n_t * n_n * l1 * dt * d_scl ** 2 * np.log(l1 / l0) ** 2\n # try:\n # self._b\n # except:\n # from scipy.linalg import eigh as largest_eigh\n # k=1\n # N = inv_cov.shape[0]\n # evals_large, _ = largest_eigh(inv_cov, eigvals=(N-k,N-1))\n b = 0.5 * self.QUAD_REG.max()\n # FIXME: not quite right for non-diagonal matrix, but is close enough\n # in practice\n\n # print a, b, b / a\n return ((a + b) * ctant).astype('float32')", "def run_lmm(formula, df, reml=False, **kwargs):\n model = sm.MixedLM.from_formula(formula, df, **kwargs)\n return model.fit(reml=reml)", "def CC_CLS_CL_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_CLS_CL']]\n min_value = min(Feature_DF.loc[:,'CC_CLS_CL'])\n Feature_DF.loc[:,'CC_CLS_CL_TRS'] = Feature_DF.loc[:,'CC_CLS_CL'].apply(lambda x : (1+x-min_value)**(-9/8))\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_CLS_CL_TRS']]\n\n return Feature_DF", "def CC_REC_NUM_LT3_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_REC_NUM_LT3']]\n min_value = min(Feature_DF.loc[:,'CC_REC_NUM_LT3'])\n Feature_DF.loc[:,'CC_REC_NUM_LT3_TRS'] = Feature_DF.loc[:,'CC_REC_NUM_LT3'].apply(lambda x : (1+x-min_value)**(-3/8))\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_REC_NUM_LT3_TRS']]\n\n return Feature_DF", "def LLE(data, n_components=2, n_neighbors=10):\n # Compute the nearest neighbors\n _, neighbors_idx = AffiinityMAt(data,n_neighbor=n_neighbors,kernel_method='Linear')\n\n n = data.shape[0]\n w = np.zeros((n, n))\n for i in range(n):\n # Center the neighbors matrix\n k_indexes = neighbors_idx[i, :]\n neighbors = data[k_indexes, :] - data[i, :]\n\n # Compute the corresponding gram matrix\n gram_inv = np.linalg.pinv(np.dot(neighbors, neighbors.T))\n\n # Setting the weight values according to the lagrangian\n lambda_par = 2/np.sum(gram_inv)\n w[i, k_indexes] = lambda_par*np.sum(gram_inv, axis=1)/2\n m = np.subtract(np.eye(n), w)\n values, u = np.linalg.eigh(np.dot(np.transpose(m), m))\n return u[:, 1:n_components+1]", "def compute_log_likelihoods(df, error_rate=1e-3):\n df['log_likelihood_absent'] = df.apply(calculate_likelihood_absent, axis=1, args=(error_rate,))\n df['log_likelihood_present'] = df.apply(calculate_likelihood_present, axis=1, args=(error_rate,))\n\n return df", "def get_rows(df):\n return df.shape[0]", "def find_distances(frame, newPoint): \n 
distances = [] \n \n # iterate over all rows in the dataframe\n for index in range(frame.shape[0]):\n\n # get all columns of a row (except the label) \n point = frame.iloc[index,:-1] \n\n \t# compute the distance, then save distance and label \n # (use distance as first value)\n distance = euclidean_distance(point, newPoint)\n if distance == 0:\n distances.append((distance, frame.iloc[index,-1])) \n else:\n distances.append((sys.maxsize, frame.iloc[index,-1])) \n \n\n distances.sort() \n \n return distances", "def find_densest(m, w_ncols, w_nrows):\n # Implement your solution here.\n pass", "def _get_Mll(self):\n\t\t_M_ll = mll.get_mll(self.W_l, self.lmax)\n\t\treturn np.float64(_M_ll)", "def laplacian(self, e, epos):\n s = int(e >= self._nelec[0])\n ao = np.real_if_close(\n self._mol.eval_gto(self.pbc_str + \"GTOval_sph_deriv2\", epos.configs)[\n [0, 4, 7, 9]\n ],\n tol=1e4,\n )\n molap = np.dot(\n [ao[0], ao[1:].sum(axis=0)], self.parameters[self._coefflookup[s]]\n )\n molap_vals = self._testrow(e, molap[1][:, self._det_occup[s]])\n testvalue = self._testrow(e, molap[0][:, self._det_occup[s]])\n\n return molap_vals / testvalue", "def lil_cc(lil):\n n = len(lil)\n visited = np.zeros(n).astype(np.int_)\n label = - np.ones(n).astype(np.int_)\n k = 0\n while (visited == 0).any():\n front = [np.argmin(visited)]\n while len(front) > 0:\n pivot = front.pop(0)\n if visited[pivot] == 0:\n visited[pivot] = 1\n label[pivot] = k\n front += lil[pivot]\n k += 1\n return label", "def lcmm(*args):\n\treturn reduce(lcm, args)", "def lmin(self):\n return self._get_mean_and_samples_attribute('lmin')", "def lla(self):\n return self._lla_shape", "def _point_in_mbr(self, df):\n if df.empty:\n return df\n df = df[(df[\"lat\"] >= self._min_lat) &\n (df[\"lat\"] <= self._max_lat) &\n (df[\"lon\"] >= self._min_lon) &\n (df[\"lon\"] <= self._max_lon)\n ]\n return df", "def lll(basis, delta=.75):\n orthog = gram_schmidt(basis)\n mu = get_mus(basis, orthog)\n k = 1\n while k < len(basis):\n for j in range(k - 1, -1, -1):\n # Size condition\n if np.abs(mu[k][j]) > .5:\n basis[k] = basis[k] - np.rint(mu[k, j]) * basis[j]\n orthog = gram_schmidt(basis)\n mu = get_mus(basis, orthog)\n # Lovasz condition\n if sq_norm(orthog[k]) >= (delta - mu[k][k - 1]**2) * sq_norm(orthog[k - 1]):\n k += 1\n else:\n temp = copy.deepcopy(basis[k])\n basis[k] = copy.deepcopy(basis[k - 1])\n basis[k - 1] = temp\n orthog = gram_schmidt(basis)\n mu = get_mus(basis, orthog)\n k = max(k - 1, 1)\n return basis", "def cl_interval(lnL, sigma=None, CL=0.68):\n import copy\n # Confidence limits\n if sigma is None:\n c0 = (1. 
- CL)/2.\n c1 = 1.-c0\n # Image dimensions\n shape = lnL.shape\n ndim = len(shape)\n slc = [slice(None)]*ndim\n # Find max\n norm_L = np.exp(np.maximum(lnL - np.max(lnL),-15.))\n # Find best indices\n indices = np.where(lnL == np.max(lnL))\n best_idx = [bi[0] for bi in indices]\n\n # Error intervals\n all_error = []\n for kk in range(ndim):\n # Collapse on this dimension\n slc = copy.deepcopy(best_idx)\n slc[kk] = slice(None)\n Lslice = norm_L[slc].flatten()\n # Interpolate and go\n cumul_area = np.cumsum(Lslice)\n f_area = interp1d(cumul_area/cumul_area[-1], np.arange(len(Lslice)))\n # Here we go\n idx0 = int(np.round(f_area(c0)))\n idx1 = int(np.round(f_area(c1)))\n all_error.append([idx0,idx1])\n\n # Return\n return best_idx, all_error", "def compute_distance(df):\n pass", "def lagrange(self,K, L):\n return lagrange(K, L)", "def lll(basis, delta=.75):\n orthog = gram_schmidt(basis)\n mu = get_mus(basis, orthog)\n k = 1\n while k < len(basis):\n for j in range(k - 1, -1, -1):\n # Size condition\n if np.abs(mu[k][j]) > .5:\n basis[k] = basis[k] - np.rint(mu[k, j]) * basis[j]\n orthog = gram_schmidt(basis)\n mu = get_mus(basis, orthog)\n # Lovasz condition\n if sq_norm(orthog[k]) >= (delta - mu[k][k - 1]**2) * sq_norm(orthog[k - 1]):\n k += 1\n else:\n temp = copy.deepcopy(basis[k])\n basis[k] = copy.deepcopy(basis[k - 1])\n basis[k - 1] = temp\n orthog = gram_schmidt(basis)\n mu = get_mus(basis, orthog)\n k = max(k - 1, 1)\n return basis", "def LCSubSeq(X, Y, m, n):\n LCSuff = [[0 for k in range (n+1)] for l in range(m+1)]\n result = 0\n\n for i in range(m+1):\n for j in range(n+1):\n if (i==0 or j==0):\n LCSuff[i][j] = 0\n elif (X[i-1] == Y[j - 1]):\n LCSuff[i][j] = LCSuff[i-1][j-1] + 1\n result = max(result, LCSuff[i][j])\n else:\n LCSuff[i][j] = 0\n return result", "def get_cl(self, re, aa):\r\n return Turbine.v_fCL(re, np.degrees(aa))", "def pos_conserved(df, conservation):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0).max(axis=0).ge(conservation * nb_rows)\n\n ge = [i for i, x in enumerate(value_counts) if x]\n return ge", "def get_maxmin_index_from_row(\n distance_matrix: np.ndarray,\n row: int,\n previous_indexes: List,\n type: str,\n )-> int:\n distance_matrix = distance_matrix.copy()\n arr = distance_matrix[row].astype(float)\n \n aux_list = range(arr.shape[0])\n aux_list_2 = []\n for i in aux_list:\n if i in previous_indexes:\n aux_list_2.append(True)\n else:\n aux_list_2.append(False)\n previous_indexes_bool = aux_list_2\n \n if type == 'max':\n arr[previous_indexes_bool] = -1\n target_index = np.argmax(arr)\n if type == 'min':\n arr[previous_indexes_bool] = np.Inf\n target_index = np.argmin(arr)\n \n return target_index", "def cgid2llnr(self, cgID):\n cgID = cgID.replace('_240_37-ASM', '')\n possible = self.phenotypes[self.phenotypes.cgID == cgID].LLnr.values \n if len(possible) > 0:\n return possible[0]\n #fi\n return None", "def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood", "def labeling_func(df_clus):\n\n df_all_labeled = df_all_columns.copy()\n df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()\n df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)\n for i in range(0, clus_params['n_components']):\n df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()\n\n return 
df_all_labeled", "def lcmm(*args): \r\n return reduce(lcm, args)", "def norm_leg_coefs(self, cl):\n new_cl = np.zeros_like( cl )\n for i in range(cl.shape[1]):\n for j in range(cl.shape[2]):\n if (cl[:,i,j]).max() >0:\n new_cl[:,i,j] = cl[:,i,j]/(cl[:,i,j]).max()\n\n return new_cl", "def _get_LFDP(self, segment):\n dists_to_fitting_line = self._get_dists_to_fitting_line(segment)\n return numpy.max(dists_to_fitting_line), numpy.argmax(dists_to_fitting_line)", "def ml_kl_loss(self, simulation, c1 = 1.0, ndims = 2, ehigh=1e5, emax = 1e10, turnover=200):\n loss = MLKL(c1, simulation, ndims, ehigh, emax, turnover)\n return loss.lossFunction", "def __isFarFromLevel(self, l):\n\n s = np.mean(self.df['high'] - self.df['low'])\n return np.sum([abs(l-x) < s for x in self.levels]) == 0", "def lam2f(l):\n f=c['c']/l\n return f", "def lap(self):\n\n gr = self.grid\n phi = gr.phi\n\n lapphi = gr.scratch_array()\n\n ib = gr.ilo\n ie = gr.ihi\n\n lapphi[ib:ie+1] = \\\n (phi[ib-1:ie] - 2.0*phi[ib:ie+1] + phi[ib+1:ie+2])/gr.dx**2\n\n return lapphi", "def get_lca_in_bst(root, node_0, node_1):\n res = root\n s = node_0 if node_0.data < node_1.data else node_1\n b = node_1 if node_0.data < node_1.data else node_0\n while (res.data < s.data) or (res.data > b.data):\n while res.data < s.data:\n res = res.right\n while res.data > b.data:\n res = res.left\n return res", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)" ]
[ "0.59105355", "0.57944614", "0.5768694", "0.5714691", "0.5706475", "0.5483352", "0.5476161", "0.5426626", "0.53980875", "0.5363862", "0.53620285", "0.5346637", "0.530857", "0.5301486", "0.52655256", "0.52437156", "0.52226084", "0.52145016", "0.5176464", "0.51718956", "0.5150789", "0.51381934", "0.5129281", "0.5109691", "0.5087405", "0.5087405", "0.50779736", "0.50730824", "0.50631976", "0.5058335", "0.50577444", "0.5046802", "0.50406176", "0.5040531", "0.5038964", "0.5037751", "0.50369555", "0.50353974", "0.5031617", "0.503026", "0.50252753", "0.5017688", "0.50091106", "0.50080657", "0.50073093", "0.50071096", "0.50018156", "0.4991105", "0.4991105", "0.49886194", "0.49829197", "0.49762926", "0.49739558", "0.4971177", "0.49567407", "0.49419445", "0.49405253", "0.49380735", "0.49355355", "0.49193478", "0.49181682", "0.491685", "0.49057543", "0.49018252", "0.48970035", "0.48878843", "0.4885905", "0.48795062", "0.48748678", "0.48742807", "0.48701495", "0.48689002", "0.48659247", "0.48632237", "0.48608205", "0.4858805", "0.48533145", "0.48276407", "0.4821546", "0.48213956", "0.4819021", "0.4817905", "0.48175704", "0.48149678", "0.48112413", "0.48082533", "0.48070386", "0.48062533", "0.47967336", "0.47933164", "0.47917014", "0.47833094", "0.478048", "0.47772712", "0.4770565", "0.47642025", "0.47638798", "0.47612715", "0.47612226", "0.47601923" ]
0.60421354
0
Adapted from interactive_vaporflux.ipynb sst, sea surface temperature (K) ft_qv, mixedlayer top qv (kg kg^-1) use_NT, True or False outputs csv and json files with equilibrium values
def run_main(sst, ft_qv, use_NT):\n    dtout=10.  #minutes\n    end_time=8*24.   #hours\n    del_time=dtout*60. #seconds\n    end_time=end_time*3600. #seconds\n    #sst=297\n    D=5.e-6  #s-1\n    U=7  #m/s\n    psfc=100. #kPa\n    qsfc=tf.qs_tp(sst,psfc)\n    ft_intercept = 292 #K\n    ft_gamma = 6.e-3  #K/m\n    #ft_qv = 2.e-3\n    k=0.2  #entrainment efficiency\n    Cd = 1.e-3  #drag coefficient\n    tspan = np.arange(0.,end_time,del_time)\n    vars_init=[285.,400.,8.e-3]  #theta (K), height (m) qv (kg/kg) to start\n    the_tup=dict(D=D,U=U,sst=sst,ft_intercept=ft_intercept,ft_gamma=ft_gamma,\n                 qsfc=qsfc,ft_qv=ft_qv,k=k,Cd=Cd,radcool=30.,use_NT=use_NT)  # include use_NT\n    the_tup=make_tuple(the_tup,'coeffs')\n    output=integrate.odeint(dmixed_vars, vars_init, tspan,(the_tup,))\n    result=pd.DataFrame.from_records(output,columns=['theta','h','qv'])\n\n    # save time/computation by only doing calculations for the last timestep (equilibrium)\n    result['time']=tspan[-1]/3600./24.  #days\n    result['deltheta'] = theta_ft(result['h'].values[-1],ft_intercept,ft_gamma) - result['theta'].iloc[-1]\n    result['delqv'] = ft_qv - result['qv'].iloc[-1]\n    result['LCL'] = calc_lcl(result.iloc[-1], psfc)\n    result['q_flux_0']=calc_sfc_qvap_flux(result.iloc[-1],the_tup)\n    result['T_flux_0']=calc_sfc_theta_flux(result.iloc[-1],the_tup)\n    result['entflux_theta']=calc_entflux_theta(result.iloc[-1],the_tup)\n\n    # decide how to calculate entrainment\n    the_vars = [result['theta'].iloc[-1],result['h'].iloc[-1],result['qv'].iloc[-1]]\n    if use_NT:\n        result['went']=calc_went_NT(the_vars, the_tup, result['deltheta'].iloc[-1],\n                                    result['T_flux_0'].iloc[-1], result['q_flux_0'].iloc[-1])\n    else:\n        result['went']=calc_went(result.iloc[-1],the_tup)\n\n    result['entflux_qv']=calc_entflux_qv(result.iloc[-1],the_tup)\n\n    with open('dumpmodel.csv','w') as f:\n        result.to_csv(f,index=False)\n\n    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_equil(sst, ft_qv, use_NT=False):\n \n run_main(sst, ft_qv, use_NT)\n \n # grab csv file\n with open('dumpmodel.csv','r') as f:\n df_result=pd.read_csv(f)\n\n # last time step into named tupple\n out=df_result.iloc[-1]\n steady_state=make_tuple(out.to_dict())\n steady_state\n \n # obtain steady-state values\n dth=steady_state.deltheta\n dqt=steady_state.delqv\n thetal_m=steady_state.theta\n qt_m=steady_state.qv\n h=steady_state.h\n press=tf.find_press(steady_state.h) #kPa\n thetal_ft = steady_state.theta + dth\n qt_ft = steady_state.qv + dqt\n zb = steady_state.LCL\n zi = steady_state.h\n we = steady_state.went\n \n # calculate thetal at z = 3000 m (take qt(z = 3000m) = qt(z = h), so delta_qt = dqt)\n gamma = 6e-3 \n thetal_3000 = thetal_ft + gamma*(3000-h)\n LTS = thetal_3000 - steady_state.theta\n \n # calculate delta_Fr\n delta_Frstar = 82.0 # Wm^-2\n Frlambda = 7.9 # Wm^-2, using with CTL from Gesso\n delta_Fr = delta_Frstar - Frlambda*qt_ft*1000 # convert qt_ft to g kg^-1\n\n # calculate LWP\n rho = 1.\n LWP = 0.5*rho*(zi-zb)**2\n \n # put all required variables into output array\n out_array = np.array([thetal_m, qt_m, zi, zb, we, LWP, delta_Fr, LTS, dqt])\n \n return out_array", "def extractQuantities(path, run, t0, t1):\n data = pyLTR.Models.MIX(path, run)\n\n # hard-coded input for testing & debugging:\n #data = pyLTR.Models.LFM('/hao/aim2/schmitt/data/LTR-2_0_1b/r1432/March1995/LR/single', 'LRs')\n \n #Make sure variables are defined in the model.\n modelVars = data.getVarNames()\n for v in ['Grid X', 'Grid Y', \n 'Potential North [V]', 'Potential South [V]', \n 'FAC North [A/m^2]', 'FAC South [A/m^2]',\n 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', \n 'Hall conductance North [S]', 'Hall conductance South [S]', \n 'Average energy North [keV]', 'Average energy South [keV]',\n 'Number flux North [1/cm^2 s]', 'Number flux South [1/cm^2 s]']:\n assert( v in modelVars )\n\n timeRange = data.getTimeRange()\n if len(timeRange) == 0:\n raise Exception(('No data files found. Are you pointing to the correct run directory?'))\n\n index0 = 0\n if t0:\n for i,t in enumerate(timeRange):\n if t0 >= t:\n index0 = i\n\n index1 = len(timeRange)-1\n if t1:\n for i,t in enumerate(timeRange):\n if t1 >= t:\n index1 = i \n\n print(( 'Extracting MIX quantities for time series over %d time steps.' 
% (index1-index0) ))\n \n # Output a status bar displaying how far along the computation is.\n progress = pyLTR.StatusBar(0, index1-index0)\n progress.start()\n\n t_doy = []\n cpcpNorth = []\n cpcpSouth = []\n hpNorth = []\n hpSouth = []\n ipfacNorth = []\n ipfacSouth = []\n\n # Pre-compute area of the grid.\n x = data.read('Grid X', timeRange[index0])\n y = data.read('Grid Y', timeRange[index0])\n # Fix singularity at the pole\n x[:,0] = 0.0\n y[:,0] = 0.0\n z = numpy.sqrt(1.0-x**2-y**2)\n ri = 6500.0e3 # Radius of ionosphere\n areaMixGrid = pyLTR.math.integrate.calcFaceAreas(x,y,z)*ri*ri\n\n for i,time in enumerate(timeRange[index0:index1]):\n try:\n # -- Day of Year\n tt = time.timetuple()\n t_doy.append(tt.tm_yday+tt.tm_hour/24.0+tt.tm_min/1440.0+tt.tm_sec/86400.0)\n\n # --- Cross Polar Cap Potential\n psiNorth = data.read('Potential North [V]', time) / 1000.0\n cpcpNorth.append(psiNorth.max() - psiNorth.min())\n\n psiSouth = data.read('Potential South [V]', time) / 1000.0\n cpcpSouth.append(psiSouth.max() - psiSouth.min())\n \n # --- Hemispheric Power\n energy = data.read('Average energy North [keV]', time)\n flux = data.read('Number flux North [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpNorth.append(hp.sum() * 1.6e-21) \n\n energy = data.read('Average energy South [keV]', time)\n flux = data.read('Number flux South [1/cm^2 s]', time)\n hp = areaMixGrid*energy[:-1,:-1] * flux[:-1,:-1]\n # KeV/cm^2s to mW/m^2 to GW\n hpSouth.append(hp.sum() * 1.6e-21)\n\n # --- Positive current density\n fac = data.read('FAC North [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacNorth.append(pfac.sum()/1.0e6)\n\n fac = data.read('FAC South [A/m^2]', time)\n fac[fac <= 0] = 0.0\n pfac = areaMixGrid * fac[:-1,:-1]\n ipfacSouth.append(pfac.sum()/1.0e6)\n\n progress.increment()\n except KeyboardInterrupt:\n # Exit when the user hits CTRL+C.\n progress.stop()\n progress.join() \n print('Exiting.')\n import sys\n sys.exit(0)\n except:\n # Cleanup progress bar if something bad happened.\n progress.stop()\n progress.join()\n raise\n progress.stop()\n progress.join()\n\n dataNorth = pyLTR.TimeSeries()\n dataSouth = pyLTR.TimeSeries()\n dataNorth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataSouth.append('datetime', 'Date & Time', '', timeRange[index0:index1])\n dataNorth.append('doy', 'Day of Year', '', t_doy)\n dataSouth.append('doy', 'Day of Year', '', t_doy)\n \n # \"N\" and \"S\" label subscripts are redundant here, potentially leading to\n # mis-labeling of plots\n #dataNorth.append('cpcp', r'$\\Phi_N$', 'kV', cpcpNorth)\n #dataSouth.append('cpcp', r'$\\Phi_S$', 'kV', cpcpSouth)\n #\n #dataNorth.append('hp', r'$HP_N$', 'GW', hpNorth)\n #dataSouth.append('hp', r'$HP_S$', 'GW', hpSouth)\n #\n #dataNorth.append('ipfac', r'$FAC_N$', 'MA', ipfacNorth)\n #dataSouth.append('ipfac', r'$FAC_S$', 'MA', ipfacSouth)\n \n dataNorth.append('cpcp', r'$\\Phi$', 'kV', cpcpNorth)\n dataSouth.append('cpcp', r'$\\Phi$', 'kV', cpcpSouth)\n \n dataNorth.append('hp', r'$HP$', 'GW', hpNorth)\n dataSouth.append('hp', r'$HP$', 'GW', hpSouth)\n \n dataNorth.append('ipfac', r'$FAC$', 'MA', ipfacNorth)\n dataSouth.append('ipfac', r'$FAC$', 'MA', ipfacSouth)\n\n return (dataNorth, dataSouth)", "def water(fname):\n\n variables = [\"H2OICE\", \"QV_COLUMN\", \"QI_COLUMN\"]\n data = dict()\n for v in variables:\n data.update(common.zonal_mean_surface(fname, v))\n \n return data", "def env_temperature(v3: \"float\", v4: 
\"float\") -> \"float\":", "def get_iPTF14gqr(colorplt=False):\n z = 0.063\n # ebv = 0.082\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_exp = 56943.74 # \n t_max = 56950.26 # g band max light + 3\n \n tb = Table(fits.open('../data/otherSN/De2018/tables1.fit')[1].data)\n tb.rename_column('MJD' , 'mjd')\n tb['texp_rf'] = (tb['mjd'] - t_exp) / (1+z)\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n # tb = tb[tb[\"Filt\"]==\"g \"]\n tb = tb[~np.isnan(tb['e_mag'])]\n tb.rename_column('Filt' , 'filter')\n tb.rename_column('e_mag' , 'emag')\n tb.rename_column('mag' , 'mag0')\n \n ixg = tb['filter']==\"g \"\n ixB = tb['filter']==\"B \"\n ixV = tb['filter']==\"V \"\n ixr = tb['filter']==\"r \"\n ixi = tb['filter']==\"i \"\n ixUVW1 = tb['filter']==\"UVW1\"\n ixUVW2 = tb['filter']==\"UVW2\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'][ixUVW2] = 2079\n tb['wave'][ixUVW1] = 2614\n tb['wave'][ixB] = 4359\n tb['wave'][ixg] = 4814\n tb['wave'][ixV] = 5430\n tb['wave'][ixr] = 6422\n tb['wave'][ixi] = 7883\n \n tb['mag0_abs'] = tb['mag0'] - dis_mod\n \n tb = tb.to_pandas()\n tb[\"texp_rf\"] = tb[\"Phase\"]\n tb = tb.drop(columns=[\"recno\", \"Phase\", \"l_mag\"])\n \"\"\"\n ix = np.any([tb['Tel'].values==\"P60 \",\n tb[\"filter\"].values=='g '], axis=0)\n tb = tb[ix]\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n tt = tb[\"tmax_rf\"].values\n epochs = [\" \" for x in range(len(tt))]\n epochs = np.array(epochs)\n \"\"\"\n ix = (tt>-5.6)&(tt<-5.55)\n epochs[ix] = \"epoch 01\"\n \"\"\"\n ix = (tt>-5.55)&(tt<-5.50)\n epochs[ix] = \"epoch 02\"\n \n ix = (tt>-5.50)&(tt<-5.45)\n epochs[ix] = \"epoch 03\"\n \n ix = (tt>-5.2)&(tt<-5.0)\n epochs[ix] = \"epoch 04\"\n ix = (tt>-5.0)&(tt<-4.7)\n epochs[ix] = \"epoch 05\"\n \n ix = (tt>-4.7)&(tt<-4.5)\n epochs[ix] = \"epoch 06\"\n ix = (tt>-4.5)&(tt<-3.5)\n epochs[ix] = \"epoch 07\"\n ix = (tt>-3.5)&(tt<-2.5)\n epochs[ix] = \"epoch 08\"\n ix = (tt>-1.5)&(tt<-1)\n epochs[ix] = \"epoch 09\"\n ix = (tt>-1)&(tt<-0.82)\n epochs[ix] = \"epoch 10\"\n ix = (tt>-0.82)&(tt<-0.6)\n epochs[ix] = \"epoch 11\"\n ix = (tt>-0.5)&(tt<0.5)\n epochs[ix] = \"epoch 12\"\n ix = (tt>0.5)&(tt<1.5)\n epochs[ix] = \"epoch 13\"\n ix = (tt>1.5)&(tt<2.5)\n epochs[ix] = \"epoch 14\"\n ix = (tt>3.5)&(tt<4.5)\n epochs[ix] = \"epoch 15\"\n ix = (tt>4.5)&(tt<5)\n epochs[ix] = \"epoch 16\"\n ix = (tt>5)&(tt<5.6)\n epochs[ix] = \"epoch 17\"\n ix = (tt>5.6)&(tt<5.8)\n epochs[ix] = \"epoch 18\"\n ix = (tt>6)&(tt<7)\n epochs[ix] = \"epoch 19\"\n ix = (tt>7)&(tt<8)\n epochs[ix] = \"epoch 20\"\n ix = (tt>8)&(tt<9)\n epochs[ix] = \"epoch 21\"\n tb[\"epoch\"] = epochs\n\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g ', 'r ', 'i ']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r \" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g \"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r \"]\n itb = tbsub[tbsub[\"filter\"].values==\"i \"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = 
np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def get_thermochem(file_set, results_dict, save_vibes, out_dir, tog_output_fname, qh_h_opt, write_mode):\n h = []\n qh_h = []\n gt = []\n qh_gt = []\n temps = []\n for index, file in enumerate(file_set):\n base_name = os.path.basename(file)\n if file == REACT_PROD_SEP:\n h.append(np.full([len(temps)], np.nan))\n qh_h.append(np.full([len(temps)], np.nan))\n gt.append(np.full([len(temps)], np.nan))\n qh_gt.append(np.full([len(temps)], np.nan))\n continue\n vibes_out = results_dict[base_name][GOODVIBES_OUT]\n found_structure = False\n skip_line = True\n h.append([])\n qh_h.append([])\n gt.append([])\n qh_gt.append([])\n # we know the last line should be dropped, and at least the first 10\n for line in vibes_out[10:-2]:\n if GOODVIBES_ERROR_PAT.match(line):\n raise InvalidDataError(\"See GoodVibes output: {}\".format(vibes_out))\n if not found_structure:\n if GOODVIBES_DATA_PAT.match(line):\n found_structure = True\n continue\n elif skip_line:\n skip_line = False\n continue\n else:\n vals = line.split()\n if index == 0:\n temps.append(float(vals[1]))\n h[index].append(float(vals[2]))\n if qh_h_opt:\n qh_h[index].append(float(vals[3]))\n gt[index].append(float(vals[-2]))\n qh_gt[index].append(float(vals[-1]))\n if save_vibes:\n vibes_out_fname = os.path.relpath(create_out_fname(file, suffix='_vibes', base_dir=out_dir, ext='.dat'))\n list_to_file(vibes_out, vibes_out_fname, print_message=False)\n print('Saved GoodVibes output as: {}'.format(vibes_out_fname))\n if tog_output_fname:\n list_to_file(vibes_out, tog_output_fname, mode=write_mode, print_message=False)\n if write_mode == 'w':\n print(\"Adding all GoodVibes output to: {}\".format(tog_output_fname))\n write_mode = \"a\"\n\n temps = np.asarray(temps)\n # for each molecule, multiply the array to convert to kcal/mol\n for index in range(len(gt)):\n h[index] = np.asarray(h[index]) * EHPART_TO_KCAL_MOL\n if qh_h_opt:\n qh_h[index] = np.asarray(qh_h[index]) * EHPART_TO_KCAL_MOL\n gt[index] = np.asarray(gt[index]) * EHPART_TO_KCAL_MOL\n qh_gt[index] = np.asarray(qh_gt[index]) * EHPART_TO_KCAL_MOL\n\n return temps, h, qh_h, gt, qh_gt", "def main(folder, quiet=0):\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n\n color1 = \"I4\" #filter system for first color of CMD\n color2 = \"M1\" #filter system for second color of CMD\n 
zeromagc1 = zero.zero_mag[color1]\n zeromagc2 = zero.zero_mag[color2]\n min_mag = 8. #minimal observation limit\n max_mag = 0. #maximal observation limit\n\n#getting file list\n files = sorted(os.listdir('%s/%s' % (os.getcwdu(), folder))) \n out = []\n\n for fil in files:\n#only using files created by the automated simulation\n if fil.startswith('sim_') and not 'settings' in fil.encode(\"ascii\"):\n print(\"%s/%s\" % (folder,fil.encode(\"ascii\")), file=output_stream)\n \n\n # Read in\n hdulist = fits.open('%s/%s' %(folder,fil))\n data = hdulist[1].data\n\n #calculating magnitudes from fluxes and converting to CMD-data\n x = -2.5*(np.log10(data['c%s' % color1]/zeromagc1) - np.log10(data['c%s' % color2]/zeromagc2))\n y = -2.5*(np.log10(data['c%s' % color2]/zeromagc2))\n\n \n sel = np.logical_and( (y > -10./3. * (x-1.) + 10.), np.logical_and(max_mag < y, y < min_mag))\n sel = np.logical_and(sel, y < -x + 12.)\n n = sum(sel)\n t = Table(hdulist[1].data)\n if 'sel' in t.columns:\n t.remove_column('sel')\n t.add_column(Column(name='sel', data=sel.astype('int')))\n \n hdulist[1].data = np.array(t)\n tmp, av, apera, age = fil.split('_')\n fits.update('%s/%s' %(folder,fil), np.array(t), ext = 1, clobber=True)\n out.append([av, apera, age, n])\n\n #writing obtained data to \"folder/__expected_number\"\n head = ['#', 'AV', 'Aperature_size', 'Age', 'Expected_number']\n f = open('%s/__expected_number' % folder, 'w')\n f.write(','.join(head)+'\\n' )\n np.savetxt(f, np.asarray(out).astype(int))\n f.close()\n \n print (\"Analysed %s files and saved output to %s\" % (len(out),'%s/__expected_number' % folder), file=output_stream)", "def get_taux_variants(name):\n indicateurResult = get_empty_kpi()\n config = get_config(name)\n log.debug('Processing - '+name)\n\n indicateurResult['nom'] = config['nom']\n indicateurResult['unite'] = config['unite']\n indicateurResult['unite_short'] = config['unite_short']\n indicateurResult['trendType'] = config['trendType']\n indicateurResult['color'] = get_color_indicateur(config)\n\n if name == \"prop_variant_A\":\n colname = \"tx_A1\"\n elif name == \"prop_variant_B\":\n colname = \"tx_B1\"\n elif name == \"prop_variant_C\":\n colname = \"tx_C1\"\n elif name == \"prop_variant_D\":\n colname = \"tx_D1\"\n else:\n colname = \"tx_A0C0\"\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_fra'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine'].apply(lambda x: str(x)[11:])\n for country in tqdm(countries, desc=\"Processing National\"):\n res = process_stock(\n df,\n 'nat',\n 'fra',\n config['trendType'],\n colname\n )\n indicateurResult['france'].append(res)\n \n df = pd.read_csv(\n 'files_new/'+config['res_id_reg'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = df[~df['reg'].isnull()]\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine'].apply(lambda x: str(x)[11:])\n for reg in tqdm(df.reg.unique(), desc=\"Processing Régions\"):\n res = process_stock(\n df[df['reg'] == reg].copy(),\n 'reg',\n reg,\n config['trendType'],\n colname\n )\n indicateurResult['regions'].append(res)\n\n # df = pd.read_csv(\n # 'files_new/'+config['res_id_dep'],\n # sep=None,\n # engine='python',\n # dtype={'reg': str, 'dep': str}\n # )\n # df = enrich_dataframe(df, name)\n # df['date'] = df['semaine'].apply(lambda x: str(x)[11:])\n # for dep in tqdm(df.dep.unique(), desc=\"Processing Départements\"):\n # res = process_stock(\n # df[df['dep'] == dep].copy(),\n # 'dep',\n # dep,\n # 
config['trendType'],\n # colname\n # )\n # indicateurResult['departements'].append(res)\n\n save_result(indicateurResult, name)", "def ferry_data_QC(ferry, TH_abs, TH_u, TH_d):\n if type(ferry) is not xr.core.dataset.Dataset:\n raise ValueError('Ferry is not defined')\n # QC1: make nan all Absolute velocities that are greater than 6.5 m/s\n abs_u = ferry.eastward_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n abs_v = ferry.northward_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n abs_w = ferry.vertical_absolute_water_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n # Get bottom track velocity for reference\n # and also clean for TH in abs velocity\n east_btv = ferry.eastward_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n north_btv = ferry.northward_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n\n vert_btv = ferry.vertical_bottom_tracking_velocity.where(\n (abs(ferry.eastward_absolute_water_velocity) < TH_abs) &\n (abs(ferry.northward_absolute_water_velocity) < TH_abs))\n # Estimate u_true = abs_u + east_bt_v\n u_true = abs_u + east_btv\n v_true = abs_v + north_btv\n w_true = abs_w + vert_btv\n U = np.sqrt(u_true**2 + v_true**2)\n # QC2: check that u_true and v_true are less than 4 m/s\n u_true = u_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))\n v_true = v_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))\n w_true = w_true.where((u_true) < TH_u & (v_true < TH_u) & (U < TH_u))\n U = U.where((u_true) < TH_u & (v_true < TH_u) & (U < TH_u))\n # Add true velocity data to the dataset\n ferry['u_true'] = u_true\n ferry['v_true'] = v_true\n ferry['w_true'] = w_true\n ferry['Horizontal_speed'] = U\n # Remove first 5 rows of depth\n ferryQC = ferry.isel(depth=slice(TH_d, None))\n goodQC = True\n return(ferryQC, goodQC)", "def read_elia_activated_energy_volumes(filename,status):\r\n df = pd.read_excel(filename,skiprows=2,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if ((len(df.columns)<13) & (len(df.columns)>11)) :\r\n df.columns.values[0:13] = [\"NRV in MW\", \"GUV in MW\", \"IGCC+ in MW\", \"R2+ in MW\", \"Bids+ in MW\", \"R3+ in MW\", \"R3DP+ in MW\", \"GDV in MW\", \"IGCC- in MW\", \"R2- in MW\", \"Bids- in MW\", \"R3- in MW\"]\r\n if len(df.columns)<= 11:\r\n df.columns.values[0:12] = [\"NRV in MW\", \"GUV in MW\", \"IGCC+ in MW\", \"R2+ in MW\", \"Bids+ in MW\", \"R3+ in MW\", \"GDV in MW\", \"IGCC- in MW\", \"R2- in MW\", \"Bids- in MW\", \"R3- in MW\"]\r\n if len(df.columns)>14:\r\n df.columns.values[0:16] = [\"NRV in MW\", \"SR in MW\",\"GUV in MW\", \"IGCC+ in MW\",\"R2+ in MW\",\"Bids+ in MW\",\"R3 std in MW\",\"R3 flex in MW\",\"ICH in MW\",\"inter TSO import in MW\",\"GDV in MW\",\"IGCC- in MW\",\"R2- in MW\",\"Bids- in MW\",\"inter TSO export in 
MW\"]\r\n \r\n return df", "def main(daymet_dir,pickles,start_date='1980-10-01',end_date='2020-09-30',huc_col = 'huc8', **kwargs):\r\n\tprint(f'The huc col being processed is: {huc_col}')\r\n\t################################################################\r\n\t#first do the daymet data \r\n\t#read in all the files in this dir and combine them into one df\r\n\tearly=FormatData(glob.glob(daymet_dir+f'*_12_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tmid=FormatData(glob.glob(daymet_dir+f'*_2_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\tlate=FormatData(glob.glob(daymet_dir+f'*_4_{huc_col}.csv'),drop_cols=['system:index','.geo','dayl','vp']).read_in_csvs()\r\n\t################################################################\r\n\t#next do the snotel data \r\n\toutput=[]\r\n\r\n\t#read in some pickled objects, these look like a list of dfs with each being a station for the full time period \r\n\tfor item in ['PREC','TAVG','WTEQ']:\r\n\t\t#get the pickled objects for each parameter \r\n\t\tfiles = glob.glob(pickles+f'*{item}_{start_date}_{end_date}_snotel_data_list') #hardcoded currently\r\n\t\tdf=FormatData(files,drop_cols=['year','month','day']).read_in_pickles()\r\n\t\toutput.append(df) #the df here is 365 days x ~30 yrs x 237 stations so these are pretty big dfs\r\n\t\r\n\t#join the three enviro params \r\n\toutput_df = reduce(lambda left,right: pd.merge(left,right,how='inner',on=['date','id']), output)\r\n\t\r\n\t\r\n\t#convert the temp column from F to C \r\n\toutput_df['TAVG'] = (output_df['TAVG']-32)*(5/9) \r\n\t#there are a couple of erroneous temp values, remove those \r\n\toutput_df = output_df.loc[output_df['TAVG'] <= 50]\r\n\r\n\t#convert prec and swe cols from inches to cm \r\n\toutput_df['PREC'] = output_df['PREC']*2.54\r\n\toutput_df['WTEQ'] = output_df['WTEQ']*2.54\r\n\t\r\n\t#remove rows that have one of the data types missing- this might need to be amended because \r\n\t#it means that there are different numbers of records in some of the periods. \r\n\toutput_df=output_df.dropna()\r\n\t\r\n\t#cast the snotel id col to int to add the hucs \r\n\toutput_df['id'] = output_df['id'].astype('int')\r\n\r\n\t#add the as yet nonexistant hucs data to the outputs \r\n\thucs = kwargs.get('hucs')\r\n\toutput_df[huc_col] = output_df['id'].map(hucs)\r\n\r\n\t#there are multiple snotel stations in some of the basins, \r\n\t#combine those so there is just one number per basin like the \r\n\t#daymet and RS data. 
\r\n\r\n\toutput_df=output_df.groupby([huc_col,'date'])[['PREC','WTEQ','TAVG']].mean().reset_index()\r\n\r\n\tperiod_list = []\r\n\tfor p1,p2 in zip(['early','mid','late'],[early,mid,late]): \r\n\t\t\t#get snotel first\r\n\t\t#make a temporal chunk of data \r\n\t\tsnotel_chunk=FormatData(None,time_period=p1).split_yearly_data(output_df)\r\n\r\n\t\t##########working below here\r\n\t\t############################\r\n\t\t#calculate the snow droughts for that chunk \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\t\t#print('snotel')\r\n\t\t\t#print(snotel_drought)\r\n\t\telse: \r\n\t\t\tsnotel_drought=CalcSnowDroughts(snotel_chunk,swe_c='WTEQ',precip='PREC',temp='TAVG',sort_col=huc_col).prepare_df_cols()\r\n\r\n\t\t#get cols of interest \r\n\t\t#snotel_drought=snotel_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t#rename cols so they don't get confused when data are merged \r\n\t\t#snotel_drought.columns=['huc8','year']+['s_'+column for column in snotel_drought.columns if not (column =='huc8') | (column=='year')]\r\n\t\t\r\n\t\t#then do the same for daymet \r\n\t\tif (p1 == 'mid') | (p1 == 'late'): \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,start_year=1991,sort_col=huc_col).prepare_df_cols()\r\n\t\telse: \r\n\t\t\tdaymet_drought=CalcSnowDroughts(p2,sort_col=huc_col).prepare_df_cols()\r\n\t\t#print('daymet',daymet_drought)\r\n\t\t#daymet_drought=daymet_drought[['huc8','year','dry','warm','warm_dry']]\r\n\t\t\r\n\t\t#daymet_drought.columns=['huc8','year']+['d_'+column for column in daymet_drought.columns if not (column =='huc8') | (column=='year')]\r\n\r\n\t##########################################\r\n\t\r\n\t\t#run the kmeans with drought types as intiilization conditions (centroids) for the clusters\r\n\t\t\r\n\t\t#these are all of the huc 4 basins in the study area \r\n\t\thuc4s = ['1708','1801','1710','1711','1709','1701','1702','1705','1703','1601','1707','1706','1712','1704']\r\n\t\ts_output = []\r\n\t\td_output = []\r\n\t\tfor huc4 in huc4s: \r\n\t\t\thuc4_s = sd.prep_clusters(snotel_drought,huc4,huc_col=huc_col) #get the subset of the snow drought data for a given huc4\r\n\t\t\thuc4_d = sd.prep_clusters(daymet_drought,huc4,huc_col=huc_col)\r\n\t\t\t#make the centroids that serve as the intialization for the kmeans clusters- these are like endmembers (ish)\r\n\t\t\ts_centroids = DefineClusterCenters(huc4_s,'WTEQ','PREC','TAVG').combine_centroids() #makes a numpy array with four centroids\r\n\t\t\td_centroids = DefineClusterCenters(huc4_d,'swe','prcp','tavg').combine_centroids() #makes a numpy array with four centroids\r\n\r\n\t\t\t#clusters should be like: {0:dry, 1:warm, 2:warm_dry, 3:no_drought} 6/8/2021 DOUBLE CHECK\r\n\t\t\t#run kmeans for the snotel data\r\n\t\t\ts_clusters = sd.run_kmeans(huc4_s[['WTEQ','PREC','TAVG']].to_numpy(),huc4_s['label'],s_centroids)\r\n\t\t\ts_clusters = sd.add_drought_cols_to_kmeans_output(s_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\t\t\t#run kmeans for the daymet data \r\n\t\t\td_clusters = sd.run_kmeans(huc4_d[['swe','prcp','tavg']].to_numpy(),huc4_d['label'],d_centroids)\r\n\t\t\td_clusters = sd.add_drought_cols_to_kmeans_output(d_clusters, huc_col=huc_col) #add a few cols needed for plotting \r\n\r\n\t\t\ts_output.append(s_clusters)\r\n\t\t\td_output.append(d_clusters)\r\n\t\ts_plot = pd.concat(s_output)\r\n\r\n\t\t#select the cols of interest and rename so there's no 
confusion when dfs are merged \r\n\t\ts_plot=s_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\ts_plot.columns=[huc_col,'year']+['s_'+column for column in s_plot.columns if not (column == huc_col) | (column=='year')]\r\n\r\n\t\td_plot = pd.concat(d_output)\r\n\t\td_plot=d_plot[[huc_col,'year','dry','warm','warm_dry']]\r\n\t\td_plot.columns=[huc_col,'year']+['d_'+column for column in d_plot.columns if not (column == huc_col) | (column=='year')]\r\n\t\r\n\t\t#merge the two datasets into one df \r\n\t\tdfs = s_plot.merge(d_plot,on=[huc_col,'year'],how='inner')\r\n\t\t\r\n\t\t#deal with the scenario that there are basins with less than 30 years of data, remove those here\r\n\t\tdfs = sd.remove_short_dataset_stations(dfs,huc_col)\r\n\t\tperiod_list.append(dfs)\r\n\r\n\tplot_counts(period_list,kwargs.get('stats_dir'),huc_col=huc_col,**kwargs)", "def show_trap_results():\n df_grid = pd.read_hdf('./temp_results.h5', '/optimize_grid')\n print(df_grid)\n \n print('Minimum fwhm:')\n print(df_grid[df_grid.fwhm_ovr_mean==df_grid.fwhm_ovr_mean.min()])\n \n plt.plot(df_grid.e_fit, df_grid.fwhm_ovr_mean, '.b')\n plt.show()", "def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = 
rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def get_iPTF16asu():\n z = 0.187\n ebv = 0.0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')\n tb = tb.to_pandas()\n tb = tb[tb[\"col4\"].values!=\">\"]\n \n tb = tb.rename(columns={'col1' : 'mjd',\n 'col2': 'tmax_rf',\n 'col3': 'filter',\n \"col4\": 'mag',\n 'col5': 'emag',\n 'col6': 'instrument'})\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb[\"mag\"] = np.array(tb[\"mag\"].values, dtype = np.float)\n #tb[\"emag\"] = np.array(tb[\"emag\"].values, dtype = np.float)\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb[tb.wave!=0]\n return tb", "def generate_experimental_condition(dir_output, file_name_prefix,list_temperature, partial_pressure_H2O=0.02,SinteringTemperature=1600,SinteringTime=24):\r\n\r\n print(\"Enter the host element occupying the A-site\")\r\n set_A1 = input (\"Ex: Ba\\n\")\r\n print(\"Enter the valence of the A-site host element\")\r\n set_A1_valence = input(\"Ex: 2\\n\")\r\n frac_A1 = '1'\r\n print(\"Enter the host element occupying the B-site\")\r\n set_B1 = input (\"Ex: Zr\\n\")\r\n print(\"Enter the valence of the B-site host element\")\r\n set_B1_valence = input(\"Ex:4\\n\")\r\n print(\"Enter the fraction that describes the composition of the B-site host element\")\r\n frac_B1 = str(format(float( input (\"Ex:0.8\\n\")), '.2f'))\r\n print(\"Enter the dopant element occupying the B-site\")\r\n set_B2 = input (\"Ex: Sc\\n\")\r\n print(\"Enter the valence of the B-dopant\")\r\n set_B2_valence = input(\"Ex: 3\\n\")\r\n frac_B2 = str(format((1 - float(frac_B1)), '.2f'))\r\n\r\n # generate dataframe for base\r\n CA = set_A1 + set_B1 + frac_B1 + set_B2 + frac_B2 + \"O3\"\r\n dic = {'Composition':CA,\r\n 'A1':set_A1, 'Valence A1':set_A1_valence, 'fraction A1':frac_A1,\r\n 'B1':set_B1, 'Valence B1':set_B1_valence, 'fraction B1':frac_B1,\r\n 'B2':set_B2, 'Valence B2':set_B2_valence, 'fraction B2':frac_B2}\r\n df = pd.DataFrame(dic,index=['i',])\r\n\r\n # add columns name\r\n columns_all = ['Composition','Temperature / C','pH2O / atm','CH',\r\n 'A1','Valence A1','fraction A1','A2','Valence A2','fraction A2',\r\n 'B1','Valence B1','fraction B1','B2','Valence B2','fraction B2',\r\n 'B3','Valence B3','fraction 
B3','X1','Valence X1','fraction X1','fraction total']\r\n for c in columns_all:\r\n if not(c in df.columns):\r\n df[c] = float(np.NaN)\r\n df = df[columns_all]\r\n\r\n # add another experimental conditions\r\n df['pH2O / atm'] = partial_pressure_H2O\r\n df['Sintering temperature/C'] = SinteringTemperature\r\n df['Sintering time / h'] = SinteringTime\r\n df['fraction A2']='0'\r\n df['fraction B3']='0'\r\n df['X1']='O'\r\n df['Valence X1']='-2'\r\n df['fraction X1']='0.2'\r\n df['fraction total']='1'\r\n\r\n for cnt, tmp in enumerate(list_temperature):\r\n df['Temperature / C'] = tmp\r\n if cnt==0:\r\n df_all = df.copy()\r\n else:\r\n df_all = pd.concat([df_all,df], ignore_index=True)\r\n file_name = os.path.join(dir_output,'{:}_all.csv'.format(file_name_prefix, tmp))\r\n df_all.to_csv(file_name, index=False)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def main():\n std_template_file = os.path.join(os.environ['HOME'], 'prospect/py/prospect/data/std_templates.fits')\n if os.path.isfile(std_template_file):\n print('Error std template file already exists')\n\n #- Templates produced from 1st component of old (pre-Aug 2022) Redrock templates:\n template_dir = os.path.join(os.environ['DESICONDA'], '../code/redrock-templates/0.7.2')\n #std_templates = {'QSO': ('QSO',''), 'GALAXY': ('GALAXY',''), 'STAR': ('STAR','F') }\n std_templates = {'GALAXY': ('GALAXY',''), 'STAR': ('STAR','F') }\n delta_lambd_templates = 3\n\n rr_templts = load_redrock_templates(template_dir=template_dir)\n for key,rr_key in std_templates.items() :\n wave_array = np.arange(rr_templts[rr_key].wave[0], rr_templts[rr_key].wave[-1], delta_lambd_templates)\n flux_array = resample_flux(wave_array, rr_templts[rr_key].wave, rr_templts[rr_key].flux[0,:])\n table_templates = Table(data=[wave_array, flux_array], names=['wave_'+key, 'flux_'+key], meta={'name':key})\n table_templates.write(std_template_file, append=True)\n\n #- Case of QSO (Summer 2022): use new template provided by A. 
Brodzeller\n qsotemplate_file = os.environ['HOME'] + '/stdtemplate-qso.fits'\n hdul = fits.open(qsotemplate_file)\n qsowave = 10**(hdul[0].header['CRVAL1']+np.arange(hdul[0].header['NAXIS1'])*hdul[0].header['CDELT1'])\n qsoflux = hdul[0].data\n # Resample as previously:\n wave_array = np.arange(qsowave[0], qsowave[-1], delta_lambd_templates)\n flux_array = resample_flux(wave_array, qsowave, qsoflux)\n table_templates = Table(data=[wave_array, flux_array], names=['wave_QSO', 'flux_QSO'], meta={'name':'QSO'})\n table_templates.write(std_template_file, append=True)\n return 0", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def analyzeViSDEMData(dict):\n \n if 'path_in' in dict:\n path_in = dict['path_in']\n else:\n print(\"Caution: No path for input folder containing the data has been defined. 
Please define path to folder by dict['path_in']=path_in\") \n return\n \n path_out_default = '../colordeficiency-data/' \n if 'path_out' in dict:\n path_out = dict['path_out']\n else:\n print(\"Caution: No path for output folder where the data should be stored has been defined. Using default output path instead: \"+str(path_out_default))\n path_out = path_out_default\n \n if 'round' in dict:\n round = dict['round']\n else:\n print(\"Error: You have to chose a round first.\")\n \n path = os.path.join(os.path.dirname(os.path.abspath(os.path.join(__file__,os.pardir))),'colordeficiency-data')\n \n # 0. Step: Get all the relevant information, i.e. motive_ids, obs_col_defs etc.\n if round == 1:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids.csv\")\n elif round == 2:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids_2.csv\")\n \n vs_ids_sheet = pandas.read_csv(visualsearch_ids,sep=\";\")\n \n # Get all the relevant information about the observers, i.e. obs_col_defs etc.\n observer_ids = os.path.join(path,\"observer_ids.csv\")\n obs_ids_sheet = pandas.read_csv(observer_ids,sep=\";\")\n \n # 1. Step: Read all the XLSX data in the path\n ext = 'xlsx'; xlsx_files = getAllXXXinPath(path_in,ext)\n dataArray = pandas.DataFrame()\n i=1\n for xlsx_file in xlsx_files:\n sys.stdout.write(xlsx_file)\n dataArray_tmp, testArray, extraDataDict = extractExperimentData(os.path.join(path_in,xlsx_file))\n \n newDataArray = dataArray_tmp[['dalt_id','coldef_type','resp.corr_raw','resp.rt_raw','stimFile']]\n \n if \"2. Session\" in extraDataDict:\n sessionID = int(extraDataDict['2. Session'])\n newDataArray['session_id'] = sessionID\n \n if 'group' in extraDataDict:\n obsGroup = str(extraDataDict['group'])\n newDataArray['obsGroup'] = obsGroup\n \n if '0. Participant ID' in extraDataDict:\n obsID = int(extraDataDict['0. Participant ID'])\n \n newDataArray['observer_id'] = obsID\n obs_coldef_type = obs_ids_sheet.loc[obs_ids_sheet['observer_id']==obsID,['observer_coldef_type']]\n newDataArray['observer_coldef_type'] = int(obs_coldef_type['observer_coldef_type'])\n \n dataArray = pandas.concat([dataArray, newDataArray])\n sys.stdout.write(' . ')\n if (i%5)==0: sys.stdout.write('\\n')\n i+=1\n sys.stdout.write('\\n')\n \n # 2. 
Step: Adapt values to programstandards\n for item in settings.colDefLong2ID:\n dataArray.loc[dataArray['coldef_type'] == item, ['coldef_type']] = settings.colDefLong2ID[item]\n \n for item in settings.dalt2ID:\n dataArray.loc[dataArray['dalt_id'] == item, ['dalt_id']] = settings.dalt2ID[item]\n \n dataArray.loc[dataArray['dalt_id'] == 'none', ['dalt_id']] = 0\n \n \n dataArray = dataArray.rename(columns={'dalt_id': 'dalt_id',\n 'coldef_type': 'coldef_type',\n 'resp.corr_raw': 'is_correct',\n 'resp.rt_raw': 'resp_time',\n 'stimFile': 'filepath'})\n dataArray = dataArray.reset_index()\n \n # Adding set_id, motive_id and variant_id to each file\n for index, row in dataArray.iterrows():\n path_tmp = row['filepath']\n filename = os.path.basename(path_tmp).split('.')[0]\n dict_tmp = getStatsFromFilename(filename)\n imgID_tmp = int(dict_tmp['img_id'])\n \n tempVSDataArray = vs_ids_sheet.loc[vs_ids_sheet['image_id']==imgID_tmp,['set_id','motive_id','variant_id']]\n \n dataArray.at[index,'image_id'] = imgID_tmp\n dataArray.ix[index,'set_id'] = int(tempVSDataArray['set_id'])\n dataArray.ix[index,'motive_id'] = int(tempVSDataArray['motive_id'])\n dataArray.ix[index,'variant_id'] = int(tempVSDataArray['variant_id'])\n\n dataArray.image_id = dataArray.image_id.astype(int)\n dataArray.set_id = dataArray.set_id.astype(int)\n dataArray.motive_id = dataArray.motive_id.astype(int)\n dataArray.variant_id = dataArray.variant_id.astype(int)\n dataArray.is_correct = dataArray.is_correct.astype(bool)\n \n dataArray = dataArray[['image_id','set_id','motive_id','variant_id','dalt_id','coldef_type','is_correct','resp_time','observer_id','observer_coldef_type','session_id','filepath','obsGroup']]\n \n # 3. Saving data to file\n try:\n dataArray.to_csv(os.path.join(path_out, 'visdem-data.csv'),sep=\";\")\n sys.stdout.write(\"Success: ViSDEM data successfully saved in '\"+str(path_out)+\"'.\\n\")\n except Exception as e:\n print(e)", "def temperature(self):\n value = float(self._parent.query('R{}'.format(self._idx))[1:])\n return pq.Quantity(value, pq.Kelvin)", "def vorticity(tsr,solidity):\n \n # Reading in csv file (vorticity database)\n basepath = path.join(path.dirname(path.realpath(__file__)),'data')\n fdata = basepath + path.sep + 'vortdatabase.csv'\n f = open(fdata)\n csv_f = csv.reader(f)\n \n i = 0\n sol_d = np.array([])\n for row in csv_f:\n if i == 0:\n raw = row\n raw = np.delete(raw,0)\n vortdat = raw\n tsr_d = raw # range of tip-speed ratios included\n if row[0] == 'solidity':\n sol_d = np.append(sol_d,float(row[1])) # range of solidities included\n elif row[0] != 'TSR' and row[0] != 'solidity':\n raw = row\n raw = np.delete(raw,0)\n vortdat = np.vstack([vortdat,raw]) # adding entry to vorticity database array\n i += 1\n f.close()\n \n vortdat = np.delete(vortdat,(0),axis=0) # eliminating first row used as a placeholder\n tsr_d = tsr_d.astype(np.float) # converting tip-speed ratio entries into floats\n vortdat = vortdat.astype(np.float) # converting vorticity database entries into floats\n \n # Creating arrays for each EMG parameter\n for i in range(np.size(sol_d)):\n sol = str(i+1)\n \n exec('s'+sol+'_loc1 = vortdat[i*10]\\ns'+sol+'_loc2 = vortdat[i*10+1]\\ns'+sol+'_loc3 = vortdat[i*10+2]\\ns'+sol+'_spr1 = vortdat[i*10+3]\\ns'+sol+'_spr2 = vortdat[i*10+4]\\ns'+sol+'_skw1 = vortdat[i*10+5]\\ns'+sol+'_skw2 = vortdat[i*10+6]\\ns'+sol+'_scl1 = vortdat[i*10+7]\\ns'+sol+'_scl2 = vortdat[i*10+8]\\ns'+sol+'_scl3 = vortdat[i*10+9]\\n')\n \n # BIVARIATE SPLINE FITTING\n \n iz = np.size(sol_d)\n jz = 
np.size(tsr_d)\n \n # Initializing rectangular matrices\n Z_loc1 = np.zeros((iz,jz))\n Z_loc2 = np.zeros((iz,jz))\n Z_loc3 = np.zeros((iz,jz))\n Z_spr1 = np.zeros((iz,jz))\n Z_spr2 = np.zeros((iz,jz))\n Z_skw1 = np.zeros((iz,jz))\n Z_skw2 = np.zeros((iz,jz))\n Z_scl1 = np.zeros((iz,jz))\n Z_scl2 = np.zeros((iz,jz))\n Z_scl3 = np.zeros((iz,jz))\n \n # Transferring raw data into rectangular matrices\n for i in range(iz):\n for j in range(jz):\n sol = str(i+1)\n exec('Z_loc1[i,j] = s'+sol+'_loc1[j]')\n exec('Z_loc2[i,j] = s'+sol+'_loc2[j]')\n exec('Z_loc3[i,j] = s'+sol+'_loc3[j]')\n exec('Z_spr1[i,j] = s'+sol+'_spr1[j]')\n exec('Z_spr2[i,j] = s'+sol+'_spr2[j]')\n exec('Z_skw1[i,j] = s'+sol+'_skw1[j]')\n exec('Z_skw2[i,j] = s'+sol+'_skw2[j]')\n exec('Z_scl1[i,j] = s'+sol+'_scl1[j]')\n exec('Z_scl2[i,j] = s'+sol+'_scl2[j]')\n exec('Z_scl3[i,j] = s'+sol+'_scl3[j]')\n \n # Creating a rectangular bivariate spline of the parameter data\n s_loc1 = RectBivariateSpline(sol_d,tsr_d,Z_loc1)\n s_loc2 = RectBivariateSpline(sol_d,tsr_d,Z_loc2)\n s_loc3 = RectBivariateSpline(sol_d,tsr_d,Z_loc3)\n s_spr1 = RectBivariateSpline(sol_d,tsr_d,Z_spr1)\n s_spr2 = RectBivariateSpline(sol_d,tsr_d,Z_spr2)\n s_skw1 = RectBivariateSpline(sol_d,tsr_d,Z_skw1)\n s_skw2 = RectBivariateSpline(sol_d,tsr_d,Z_skw2)\n s_scl1 = RectBivariateSpline(sol_d,tsr_d,Z_scl1)\n s_scl2 = RectBivariateSpline(sol_d,tsr_d,Z_scl2)\n s_scl3 = RectBivariateSpline(sol_d,tsr_d,Z_scl3)\n \n # Selecting the specific parameters to use for TSR and solidity\n loc1 = s_loc1(solidity,tsr)\n loc2 = s_loc2(solidity,tsr)\n loc3 = s_loc3(solidity,tsr)\n spr1 = s_spr1(solidity,tsr)\n spr2 = s_spr2(solidity,tsr)\n skw1 = s_skw1(solidity,tsr)\n skw2 = s_skw2(solidity,tsr)\n scl1 = s_scl1(solidity,tsr)\n scl2 = s_scl2(solidity,tsr)\n scl3 = s_scl3(solidity,tsr)\n \n # Creating arrays of the parameters\n loc = np.array([loc1[0,0],loc2[0,0],loc3[0,0]])\n spr = np.array([spr1[0,0],spr2[0,0]])\n skw = np.array([skw1[0,0],skw2[0,0]])\n scl = np.array([scl1[0,0],scl2[0,0],scl3[0,0]])\n \n return loc,spr,skw,scl", "def process_weather(forecast_file):\n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n\n min_temp_store = {}\n max_temp_store = {}\n weather_results = str()\n header_results = str()\n\n for day_in_forecast in json_data['DailyForecasts']:\n day_date = day_in_forecast['Date']\n min_temp = day_in_forecast['Temperature']['Minimum'][\"Value\"]\n min_temp_c = convert_f_to_c(min_temp)\n min_temp_store[day_date] = min_temp_c\n max_temp = day_in_forecast['Temperature']['Maximum'][\"Value\"]\n max_temp_c = convert_f_to_c(max_temp)\n max_temp_store[day_date] = max_temp_c\n\n day_time_phrase = day_in_forecast['Day']['LongPhrase']\n rain_chance_day = day_in_forecast['Day']['RainProbability']\n night_time_phrase = day_in_forecast['Night']['LongPhrase']\n rain_chance_night = day_in_forecast['Night']['RainProbability']\n weather_results = weather_results + (f\"-------- {convert_date(day_date)} --------\\nMinimum Temperature: {format_temperature(round(min_temp_c,1))}\\nMaximum Temperature: {format_temperature(round(max_temp_c,1))}\\nDaytime: {day_time_phrase}\\n Chance of rain: {rain_chance_day}%\\nNighttime: {night_time_phrase}\\n Chance of rain: {rain_chance_night}%\\n\")+ \"\\n\"\n\n\n max_day = max(max_temp_store, key=max_temp_store.get)\n max_value = max_temp_store[max_day]\n min_day = min(min_temp_store, key=min_temp_store.get)\n min_value = min_temp_store[min_day]\n max_totals = (sum(max_temp_store.values()))\n min_totals = 
(sum(min_temp_store.values()))\n num_items = len(min_temp_store)\n mean_min = round(calculate_mean(min_totals,num_items),1)\n mean_max = round(calculate_mean(max_totals,num_items),1)\n\n save_header = (f\"{len(json_data['DailyForecasts'])} Day Overview\\n The lowest temperature will be {format_temperature(round((min_value),1))}, and will occur on {convert_date(min_day)}.\\n The highest temperature will be {format_temperature(round((max_value),1))}, and will occur on {convert_date(max_day)}.\\n The average low this week is {format_temperature(mean_min)}.\\n The average high this week is {format_temperature(mean_max)}.\\n\")\n\n header_results = save_header + \"\\n\"+ weather_results\n \n return(header_results)", "def get_temp():\n epts = [\"cage_coldPlate_temp\", \"cage_pressure\"]\n # t_earlier_aug = '2019-10-02T00:00'\n # t_later_aug = datetime.utcnow().isoformat()\n t_earlier_aug = '2019-09-27T13:00'\n t_later_aug = '2019-09-28T19:49'\n dfs = pandas_db_query(epts, t_earlier_aug, t_later_aug)\n print(dfs[epts[0]].tail())\n\n exit()\n\n xv = dfs[epts[0]][\"timestamp\"]\n yv = dfs[epts[0]][epts[0]]\n plt.plot(xv, yv, '-b')\n plt.ylabel(epts[0], ha='right', y=1)\n\n p1a = plt.gca().twinx()\n xv = dfs[epts[1]][\"timestamp\"]\n yv = dfs[epts[1]][epts[1]]\n p1a.set_ylabel(epts[1], color='r', ha='right', y=1)\n p1a.tick_params('y', colors='r')\n p1a.semilogy(xv, yv, '-r')\n\n plt.gcf().autofmt_xdate()\n plt.tight_layout()\n plt.show()", "def temperatures():\n hi_act= session.query(measurements.tobs,measurements.date,measurements.station).\\\n filter(measurements.station == 'USC00519281').\\\n filter(measurements.date >last_12).\\\n order_by(measurements.date).all()\n hi_act_df=pd.DataFrame(hi_act).set_index('date')\n hi_act_dict=hi_act_df.to_dict()\n return jsonify(hi_act_dict)", "def run_get_tones_for_15k_subset():\n add_tone_columns_to_csv('politics_30_months_comments_cleaned_standardized_vader_flair_15k.csv',\n 'politics_30_months_comments_cleaned_standardized_vader_flair_15k_tones.csv')", "def run_purity_plotting(input_tsvs, output_dir):\n # type: (list[str], str) -> None\n result_cols = [\"sensitivity\", \"sens_lo\", \"sens_hi\", \"sens_N\", \"precision\", \"prec_lo\", \"prec_hi\", \"prec_N\", \"purity\", \"pass\"]\n amp_results_df = DataFrame(columns=result_cols)\n del_results_df = DataFrame(columns=result_cols)\n\n min_sensitivity = 0.85\n min_precision = 0.8\n min_supported_purity = 0.39\n\n for i, input_tsv in enumerate(input_tsvs):\n purity = find_purity_from_filename(input_tsv)\n print(input_tsv + \" purity: \" + str(purity))\n\n if purity is None:\n print(\"The file \" + input_tsv + \" is unrecognized as being a HCC1143T purity file, so it is being skipped. 
Please see the src code here if you believe this is an error.\")\n continue\n\n sample = find_sample_from_filename(input_tsv)\n\n segs_df_tmp = pandas.read_csv(input_tsv, sep=\"\\t\", comment=\"@\")\n\n # Clean up by removing all locations where there was more than one ground truth value for copy number/ratio\n segs_df = segs_df_tmp[segs_df_tmp[GT_CN_COLUMN_NAME].apply(more_than_one_value)]\n tmp = segs_df[GT_CN_COLUMN_NAME]\n tmp = pandas.to_numeric(tmp, errors='coerce', downcast='integer')\n segs_df[GT_CN_COLUMN_NAME] = tmp\n\n cr_gt = 1 + (purity * ((segs_df[GT_CN_COLUMN_NAME] / ploidy) - 1))\n cr_gt.rename(GT_CR_COLUMN_NAME, inplace=True)\n\n if IS_LOG2_GUESS_CR:\n cr_guess = 2 ** segs_df[INPUT_GUESS_CR_COLUMN_NAME]\n else:\n cr_guess = segs_df[INPUT_GUESS_CR_COLUMN_NAME]\n\n cr_guess.rename(GUESS_CR_COLUMN_NAME, inplace=True)\n\n segs_df[GT_CR_COLUMN_NAME] = cr_gt\n segs_df[GUESS_CR_COLUMN_NAME] = cr_guess\n segs_gt_to_consider = segs_df[~segs_df[\"CALL\"].isnull() & (segs_df[\"CONTIG\"] != \"2\")]\n\n ## Amps\n tp = segs_gt_to_consider[(segs_gt_to_consider[\"CALL\"] == \"+\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] >= 5)]\n all_gt_amp = segs_gt_to_consider[segs_gt_to_consider[GT_CN_COLUMN_NAME] >= 5]\n sens_amps = float(len(tp)) / float(len(all_gt_amp))\n sens_amps_ci = clopper_pearson(len(tp), len(all_gt_amp))\n sens_amps_N = len(all_gt_amp)\n\n fp = segs_gt_to_consider[(segs_gt_to_consider[\"CALL\"] == \"+\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] <= 4)]\n prec_amps = float(len(tp)) / float(len(tp) + len(fp))\n prec_amps_ci = clopper_pearson(len(tp), (len(tp) + len(fp)))\n prec_amps_N = len(tp) + len(fp)\n\n amp_result = Series(name=sample, data={result_cols[0]: sens_amps, result_cols[1]: sens_amps_ci[0],\n result_cols[2]: sens_amps_ci[1], result_cols[3]: sens_amps_N,\n result_cols[4]: prec_amps, result_cols[5]: prec_amps_ci[0],\n result_cols[6]: prec_amps_ci[1], result_cols[7]: prec_amps_N,\n result_cols[8]: purity,\n result_cols[9]: is_passing(sens_amps_ci[1], prec_amps_ci[1], purity, min_sensitivity, min_precision,\n min_supported_purity)})\n\n amp_results_df = amp_results_df.append(amp_result)\n amp_results_df.sort_values(result_cols[8], inplace=True)\n\n print(\"Amp sensitivity: \" + str(sens_amps) + \" \" + str(sens_amps_ci))\n print(\"Amp precision: \" + str(prec_amps) + \" \" + str(prec_amps_ci))\n\n ## Dels\n tp_del = segs_gt_to_consider[\n (segs_gt_to_consider[\"CALL\"] == \"-\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] <= 2)]\n all_gt_del = segs_gt_to_consider[segs_gt_to_consider[GT_CN_COLUMN_NAME] <= 2]\n sens_dels = float(len(tp_del)) / float(len(all_gt_del))\n sens_dels_ci = clopper_pearson(len(tp_del), len(all_gt_del))\n sens_dels_N = len(all_gt_del)\n\n fp_del = segs_gt_to_consider[\n (segs_gt_to_consider[\"CALL\"] == \"-\") & (segs_gt_to_consider[GT_CN_COLUMN_NAME] > 2)]\n prec_dels = float(len(tp_del)) / float(len(tp_del) + len(fp_del))\n prec_dels_ci = clopper_pearson(len(tp_del), (len(tp_del) + len(fp_del)))\n prec_dels_N = len(tp_del) + len(fp_del)\n\n del_result = Series(name=sample, data={result_cols[0]: sens_dels, result_cols[1]: sens_dels_ci[0],\n result_cols[2]: sens_dels_ci[1], result_cols[3]: sens_dels_N,\n result_cols[4]: prec_dels, result_cols[5]: prec_dels_ci[0],\n result_cols[6]: prec_dels_ci[1], result_cols[7]: prec_dels_N,\n result_cols[8]: purity,\n result_cols[9]: is_passing(sens_dels_ci[1], prec_dels_ci[1], purity,\n min_sensitivity, min_precision,\n min_supported_purity)})\n del_results_df = del_results_df.append(del_result)\n 
del_results_df.sort_values(result_cols[8], inplace=True)\n\n print(\"Del sensitivity: \" + str(sens_dels) + \" \" + str(sens_dels_ci))\n print(\"Del precision: \" + str(prec_dels) + \" \" + str(prec_dels_ci))\n\n if len(amp_results_df) > 0 and len(del_results_df) > 0:\n plot_purity_series(output_dir, amp_results_df, \"Amplifications\", min_sensitivity, min_precision, min_supported_purity)\n plot_purity_series(output_dir, del_results_df, \"Deletions\", min_sensitivity, min_precision, min_supported_purity)\n amp_results_df.to_csv(output_dir + \"Amplifications_table.tsv\", sep=\"\\t\")\n del_results_df.to_csv(output_dir + \"Deletions_table.tsv\", sep=\"\\t\")", "def get_iPTF16hgs(colorplt = False):\n z = 0.017\n ebv = 0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = pd.read_csv('../data/otherSN/iPTF16hgs/table1.txt', sep=\"\\t\")\n tb = tb.drop(columns=[\"Unnamed: 5\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Magnitude'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Magnitude'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Magnitude'].values])\n tb = tb.drop(columns=[\"Magnitude\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n t_max = 57691.59 # from the paper\n tb['tmax_of'] = tb['mjd'] - t_max\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n \"\"\"\n plt.errorbar(tb[\"tmax_rf\"].values[ixg], tb[\"mag\"].values[ixg], tb[\"emag\"].values[ixg], fmt=\".g\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixr], tb[\"mag\"].values[ixr], tb[\"emag\"].values[ixr], fmt=\".r\")\n plt.errorbar(tb[\"tmax_rf\"].values[ixi], tb[\"mag\"].values[ixi], tb[\"emag\"].values[ixi], fmt=\".y\")\n \"\"\"\n tb = add_datecol(tb)\n tb = add_physcol(tb)\n #tb = tb.drop(columns=[\"datetime64\"])\n if colorplt==False:\n return tb\n else:\n #tb = tb[tb.mjd > 55352.5]\n #tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = 
np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def main(temp, humid):\n user = 'root'\n password = 'root'\n dbname = 'iot'\n dbuser = 'raspberry'\n dbuser_password = 'password'\n query = 'select temp_value,humid_value from temp_humid;'\n json_body = [\n {\n \"measurement\": \"temp_humid\",\n \"fields\": {\n \"temp_value\": temp,\n \"humid_value\":humid \n\t}\n }\n ]\n\n client = InfluxDBClient('localhost', 8086, user, password, dbname)\n\n #client.create_database(dbname)\n\n print(\"Write points: {0}\".format(json_body))\n client.write_points(json_body)\n\n #print(\"Querying data: \" + query)\n #result = client.query(query)\n\n #print(\"Result: {0}\".format(result))\n\n #client.drop_database(dbname)", "def get_taux(name):\n indicateurResult = get_empty_kpi()\n config = get_config(name)\n log.debug('Processing - '+name)\n\n indicateurResult['nom'] = config['nom']\n indicateurResult['unite'] = config['unite']\n indicateurResult['unite_short'] = config['unite_short']\n indicateurResult['trendType'] = config['trendType']\n indicateurResult['color'] = get_color_indicateur(config)\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_fra'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine_glissante'].apply(lambda x: str(x)[11:])\n for country in tqdm(countries, desc=\"Processing National\"):\n res = process_stock(\n df,\n 'nat',\n 'fra',\n config['trendType'],\n name\n )\n indicateurResult['france'].append(res)\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_reg'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine_glissante'].apply(lambda x: str(x)[11:])\n for reg in tqdm(df.reg.unique(), desc=\"Processing Régions\"):\n res = process_stock(\n df[df['reg'] == reg].copy(),\n 'reg',\n reg,\n config['trendType'],\n name\n )\n indicateurResult['regions'].append(res)\n\n df = pd.read_csv(\n 'files_new/'+config['res_id_dep'],\n sep=None,\n engine='python',\n dtype={'reg': str, 'dep': str}\n )\n df = enrich_dataframe(df, name)\n df['date'] = df['semaine_glissante'].apply(lambda x: str(x)[11:])\n for dep in tqdm(df.dep.unique(), desc=\"Processing Départements\"):\n res = process_stock(\n df[df['dep'] == dep].copy(),\n 'dep',\n dep,\n config['trendType'],\n name\n )\n indicateurResult['departements'].append(res)\n\n save_result(indicateurResult, name)", "def Q_flux(self):\n fields = self.read_vars(['x','y','z'])\n Z, Y, X = np.meshgrid(fields['z']/self.params['Lz'],\n fields['y']/self.params['Ly'] - 0.5,\n fields['x']/self.params['Lx'] - 0.5, indexing='ij')\n\n r = np.sqrt(X**2 + Y**2)\n r0 = 0.01\n msk = 0.5*(1.-np.tanh(r/r0))\n delta = 1/(self.params[\"global_nz\"])\n Q =1e-5*np.exp(-Z/delta)/delta*msk\n\n return Q", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n 
logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def temperatures():\n\n return station_9281", "def readqFile(self, PFC, t):\n if self.qFilePath[-1] != '/':\n base = self.qFilePath + '/'\n else:\n base = self.qFilePath\n\n f = base + '{:06d}/'.format(t) + PFC.name + '/' + self.qFileTag\n try:\n df = pd.read_csv(f, names=['X','Y','Z','HF'], skiprows=[0])\n if len(df['HF'].values) != len(PFC.centers):\n print('HF file mesh is not same length as STL file mesh.')\n print('Will not assign HF to mismatched mesh')\n print(\"qFile length: {:d}\".format(len(df['HF'].values)))\n print(\"PFC STL mesh length: {:d}\".format(len(PFC.centers)))\n val = -1\n else:\n PFC.qDiv = df['HF'].values\n PFC.powerFrac = self.getDivertorPowerFraction(PFC.DivCode)\n PFC.qOpticalList.append(PFC.qDiv)\n print(\"Loaded heat flux from file: \"+f)\n val = 0\n except:\n print(\"COULD NOT READ qFILE PATH: \"+f)\n print(\"Please point HEAT to a valid qFilePath and qFileTag,\")\n print(\"which should be a .csv file with (X,Y,Z,HF)\")\n val = -1\n\n return val", "def test():\n temp_data = fetch_temp_data(\n (\"https://opendata-download-metobs.smhi.se/api/version/\" +\n \"latest/parameter/1/station/52350/period/latest-day/data.json\"))\n data = temp_series(temp_data)\n print(data)", "def readtempfilt(MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT'):\n\n root = os.path.join(OUTPUT_DIRECTORY, MAIN_OUTPUT_FILE)\n \n ###### .tempfilt\n file_path = root+'.tempfilt'\n \n if os.path.exists(file_path) is False:\n raise ValueError('File, %s, not found.' %(file_path))\n\n with open(file_path,'rb') as f:\n # summary data\n s = np.fromfile(file=f,dtype=np.int32, count=4)\n NFILT=s[0] # number of filters\n NTEMP=s[1] # number of templates\n NZ=s[2] # number points on the redshift grid\n NOBJ=s[3] # number of objects\n # (?) 
template SED convolved with filter transmission at each redshift\n tempfilt = np.fromfile(file=f,dtype=np.double,count=NFILT*NTEMP*NZ).reshape((NZ,NTEMP,NFILT)).transpose()\n # filter pivot wavelengths\n lc = np.fromfile(file=f,dtype=np.double,count=NFILT)\n # redshift grid\n zgrid = np.fromfile(file=f,dtype=np.double,count=NZ)\n # observed flux\n fnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()\n # (?) error in observed flux\n efnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()\n \n tempfilt = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\\\n 'tempfilt':tempfilt,'lc':lc,'zgrid':zgrid,'fnu':fnu,'efnu':efnu}\n\n return tempfilt", "def read_elia_activated_energy_prices(filename,status):\r\n \r\n df = pd.read_excel(filename,skiprows=2,parse_dates=False)\r\n df[\"Timestamp\"] = df[\"Date\"]+\" \"+df['Quarter'].map(lambda x: str(x)[:-9])\r\n pd.to_datetime(df[\"Timestamp\"])\r\n df.set_index(\"Timestamp\",inplace=True)\r\n if ((status == \"validated\") | (status == \"valid\")):\r\n df = df.drop(df[df.Status != \"Validated\"].index)\r\n df = df.drop([\"Date\",\"Quarter\",\"Status\"], axis=1)\r\n \r\n if len(df.columns)>14:\r\n df.columns.values[0:16] = [\"NRV in MW\",\"SR in euro/MWh\",\"MIP in euro/MWh\",\"IGGC+ in euro/MWh\", \"R2+ in euro/MWh\",\"Bids+ in euro/MWh\",\"R3 std in euro/MWh\", \"R3 flex in euro/MWh\", \"ICH in euro/MWh\", \"inter TSO import in euro/MWh\", \"MDP in euro/MWh\", \"IGCC- in euro/MWh\", \"R2- in euro/MWh\", \"Bids- in euro/MWh\", \"R3- in euro/MWh\"]\r\n\r\n if len(df.columns)<12:\r\n df.columns.values[0:12] = [\"NRV in MW\",\"MIP in euro/MWh\",\"IGGC+ in euro/MWh\", \"R2+ in euro/MWh\",\"Bids+ in euro/MWh\", \"R3+ in euro/MWh\", \"MDP in euro/MWh\", \"IGCC- in euro/MWh\", \"R2- in euro/MWh\", \"Bids- in euro/MWh\", \"R3- in euro/MWh\"]\r\n\r\n return df", "def export_results_max():\n\n # initialise the list of generated files\n gen_files = []\n\n ######\n # Define allowed variable names and associated equations to generate values.\n ######\n # Note that mannings n (friction value) is taken as 0.01, as in the model\n # run density of water is 1000\n var_equations = {'stage': enm.export_newstage_max,\n 'oldstage': 'stage',\n 'momentum': '(xmomentum**2 + ymomentum**2)**0.5',\n 'olddepth': 'oldstage-elevation',\n 'depth': edm.export_depthonland_max,\n 'speed': '(xmomentum**2 + ymomentum**2)**0.5/(stage-elevation+1.e-6)',\n 'energy': '(((xmomentum/(stage-elevation+1.e-6))**2'\n ' + (ymomentum/(stage-elevation+1.e-6))**2)'\n '*0.5*1000*(stage-elevation+1.e-6))+(9.81*stage*1000)',\n 'bed_shear_stress': ('(((1/(stage-elevation+1.e-6)**(7./3.))*1000*9.81*0.01**2*(xmomentum/(stage-elevation+1.e-6))*((xmomentum/(stage-elevation+1.e-6))**2+(ymomentum/(stage-elevation+1.e-6))**2)**0.5)**2'\n '+ ((1/(stage-elevation+1.e-6)**(7./3.))*1000*9.81*0.01**2*(ymomentum/(stage-elevation+1.e-6))*((xmomentum/(stage-elevation+1.e-6))**2+(ymomentum/(stage-elevation+1.e-6))**2)**0.5)**2)**0.5'),\n 'elevation': 'elevation'}\n\n ######\n # Start script, running through variables, area, sww file\n ######\n\n for which_var in project.layers_list:\n which_var = which_var.lower()\n log.info(\"Exporting value: %s\" % which_var)\n\n if which_var not in var_equations:\n log.critical('Unrecognized variable name: %s' % which_var)\n break\n\n project.export_area = project.export_area.lower()\n if project.export_area == 'all':\n easting_min = None\n easting_max = None\n northing_min = None\n northing_max = None\n elif 
project.export_area == 'aoi':\n easting_min = project.xminAOI\n easting_max = project.xmaxAOI\n northing_min = project.yminAOI\n northing_max = project.ymaxAOI\n else:\n log.critical('Unrecognized area name: %s' % project.export_area)\n break\n\n name = os.path.join(project.output_folder, project.scenario)\n\n outname = name + '_' + project.export_area + '_' + which_var\n quantityname = var_equations[which_var]\n\n log.info('Generating output file: %s' % (outname+'.asc'))\n\n # assume 'quantityname' is a string, handle in the old way,\n # else call the handler function (same params as anuga.sww2dem)\n if isinstance(quantityname, basestring):\n export_func = anuga.sww2dem\n elif callable(quantityname):\n export_func = quantityname\n\n export_func(name+'.sww', outname+'.asc', quantity=quantityname,\n reduction=max, cellsize=project.cell_size,\n easting_min=easting_min, easting_max=easting_max,\n northing_min=northing_min, northing_max=northing_max,\n verbose=False)\n\n # add generated filename to result list\n gen_files.append(outname+'.asc')\n\n return gen_files", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n 
nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def plc_temp(coil_df):", "def calc(main_system,dir_db,dir_def,xlfile,mu_limit,functional=\"GGA\",soc=False,logfile=None):\n \n ## set up logging\n if logfile:\n myLogger = logging.setup_logging(logfile)\n else:\n myLogger = logging.setup_logging()\n\n\n ## load list of dataframes from sheets from excel file \n df = pd.read_excel(os.path.join(dir_def,xlfile),sheet_name=None)\n \n \n ## find initdef.json file\n if osutils.check_file_exists(dir_def,\"initdef\") == True:\n for file in os.listdir(dir_def): \n if file.startswith(\"initdef\"):\n file_initdef = file\n ## get species i and ni from initdefect.json file \n with open(os.path.join(dir_def,file_initdef), 'r') as file:\n initdef = json.loads(file.read())\n species_list, ni_list = [],[]\n for defect in initdef:\n species, ni = get_i_ni(initdef[defect])\n species_list += species\n ni_list += ni\n myLogger.info(\"Atoms added/removed: \" + \\\n \", \".join([str(n)+\"*\"+i for n,i in zip(ni_list,species_list)]))\n\n \n for q in [qi for qi in df.keys()]:\n \n ## get the relevant chemical potentials\n found_mu = True\n for species in species_list:\n mu = \"mu_%s_%s\"%(species,mu_limit)\n \n ## check if the relevant database entry exists\n if osutils.check_file_exists(dir_db,\"%s.json\"%species) == True:\n dbentry_file = \"%s.json\"%species\n with open(os.path.join(dir_db, dbentry_file), 'r') as file:\n mater = json.loads(file.read())\n ## search for appropriate mu entry\n mu_key = \"mu\"\n for key in mater[functional].keys():\n if key.startswith(\"mu_%s\"%mu_limit):\n mu_key = key\n myLogger.info(\"Using chemical potential \" + mu_key + \" from \" + dbentry_file) \n ## input the corresponding mus into the dataframe\n df[q][mu] = mater[functional][mu_key]\n \n else:\n myLogger.info(\"Cannot find the database entry for \" + species)\n found_mu = False\n \n \n ## get the VBMs\n ## check if the relevant database entry exists\n if osutils.check_file_exists(dir_db,\"%s.json\"%main_system) == True:\n dbentry_file = 
\"%s.json\"%(main_system)\n with open(os.path.join(dir_db, dbentry_file), 'r') as file:\n mater = json.loads(file.read()) \n \n ## input the VBMs corresponding to each vacuum spacing into the dataframe\n for rowind in df[q].index.values:\n vac = df[q].loc[rowind].vacuum\n if vac in mater[functional].keys():\n df[q].at[rowind,'VBM'] = mater[functional][vac][\"VBM\"]\n else:\n myLogger.info(\"Cannot find the VBM entry for \" + vac) \n \n ## Finally, we can compute the uncorrected defect formation energy:\n ## Eform = Etot(def) - Etot(pristine) - sum(n_i*mu_i) + q*E_Fermi\n if found_mu:\n ## proceed if chemical potentials and VBMs have been correctly entered\n sum_mu = 0\n for n,species in zip(ni_list,species_list):\n mu = \"mu_%s_%s\"%(species,mu_limit)\n sum_mu += n * df[q][mu]\n if q == 'charge_0': \n colname = \"E_form_corr\"\n else:\n colname = \"E_form_uncorr\"\n df[q][colname] = df[q].loc[:,'E_def'] \\\n - df[q].loc[:,'E_bulk'] \\\n - sum_mu \\\n + int(q.split(\"_\")[-1]) * df[q].loc[:,'VBM'] \n \n else:\n myLogger.info(\"Cannot find the database entry for \" + main_system)\n\n\n ## write the updated excel file\n writer = pd.ExcelWriter(os.path.join(dir_def,xlfile))\n for q in df.keys(): \n df[q].to_excel(writer, q, index=False)\n writer.save()", "def Thrustexe_data(h,Vc,Temp_m,FFl,FFr):\n T,p,rho = isa(h)\n M = Mach(h,Vc,p)\n Delta_T = Static_T(Temp_m,M)-T\n data = np.array([h,M,Delta_T,FFl,FFr]).T\n\n np.savetxt(\"matlab.dat\",data)\n return", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n 
k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n 
rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def SAM_CSV_to_solar_data(filename):\n if not os.path.isfile(filename):\n raise FileNotFoundError(filename + \" does not exist.\")\n wfd = defaultdict(list)\n with open(filename) as file_in:\n info = []\n for i in range(2):\n info.append(file_in.readline().rstrip())\n info[i] = info[i].split(\",\")\n if \"Time Zone\" not in info[0]:\n raise ValueError(\"`Time Zone` field not found in solar resource file.\")\n latitude = info[1][info[0].index(\"Latitude\")]\n longitude = info[1][info[0].index(\"Longitude\")]\n tz = info[1][info[0].index(\"Time Zone\")]\n elev = info[1][info[0].index(\"Elevation\")]\n source = info[1][info[0].index(\"Source\")]\n reader = csv.DictReader(file_in)\n for row in reader:\n for col, dat in row.items():\n if len(col) > 0:\n wfd[col].append(float(dat))\n\n weather = dict()\n weather['tz'] = float(tz)\n weather['elev'] = float(elev)\n weather['lat'] = float(latitude)\n weather['lon'] = 
float(longitude)\n\n # Create dict with keys = keys passed to SAM and values = list of possible key versions found in resource files (NREL / NASA POWER)\n acceptable_keys = {\n 'year' : ['year', 'Year', 'yr'],\n 'month' : ['month', 'Month', 'mo'],\n 'day' : ['day', 'Day'],\n 'hour' : ['hour', 'Hour', 'hr'],\n 'minute' : ['minute', 'Minute', 'min'],\n 'dn' : ['dn', 'DNI','dni', 'beam', 'direct normal', 'direct normal irradiance'],\n 'df' : ['df', 'DHI', 'dhi', 'diffuse', 'diffuse horizontal', 'diffuse horizontal irradiance'],\n 'gh' : ['gh', 'GHI','ghi', 'global', 'global horizontal', 'global horizontal irradiance'],\n 'wspd' : ['wspd', 'Wind Speed', 'wind speed'],\n 'tdry' : ['tdry', 'Temperature', 'dry bulb', 'dry bulb temp', 'temperature', 'ambient', 'ambient temp'],\n 'wdir' : ['wdir', 'Wind Direction', 'wind direction'],\n 'pres' : ['pres', 'Pressure', 'pressure'],\n 'tdew' : ['tdew', 'Dew Point', 'Tdew', 'dew point', 'dew point temperature'],\n 'rhum' : ['rhum', 'Relative Humidity', 'rh', 'RH', 'relative humidity', 'humidity'],\n 'alb' : ['alb', 'Surface Albedo', 'albedo', 'surface albedo'],\n 'snow' : ['snow', 'Snow Depth', 'snow depth', 'snow cover']\n }\n \n # enumerates acceptable_keys, inserts key and values into weather dictionary if found in the resource file\n for key, list_of_keys in acceptable_keys.items():\n for good_key in list_of_keys:\n if good_key in wfd.keys():\n weather[key] = wfd.pop(good_key)\n break\n\n # handles averaged hourly data with no minute column provided by NASA POWER and removes 2/29 data for leap years\n # this is a workaround so PySAM/SAM processes as instantaneous data (not setup to handle no minute column)\n if source == 'NASA/POWER':\n weather['minute'] = [30] * len(weather['hour'])\n if len(weather['hour']) == 8784:\n for key in weather.keys():\n if key not in ['tz','elev','lat','lon']:\n del weather[key][1416:1440]\n\n\n return weather", "def main():\n\n PARSER = argparse.ArgumentParser()\n PARSER.add_argument('-TIC', default=307210830, type=str)\n PARSER.add_argument('-from_file', default='any')\n PARSER.add_argument('-tic_id', default='TIC_ID')\n PARSER.add_argument('-path_plots', default='.')\n PARSER.add_argument('-interactive', action='store_true', default=False)\n PARSER.add_argument('-no_detrend', action='store_true', default=False)\n PARSER.add_argument('-window', default=1501, type=int)\n PARSER.add_argument('-GP', default=False, action='store_true')\n PARSER.add_argument('-from_fits', default=False, action='store_true')\n PARSER.add_argument('-path_fits', default=None)\n PARSER.add_argument('-noTLS', default=False, action='store_true')\n PARSER.add_argument('-tofile', default=False, action='store_true')\n PARSER.add_argument('-tojuliet', default=False, action='store_true')\n\n ARGS = PARSER.parse_args()\n\n if ARGS.from_file != 'any':\n TICs = []\n if os.path.isfile(ARGS.from_file):\n try:\n TICs = ascii.read(ARGS.from_file)[ARGS.tic_id]\n except KeyError:\n print('Column %s does not exist' % ARGS.tic_id)\n else:\n print('Filename does not exist')\n\n else:\n TICs = [float(i) for i in ARGS.TIC.split(',')]\n\n detrend = not ARGS.no_detrend\n path_plots = ARGS.path_plots\n interactive = ARGS.interactive\n window = ARGS.window\n gp = ARGS.GP\n from_fits = ARGS.from_fits\n path_fits = ARGS.path_fits\n noTLS = ARGS.noTLS\n tofile = ARGS.tofile\n tojuliet = ARGS.tojuliet\n\n if len(TICs) > 0:\n for i, TIC in enumerate(TICs):\n try:\n print('TIC %d (%d/%d)' % (TIC, i+1, len(TICs)))\n _ = get_lk(TIC, path_plots=path_plots, detrend=detrend, 
interactive=interactive,\n window=window, gp_detrend=gp, from_fits=from_fits, path_fits=path_fits,\n noTLS=noTLS, tofile=tofile, tojuliet=tojuliet)\n except Exception as e:\n print('Problem with object TIC %d' % TIC)\n _, _, exc_tb = sys.exc_info()\n print('line %d: %s' % (exc_tb.tb_lineno, e))", "def get_sn2018kzr(colorplt = False):\n ebv = 0.113/3.1\n z = 0.053\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 58480.422+0.1\n \n f = open('../data/otherSN/Mcbrien2019/table1.tex')\n lines = f.readlines()\n f.close()\n lines = lines[:-4]\n \n dates = [x.split(\"&\")[0] for x in lines]\n mjds = [float(x.split(\"&\")[1]) for x in lines]\n phases = [float(x.split(\"&\")[2].replace('$', '').replace('\\t', '')) for x in lines]\n gs = [x.split(\"&\")[3].replace('$', '') for x in lines]\n rs = [x.split(\"&\")[4].replace('$', '') for x in lines]\n iis = [x.split(\"&\")[5].replace('$', '') for x in lines]\n zs = [x.split(\"&\")[6].replace('$', '') for x in lines]\n insts = [x.split(\"&\")[7] for x in lines]\n \n dtg = digital_latex(mjds, phases, gs, insts)\n dtr = digital_latex(mjds, phases, rs, insts)\n dti = digital_latex(mjds, phases, iis, insts)\n \n filt = np.hstack([np.repeat(\"g\", len(dtg[0])),\n np.repeat(\"r\", len(dtr[0])),\n np.repeat(\"i\", len(dti[0]))])\n phase = np.hstack([dtg[1], dtr[1], dti[1]])\n mag = np.hstack([dtg[2], dtr[2], dti[2]])\n emag = np.hstack([dtg[3], dtr[3], dti[3]])\n mjd = np.hstack([dtg[0], dtr[0], dti[0]])\n \n tb = Table(data = [(mjd - t_max) / (1+z), mag, emag, filt],\n names = ['tmax_rf', 'mag', 'emag', 'filter'])\n \n ixr = tb['filter'] == \"r\"\n ixg = tb['filter'] == \"g\"\n ixi = tb['filter'] == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'][ixg] = 4814\n tb['wave'][ixr] = 6422\n tb['wave'][ixi] = 7883\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'], 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb.to_pandas()\n return tb", "def main():\n # files\n summary_file = sys.argv[1]\n pwms_to_tfs_file = sys.argv[2]\n expressed_tfs_file = sys.argv[3] # TODO\n\n # TODO pull in num regions to resize things? 
but complicated with overlaps etc\n # TODO edit edges with type of interaction\n # TODO may want to color by trajectory, to demonstrate waves of trajectory\n \n # read in data\n summary = pd.read_csv(summary_file, sep=\"\\t\")\n pwms_to_tfs = pd.read_csv(pwms_to_tfs_file, sep=\"\\t\")\n pwms_to_tfs = pwms_to_tfs[pwms_to_tfs[\"expressed\"].notna()]\n pwms_to_filt_tfs = {} # dict: key - pwm names, vals - dict of ensembl ids to hgnc ids\n for line_idx in range(pwms_to_tfs.shape[0]):\n pwm_info = pwms_to_tfs.iloc[line_idx,:]\n pwm_name = pwm_info[\"hclust_model_name\"]\n pwm_to_tf = dict(zip(pwm_info[\"expressed\"].split(\";\"), pwm_info[\"expressed_hgnc\"].split(\";\")))\n pwms_to_filt_tfs[pwm_name] = pwm_to_tf\n\n \n # filter expressed hgncs for dynamic ones only\n tfs_filt = pd.read_csv(expressed_tfs_file, sep=\"\\t\", index_col=0)\n for pwm_name in pwms_to_filt_tfs.keys():\n tfs_tmp = pwms_to_filt_tfs[pwm_name]\n for ensembl_tf in tfs_tmp.keys():\n if ensembl_tf not in tfs_filt.index:\n del tfs_tmp[ensembl_tf]\n if len(tfs_tmp.keys()) == 0:\n del pwms_to_filt_tfs[pwm_name]\n pwms_to_filt_tfs[pwm_name] = tfs_tmp\n\n # add in tfs column\n tf1 = []\n for pwm in summary[\"pwm1\"]:\n tf_str = []\n for ensembl_id in pwms_to_filt_tfs[pwm]:\n tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])\n # TODO try add in max point\n expression = tfs_filt.loc[ensembl_id,:]\n max_idx = np.argmax(expression.values)\n tf_str.append(str(max_idx))\n tf_str = (\";\").join(tf_str)\n tf1.append(tf_str)\n summary[\"tf1\"] = tf1\n\n tf2 = []\n for pwm in summary[\"pwm2\"]:\n tf_str = []\n for ensembl_id in pwms_to_filt_tfs[pwm]:\n tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])\n expression = tfs_filt.loc[ensembl_id,:]\n max_idx = np.argmax(expression.values)\n tf_str.append(str(max_idx))\n tf_str = (\";\").join(tf_str)\n tf2.append(tf_str)\n summary[\"tf2\"] = tf2\n \n # remove failed rules\n summary = summary[~summary[\"interaction\"].str.contains(\"FAILED\")]\n \n # make graph\n graph = nx.from_pandas_edgelist(summary, \"tf1\", \"tf2\")\n\n # set up positions\n #pos = graphviz_layout(graph, prog=\"dot\")\n pos = graphviz_layout(graph, prog=\"neato\")\n scale_factor = 3\n for key in pos.keys():\n coords = pos[key]\n pos[key] = {\"x\": scale_factor*coords[0], \"y\": -scale_factor*coords[1]}\n nx.set_node_attributes(graph, pos, \"graphics\") # note this is diff from v1 to v2 in networkx\n \n # add graphics\n add_graphics_theme_to_nx_graph(graph)\n\n # write gml\n out_file = \"summary.gml\"\n nx.write_gml(stringize_nx_graph(graph), out_file, stringizer=str)\n\n # tfs: for each tf, get gene column\n \n \n return", "def wem_market_value_all():\n __query = \"\"\"\n select\n date_trunc('month', wfs.trading_interval) AS trading_day,\n sum(wfs.eoi_quantity * wbs.price) as energy_interval,\n wf.fueltech_id\n from wem_facility_scada wfs\n left join wem_facility wf on wfs.facility_id = wf.code\n join wem_balancing_summary wbs on wfs.trading_interval = wbs.trading_interval\n where\n wf.fueltech_id is not null\n group by 1, wf.fueltech_id\n order by 1 desc, 2 asc\n \"\"\"\n\n query = __query.format()\n\n json_envelope = {}\n\n with engine.connect() as c:\n rows = c.execute(query)\n\n current_tech = None\n\n for row in rows:\n\n current_tech = row[2]\n\n if current_tech not in json_envelope.keys():\n json_envelope[current_tech] = {\n \"id\": f\"wem.fuel_tech.{current_tech}.market_value\",\n \"fuel_tech\": current_tech,\n \"region\": \"wa\",\n \"type\": \"market_value\",\n \"units\": \"AUD\",\n \"history\": {\n \"interval\": 
\"1M\",\n \"start\": None,\n \"last\": None,\n \"data\": [],\n },\n }\n\n if (\n json_envelope[current_tech][\"history\"][\"start\"] == None\n or row[0] < json_envelope[current_tech][\"history\"][\"start\"]\n ):\n json_envelope[current_tech][\"history\"][\"start\"] = row[0]\n\n if (\n json_envelope[current_tech][\"history\"][\"last\"] == None\n or row[0] > json_envelope[current_tech][\"history\"][\"last\"]\n ):\n json_envelope[current_tech][\"history\"][\"last\"] = row[0]\n\n json_envelope[current_tech][\"history\"][\"data\"].append(row[1])\n\n return [json_envelope[i] for i in json_envelope.keys()]", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to 
test\r\n\r\n return transmitted_win, Q_flux", "def test_convert_azfp_01a_notemperature_notilt(azfp_path):\n azfp_01a_path = azfp_path / 'rutgers_glider_notemperature/22052500.01A'\n azfp_xml_path = azfp_path / 'rutgers_glider_notemperature/22052501.XML'\n\n echodata = open_raw(\n raw_file=azfp_01a_path, sonar_model='AZFP', xml_path=azfp_xml_path\n )\n\n # Temperature variable is present in the Environment group and its values are all nan\n assert \"temperature\" in echodata[\"Environment\"]\n assert echodata[\"Environment\"][\"temperature\"].isnull().all()\n\n # Tilt variables are present in the Platform group and their values are all nan\n assert \"tilt_x\" in echodata[\"Platform\"]\n assert \"tilt_y\" in echodata[\"Platform\"]\n assert echodata[\"Platform\"][\"tilt_x\"].isnull().all()\n assert echodata[\"Platform\"][\"tilt_y\"].isnull().all()", "def epflux_all(U, V, W, T, longitude, latitude, press, boa=None):\n pass", "def do_qc(fn, df, year):\n (lon, lat) = fn2lonlat(fn)\n stage4 = compute_stage4(lon, lat, year)\n # Does the frame appear to have all dates?\n if len(df.index) != len(df.resample(\"D\").mean().index):\n print(\"ERROR: Appears to be missing dates!\")\n\n if open(fn).read()[-1] != \"\\n\":\n print(\"ERROR: File does not end with \\\\n\")\n\n print(\"--------- Summary stats from the .cli file\")\n print(\"YEAR | RAIN | MAXRATE | MAXACC | #DAYS | #>1RT | RAD/D\")\n print(\" --- | --- | --- | --- | --- | --- | ---\")\n for _year, gdf in df.groupby(by=df.index.year):\n print(\n (\"%s | %6.2f | %7.2f | %7.2f | %6i | %6i | %6.0f\")\n % (\n _year,\n mm2inch(gdf[\"pcpn\"].sum()),\n mm2inch(gdf[\"maxr\"].max()),\n mm2inch(gdf[\"pcpn\"].max()),\n len(gdf[gdf[\"pcpn\"] > 0].index),\n len(gdf[gdf[\"maxr\"] > 25.4].index),\n gdf[\"rad\"].mean(),\n )\n )\n\n print(\"---- Months with < 0.05 precipitation ----\")\n gdf = df.groupby(by=[df.index.year, df.index.month])[\"pcpn\"].sum()\n print(gdf[gdf < 1.0])\n\n print(\"----- Average high temperature -----\")\n print(\"YEAR | Avg High F | Avg Low F | Days > 100F\")\n print(\" --- | --- | --- | ---\")\n for _year, gdf in df.groupby(by=df.index.year):\n print(\n (\"%s | %6.2f | %6.2f | %3i\")\n % (\n _year,\n c2f(gdf[\"tmax\"].mean()),\n c2f(gdf[\"tmin\"].mean()),\n len(gdf[gdf[\"tmax\"] > 37.7].index),\n )\n )\n\n monthly = df[df.index.year == year][\"pcpn\"].resample(\"M\").sum().copy()\n monthly = pd.DataFrame(\n {\"dep\": mm2inch(monthly.values)}, index=range(1, 13)\n )\n\n # Get prism, for a bulk comparison\n prism = requests.get(\n (\n \"http://mesonet.agron.iastate.edu/json/prism/\"\n \"%.2f/%.2f/%s0101-%s1231\"\n )\n % (lon, lat, year, year)\n ).json()\n rows = []\n for entry in prism[\"data\"]:\n rows.append(\n {\n \"date\": datetime.datetime.strptime(\n entry[\"valid\"][:10], \"%Y-%m-%d\"\n ),\n \"precip\": entry[\"precip_in\"],\n }\n )\n prismdf = pd.DataFrame(rows)\n prismdf = prismdf.set_index(\"date\")\n monthly[\"prism\"] = prismdf[\"precip\"].resample(\"M\").sum().copy().values\n\n # Compare daily values\n iemjson = requests.get(\n (\n \"http://mesonet.agron.iastate.edu/iemre/multiday/\"\n \"%s-01-01/%s-12-31/%s/%s/json\"\n )\n % (year, year, lat, lon)\n ).json()\n rows = []\n for entry in iemjson[\"data\"]:\n rows.append(\n {\n \"date\": datetime.datetime.strptime(entry[\"date\"], \"%Y-%m-%d\"),\n \"precip\": entry[\"daily_precip_in\"],\n }\n )\n iemdf = pd.DataFrame(rows)\n iemdf = iemdf.set_index(\"date\")\n print(\"PRISM %s precip is: %.2f\" % (year, prismdf[\"precip\"].sum()))\n print(\"IEMRE sum precip is: %.2f\" % 
(iemdf[\"precip\"].sum(),))\n print(\"StageIV sum precip is: %.2f\" % (stage4[\"precip\"].sum(),))\n monthly[\"stage4\"] = stage4[\"precip\"].resample(\"M\").sum().copy().values\n monthly[\"iemre\"] = iemdf[\"precip\"].resample(\"M\").sum().copy().values\n monthly[\"prism-dep\"] = monthly[\"prism\"] - monthly[\"dep\"]\n monthly[\"iemre-dep\"] = monthly[\"iemre\"] - monthly[\"dep\"]\n\n print(\" --------- %s Monthly Totals --------\" % (year,))\n print(monthly)\n df.at[\n slice(datetime.date(year, 1, 1), datetime.date(year, 12, 31)),\n \"stage4_precip\",\n ] = stage4[\"precip\"].values\n df[\"iemre_precip\"] = iemdf[\"precip\"]\n df[\"diff_precip\"] = df[\"pcpn_in\"] - df[\"iemre_precip\"]\n df[\"diff_stage4\"] = df[\"pcpn_in\"] - df[\"stage4_precip\"]\n print(\" --- Top 5 Largest DEP > IEMRE ----\")\n print(\n df[\n [\n \"diff_precip\",\n \"pcpn_in\",\n \"iemre_precip\",\n \"stage4_precip\",\n \"diff_stage4\",\n ]\n ]\n .sort_values(by=\"diff_precip\", ascending=False)\n .head()\n )\n print(\" --- Top 5 Largest IEMRE > DEP ----\")\n print(\n df[\n [\n \"diff_precip\",\n \"pcpn_in\",\n \"iemre_precip\",\n \"stage4_precip\",\n \"diff_stage4\",\n ]\n ]\n .sort_values(by=\"diff_precip\", ascending=True)\n .head()\n )\n\n print(\" --- Top 10 Largest Stage4 > DEP ----\")\n print(\n df[\n [\n \"diff_precip\",\n \"pcpn_in\",\n \"iemre_precip\",\n \"stage4_precip\",\n \"diff_stage4\",\n ]\n ]\n .sort_values(by=\"diff_stage4\", ascending=True)\n .head(10)\n )\n print(\" vvv job listing based on the above vvv\")\n for dt in df.sort_values(by=\"diff_stage4\", ascending=True).head(10).index:\n print(\n \"python daily_clifile_editor.py 0 %s %s %s\"\n % (dt.year, dt.month, dt.day)\n )\n df2 = df.loc[slice(datetime.date(year, 1, 1), datetime.date(year, 1, 31))][\n [\"diff_precip\", \"pcpn_in\", \"iemre_precip\", \"stage4_precip\"]\n ].sort_values(by=\"diff_precip\")\n print(\" --- Daily values for month \" \"\")\n print(df2)", "def get_ta_data(path_data):\n if file_exist(path_data) == 0:\n df = ts.pro_bar(ts_code=ts_code_str, adj='qfq', start_date=dt_baseDeltaValue_start_str,\n end_date=dt_now_str, freq=freq_label, ma=[3, 5, 55])\n # print(df.head())\n\n df['delta'] = df['ma3'] - df['ma5']\n writer = pd.ExcelWriter(path_data)\n # print(type(df).__name__)\n if type(df).__name__ == 'DataFrame':\n df.to_excel(writer, sheet_name='Data', index=False)\n writer.save()", "def getData(self, indexSelect):\n csv_filename = '../ExpDataFiles.csv'\n fieldnames = ['index', 'filename', 'sample', 'shear']\n\n parameters = files_list_reduce(csv_filename, fieldnames)\n fileName, sample, shear = files_to_reduce(parameters, indexSelect)\n\n self.setName = '../2D_data_sub/' + fileName[0]\n\n self.expData_raw = np.loadtxt(self.setName, delimiter=\" \", skiprows=self.skipRows)\n bsq = np.sqrt(self.expData_raw[:, 0]**2 + self.expData_raw[:, 1]**2)\n self.beamStop = (bsq < self.qmin)\n self.expData_bs = self.expData_raw[~self.beamStop]\n\n # Removing detector shadow\n\n qx_neg_lower = -0.019\n qx_neg_upper = -0.012\n qx_pos_lower = 0.021\n qx_pos_upper = 0.029\n\n qy_lower = -0.014\n qy_upper = -0.011\n\n q_min = -0.062\n q_max = -0.0539\n\n qx_neg = np.logical_and(self.expData_bs[:, 0] <\n qx_neg_upper, self.expData_bs[:, 0] > qx_neg_lower)\n data_qx_neg = self.expData_bs[~qx_neg]\n\n qx_neg = np.logical_and(data_qx_neg[:, 0] <\n q_max, data_qx_neg[:, 0] > q_min)\n data_qx_neg = data_qx_neg[~qx_neg]\n\n qx_pos = np.logical_and(data_qx_neg[:, 0] < qx_pos_upper, data_qx_neg[:, 0] > qx_pos_lower)\n data_qx_pos = 
data_qx_neg[~qx_pos]\n\n qy_qxRange = np.logical_and(\n data_qx_pos[:, 0] < qx_pos_lower, data_qx_pos[:, 0] > qx_neg_upper)\n qyRange = np.logical_and(data_qx_pos[:, 1] < qy_upper, data_qx_pos[:, 1] > qy_lower)\n qy_mask = np.logical_and(qy_qxRange == True, qyRange == True)\n\n self.expData_masked = data_qx_pos[~qy_mask]\n\n # End removing detector shadow\n\n self.expData_sort = np.array(sorted(self.expData_masked, key=lambda col: (col[1], col[0])))\n\n self.qx_unique = np.unique(np.array(self.expData_sort[:, 0]))\n self.qy_unique = np.unique(np.array(self.expData_sort[:, 1]))\n\n# self.expData_sort = abs(self.expData_sort)\n\n # testing resolution\n self.expData = sasmodels.data.Data2D(x=self.expData_sort[:, 0], dx=0.1*abs(self.expData_sort[:, 0]), y=self.expData_sort[:, 1], dy=0.1*abs(\n self.expData_sort[:, 1]), z=self.expData_sort[:, 2], dz=self.expData_sort[:, 3])\n self.expData.sample = sample\n self.expData.shear = shear\n\n return", "def optimize_trap(dg):\n f_peak = './temp_peak.lh5' # lh5\n f_results = './temp_results.h5' # pandas\n grp_data, grp_grid = '/optimize_data', '/optimize_grid'\n \n # epar, elo, ehi, epb = 'energy', 0, 1e7, 10000 # full range\n epar, elo, ehi, epb = 'energy', 3.88e6, 3.92e6, 500 # K40 peak\n \n show_movie = True\n write_output = True\n n_rows = None # default None\n \n with open('opt_trap.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n \n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n\n # quick check of the energy range\n # ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n # exit()\n \n # set grid parameters\n # TODO: jason's suggestions, knowing the expected shape of the noise curve\n # e_rises = np.linspace(-1, 0, sqrt(sqrt(3))\n # e_rises # make another list which is 10^pwr of this list\n # np.linspace(log_tau_min, log_tau_max) # try this too\n e_rises = np.arange(1, 12, 1)\n e_flats = np.arange(1, 6, 1)\n # rc_consts = np.arange(54, 154, 10) # changing this here messes up DCR\n \n # -- create the grid search file the first time -- \n # NOTE: this makes a linear grid, and is editable by the arrays above.\n # jason also proposed a more active gradient-descent style search\n # like with Brent's method. 
(https://en.wikipedia.org/wiki/Brent%27s_method)\n \n if True:\n # if not os.path.exists(f_peak):\n print('Recreating grid search file')\n \n # create the grid file\n # NOTE: save it as an lh5 Table just as an example of writing/reading one\n lists = [e_rises, e_flats]#, rc_consts]\n prod = list(itertools.product(*lists)) # clint <3 stackoverflow\n df_grid = pd.DataFrame(prod, columns=['rise', 'flat'])#,'rc']) \n lh5_grid = {}\n for i, dfcol in df_grid.iteritems():\n lh5_grid[dfcol.name] = lh5.Array(dfcol.values)\n tb_grid = lh5.Table(col_dict=lh5_grid)\n sto.write_object(tb_grid, grp_grid, f_peak)\n \n # filter events by onboard energy\n ene_raw = sto.read_object(tb_raw+'/'+epar, f_raw).nda\n # hist, bins, var = pgh.get_hist(ene_raw, range=(elo, ehi), dx=epb)\n # plt.plot(bins[1:], hist, ds='steps')\n # plt.show()\n if n_rows is not None:\n ene_raw = ene_raw[:n_rows]\n idx = np.where((ene_raw > elo) & (ene_raw < ehi))\n\n # create a filtered table with correct waveform and attrs\n # TODO: move this into a function in lh5.py which takes idx as an input\n tb_data, wf_tb_data = lh5.Table(), lh5.Table()\n\n # read non-wf cols (lh5 Arrays)\n data_raw = sto.read_object(tb_raw, f_raw, n_rows=n_rows)\n for col in data_raw.keys():\n if col=='waveform': continue\n newcol = lh5.Array(data_raw[col].nda[idx], attrs=data_raw[col].attrs)\n tb_data.add_field(col, newcol)\n \n # handle waveform column (lh5 Table)\n data_wfs = sto.read_object(tb_raw+'/waveform', f_raw, n_rows=n_rows)\n for col in data_wfs.keys():\n attrs = data_wfs[col].attrs\n if isinstance(data_wfs[col], lh5.ArrayOfEqualSizedArrays):\n # idk why i can't put the filtered array into the constructor\n aoesa = lh5.ArrayOfEqualSizedArrays(attrs=attrs, dims=[1,1])\n aoesa.nda = data_wfs[col].nda[idx]\n newcol = aoesa\n else:\n newcol = lh5.Array(data_wfs[col].nda[idx], attrs=attrs)\n wf_tb_data.add_field(col, newcol)\n tb_data.add_field('waveform', wf_tb_data)\n tb_data.attrs = data_raw.attrs\n sto.write_object(tb_data, grp_data, f_peak)\n\n else:\n print('Loading peak file. 
groups:', sto.ls(f_peak))\n tb_grid = sto.read_object(grp_grid, f_peak)\n tb_data = sto.read_object(grp_data, f_peak) # filtered file\n # tb_data = sto.read_object(tb_raw, f_raw) # orig file\n df_grid = tb_grid.get_dataframe()\n \n # check shape of input table\n print('input table attributes:')\n for key in tb_data.keys():\n obj = tb_data[key]\n if isinstance(obj, lh5.Table):\n for key2 in obj.keys():\n obj2 = obj[key2]\n print(' ', key, key2, obj2.nda.shape, obj2.attrs)\n else:\n print(' ', key, obj.nda.shape, obj.attrs)\n\n # clear new colums if they exist\n new_cols = ['e_fit', 'fwhm_fit', 'rchisq', 'xF_err', 'fwhm_ovr_mean']\n for col in new_cols:\n if col in df_grid.columns:\n df_grid.drop(col, axis=1, inplace=True)\n\n t_start = time.time()\n def run_dsp(dfrow):\n \"\"\"\n run dsp on the test file, editing the processor list\n alternate idea: generate a long list of processors with different names\n \"\"\"\n # adjust dsp config dictionary\n rise, flat = dfrow\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = f'{tau}*us'\n dsp_config['processors']['wf_trap']['args'][1] = f'{rise}*us'\n dsp_config['processors']['wf_trap']['args'][2] = f'{flat}*us'\n # pprint(dsp_config)\n \n # run dsp\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=0)\n pc.execute()\n \n # analyze peak\n e_peak = 1460.\n etype = 'trapEmax'\n elo, ehi, epb = 4000, 4500, 3 # the peak moves around a bunch\n energy = tb_out[etype].nda\n \n # get histogram\n hE, bins, vE = pgh.get_hist(energy, range=(elo, ehi), dx=epb)\n xE = bins[1:]\n \n # should I center the max at 1460?\n\n # simple numerical width\n i_max = np.argmax(hE)\n h_max = hE[i_max]\n upr_half = xE[(xE > xE[i_max]) & (hE <= h_max/2)][0]\n bot_half = xE[(xE < xE[i_max]) & (hE >= h_max/2)][0]\n fwhm = upr_half - bot_half\n sig = fwhm / 2.355\n \n # fit to gaussian: amp, mu, sig, bkg\n fit_func = pgf.gauss_bkg\n amp = h_max * fwhm\n bg0 = np.mean(hE[:20])\n x0 = [amp, xE[i_max], sig, bg0]\n xF, xF_cov = pgf.fit_hist(fit_func, hE, bins, var=vE, guess=x0)\n\n # collect results\n e_fit = xF[0]\n xF_err = np.sqrt(np.diag(xF_cov))\n e_err = xF\n fwhm_fit = xF[1] * 2.355 * 1460. / e_fit\n \n fwhm_err = xF_err[2] * 2.355 * 1460. / e_fit\n \n chisq = []\n for i, h in enumerate(hE):\n model = fit_func(xE[i], *xF)\n diff = (model - h)**2 / model\n chisq.append(abs(diff))\n rchisq = sum(np.array(chisq) / len(hE))\n fwhm_ovr_mean = fwhm_fit / e_fit\n\n if show_movie:\n \n plt.plot(xE, hE, ds='steps', c='b', lw=2, label=f'{etype} {rise}--{flat}')\n\n # peak shape\n plt.plot(xE, fit_func(xE, *x0), '-', c='orange', alpha=0.5,\n label='init. 
guess')\n plt.plot(xE, fit_func(xE, *xF), '-r', alpha=0.8, label='peakshape fit')\n plt.plot(np.nan, np.nan, '-w', label=f'mu={e_fit:.1f}, fwhm={fwhm_fit:.2f}')\n\n plt.xlabel(etype, ha='right', x=1)\n plt.ylabel('Counts', ha='right', y=1)\n plt.legend(loc=2)\n\n # show a little movie\n plt.show(block=False)\n plt.pause(0.01)\n plt.cla()\n\n # return results\n return pd.Series({'e_fit':e_fit, 'fwhm_fit':fwhm_fit, 'rchisq':rchisq,\n 'fwhm_err':xF_err[0], 'fwhm_ovr_mean': fwhm_ovr_mean})\n \n # df_grid=df_grid[:10]\n df_tmp = df_grid.progress_apply(run_dsp, axis=1)\n df_grid[new_cols] = df_tmp\n # print(df_grid)\n \n if show_movie:\n plt.close()\n \n print('elapsed:', time.time() - t_start)\n if write_output:\n df_grid.to_hdf(f_results, key=grp_grid)\n print(f\"Wrote output file: {f_results}\")", "def zonal_stats_workflow():\n save_as = \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/summary/monthly_quickflow.csv\"\n scenario_dict = {\n 'pre-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/pre_decline\",\n 'post-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/post_decline\",\n }\n df_list = []\n for scenario in scenario_dict.iterkeys():\n results_dict = {\n 'scenario': [],\n 'month': [],\n 'sum_quickflow': [],\n }\n folder = scenario_dict[scenario]\n aoi_shp = os.path.join(folder, 'aggregated_results.shp')\n for month in xrange(1, 13):\n qf_raster = os.path.join(\n folder, 'intermediate_outputs', 'qf_{}.tif'.format(month))\n zonal_stats = pygeoprocessing.zonal_statistics(\n (qf_raster, 1), aoi_shp)\n sum_QF = zonal_stats[0]['sum']\n results_dict['scenario'].append(scenario)\n results_dict['month'].append(month)\n results_dict['sum_quickflow'].append(sum_QF)\n results_df = pandas.DataFrame(data=results_dict)\n df_list.append(results_df)\n combined_list = pandas.concat(df_list)\n combined_list.to_csv(save_as, index=False)", "def getHFtableData(self, ep=None):\n HFdict = {}\n if self.hfMode == 'limiter':\n HFdict['Heat Flux Mode'] = 'Limiter'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Near Mode\"] = 'Eich Regression #15'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Near Mode\"] = 'User Defined'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqCN\n if self.lqCFmode == 'horacek':\n HFdict[\"\\u03BB Far Mode\"] = 'Horacek Figure 6a'\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n else:\n HFdict[\"\\u03BB Far Mode\"] = 'User Defined'\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n\n HFdict[\"Common Region Near Power Fraction\"] = self.fracCN\n HFdict[\"Common Region Far Power Fraction\"] = self.fracCF\n\n elif self.hfMode == 'multiExp':\n HFdict['Heat Flux Mode'] = 'Multiple (4) Exponentials'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Near Mode\"] = 'Eich Regression #15'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Near Mode\"] = 'User Defined'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqCN\n\n if self.lqCFmode == 'horacek':\n HFdict[\"\\u03BB Far Mode\"] = 'Horacek Figure 6a'\n else:\n HFdict[\"\\u03BB Far Mode\"] = 'User Defined'\n\n\n\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n HFdict[\"Private Region Near Heat Flux Width (\\u03BBq PN) [mm]\"] = self.lqPN\n HFdict[\"Private Region Far Heat 
Flux Width (\\u03BBq PF) [mm]\"] = self.lqPF\n HFdict[\"Common Region Near Power Fraction\"] = self.fracCN\n HFdict[\"Common Region Far Power Fraction\"] = self.fracCF\n HFdict[\"Private Region Near Power Fraction\"] = self.fracPN\n HFdict[\"Private Region Far Power Fraction\"] = self.fracPF\n\n elif self.hfMode == 'qFile':\n HFdict[\"Heat Flux Mode\"] = 'Read HF from qFile'\n HFdict['qFilePath'] = self.qFilePath\n HFdict['qFileTag'] = self.qFileTag\n\n elif self.hfMode == 'eich':\n HFdict['Heat Flux Mode'] = 'Gaussian Spreading'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Mode\"] = 'Eich Regression #15'\n HFdict[\"Heat Flux Width (\\u03BBq) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Mode\"] = 'User Defined'\n HFdict[\"Heat Flux Width (\\u03BBq) [mm]\"] = self.lqCN\n\n if self.SMode == 'makowski':\n HFdict['Greenwald Density Fraction'] = self.fG\n HFdict['Spreading (S) Mode'] = 'Makowski Figure 6'\n else:\n HFdict['Spreading (S) Mode'] = 'User Defined'\n HFdict['Greenwald Density Fraction'] = 'Only used for Makowski S Mode'\n HFdict['S [mm]'] = self.S\n HFdict['Background Heat Flux'] = self.qBG\n\n if self.hfMode != 'qFile':\n HFdict[\"Power Injected (Pinj) [MW]\"] = self.Pinj\n HFdict[\"Radiated Fraction of Injected Power\"] = self.coreRadFrac\n HFdict[\"Power Crossing Separatrix (Psol) [MW]\"] = self.Psol\n HFdict[\"Upper Inner Divertor Power Fraction\"] = self.fracUI\n HFdict[\"Upper Outer Divertor Power Fraction\"] = self.fracUO\n HFdict[\"Lower Inner Divertor Power Fraction\"] = self.fracLI\n HFdict[\"Lower Outer Divertor Power Fraction\"] = self.fracLO\n\n return HFdict", "def get_data_iue(obsid, filt):\n\n # This error code will be used unless there's a problem reading any\n # of the FITS files in the list, or the FILTER value is not understood.\n errcode = 0\n\n # This defines a data point for a DataSeries object as a namedtuple.\n data_point = collections.namedtuple('DataPoint', ['x', 'y'])\n\n # For IUE, this defines the x-axis and y-axis units as a string.\n iue_xunit = \"Angstroms (vacuum, heliocentric)\"\n iue_yunit = \"ergs/cm^2/s/Angstrom\"\n\n # Parse the obsID string to determine the paths+files to read. Note:\n # this step will assign some of the error codes returned to the top level.\n if filt == ' ':\n filt = \"UNKNOWN\"\n if filt.upper() in [\"LOW_DISP\", \"HIGH_DISP\"] or filt == \"UNKNOWN\":\n parsed_files_result = parse_obsid_iue(obsid, filt.upper())\n errcode = parsed_files_result.errcode\n else:\n errcode = 4\n\n # In the case of low dispersion spectra, there can be two apertures for\n # a single obsID. In that case, we return a list of TWO DataSeries, one\n # for each aperture. 
In other words, we treat the single obsID as if it\n # were two different obsIDs in the case of a double-aperture.\n all_data_series = []\n\n # For each file, read in the contents and create a return JSON object.\n if errcode == 0:\n for sfile in parsed_files_result.specfiles:\n # Figure out if this is an mxhi or mxlo spectrum.\n if sfile[-7:] == \"mxlo.gz\":\n is_lo = True\n is_hi = False\n else:\n is_lo = False\n is_hi = True\n\n try:\n with fits.open(sfile) as hdulist:\n if is_lo:\n # Get the dispersion type from the primary header.\n dispersion = hdulist[0].header[\"disptype\"]\n # Get the aperture size(s) from the header.\n apertures = hdulist[1].data[\"aperture\"]\n n_apertures = len(apertures)\n # Number of spectral data points for each aperture size.\n n_wls = [int(x) for x in hdulist[1].data[\"npoints\"]]\n # Initial wavelength value(s).\n starting_wl = [float(x) for x in\n hdulist[1].data[\"wavelength\"]]\n # Step size(s) for each subsequent wavelength.\n delta_wl = [float(x) for x in hdulist[1].data[\"deltaw\"]]\n\n # Generate the full array of wavelength values, and get\n # full array of flux values, for each aperture.\n for aper in range(n_apertures):\n wls = [starting_wl[aper] +\n x*delta_wl[aper] for\n x in range(n_wls[aper])]\n fls = [float(x) for\n x in hdulist[1].data[\"flux\"][aper]]\n # Make sure wavelengths and fluxes are sorted\n # from smallest wavelength to largest.\n sort_indexes = [x[0] for x in\n sorted(enumerate(wls),\n key=itemgetter(1))]\n wls = [wls[x] for x in sort_indexes]\n fls = [fls[x] for x in sort_indexes]\n wlfls = [(x, y) for x, y in zip(wls, fls) if\n y != 0.]\n if wlfls != []:\n datapoints = [\n [data_point(x=float(\"{0:.8f}\".format(x)),\n y=float(\"{0:.8e}\".format(y)))\n for x, y in wlfls]]\n # Create the return DataSeries object.\n all_data_series.append(\n DataSeries('iue', obsid,\n datapoints,\n ['IUE_' + obsid + ' DISP:'\n + dispersion + ' APER:' +\n apertures[aper]],\n [iue_xunit], [iue_yunit],\n errcode))\n\n if is_hi:\n # Get the aperture from the primary header.\n aperture = hdulist[0].header[\"aperture\"].strip()\n # Get the dispersion type from the primary header.\n dispersion = hdulist[0].header[\"disptype\"].strip()\n # Get the camera used (SWP, LWP, LWR).\n camera = hdulist[0].header[\"camera\"].strip()\n # Get a list of spectral orders. 
Those that are beyond\n # the range defined in Solano are not considered.\n if camera == \"LWP\":\n max_order = 124\n elif camera == \"LWR\":\n max_order = 119\n else:\n max_order = 120\n orders = [int(x) for x in hdulist[1].data[\"order\"] if x\n <= max_order]\n n_orders = len(orders)\n # This lists will store each orders' spectral info.\n order_spectra = []\n\n # Loop over each order.\n for order in range(n_orders):\n # Number of fluxes for this order.\n n_p = int(hdulist[1].data[\"npoints\"][order])\n # Starting pixel within the array of 768 elements.\n s_pix = int(\n hdulist[1].data[\"startpix\"][order])\n # Wavelength corresponding to this start pixel.\n starting_wl = float(\n hdulist[1].data[\"wavelength\"][order])\n # Step size for each subsequent wavelength.\n delta_wl = float(\n hdulist[1].data[\"deltaw\"][order])\n # Generate the full array of wavelength values.\n wls = [starting_wl + x*delta_wl for x in\n range(n_p)]\n # Extract the fluxes that go along with these wls.\n all_fluxes = hdulist[1].data[\"abs_cal\"][order]\n fls = [float(x) for x in\n all_fluxes[(s_pix-1):(s_pix-1+n_p-1+1)]]\n # Extract the quality flags that go along with\n # these wls.\n all_qfs = hdulist[1].data[\"quality\"][order]\n qfs = [int(x) for x in all_qfs[(s_pix-1):(s_pix-1+\n n_p-1+1)]]\n # Only keep good Quality Flags, if the order is all\n # bad flags, don't add it.\n keep = [i for i, x in enumerate(qfs) if (qfs[i] >\n -16384)]\n if keep != [] and fls != [0.]*len(fls):\n wls = [wls[i] for i in keep]\n fls = [fls[i] for i in keep]\n # Create a dict that will store this order's\n # info.\n order_spec = {'order':orders[order],\n 'wls':numpy.asarray(wls),\n 'fls':numpy.asarray(fls)}\n order_spectra.append(order_spec)\n\n # Order-combine the spectra.\n comb_spec = order_combine(order_spectra, camera, False)\n\n # Resample onto an evenly-spaced wavelength scale.\n comb_spec_reb = resample_spectrum(comb_spec, camera,\n False)\n\n # Create the return DataSeries object.\n datapoints = [\n [data_point(x=float(\"{0:.8f}\".format(x)),\n y=float(\"{0:.8e}\".format(y)))\n for x, y in comb_spec_reb]]\n all_data_series.append(\n DataSeries('iue', obsid,\n datapoints,\n ['IUE_' + obsid + ' DISP:'\n + dispersion + ' APER:' +\n aperture],\n [iue_xunit], [iue_yunit],\n errcode))\n\n except IOError:\n errcode = 3\n all_data_series.append(\n DataSeries('iue', obsid, [], [''], [''], [''], errcode))\n\n else:\n # This is where an error DataSeries object would be returned.\n all_data_series.append(\n DataSeries('iue', obsid, [], [], [],\n [], errcode))\n\n # Return the DataSeries object back to the calling module.\n if len(all_data_series) == 1:\n return all_data_series[0]\n return all_data_series", "def usefulquantities(dffin):\n dffin['log_length_box'] = np.log(dffin['length_box_um'])\n dffin['time_min']=dffin['time_sec']/60\n dffin['pred_length_box_um'] = np.exp(dffin['pred_log_length'])\n dffin['unique_id'] = dffin['cell']+dffin['time_sec'].apply(lambda x:str(x))\n dffin['cv_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x:\\\n np.std(x)/np.mean(x))\n dffin['std_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.std(x))\n dffin['mean_gr'] = dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin['mean_len'] = dffin.groupby('cell')['pred_length_box_um'].transform(lambda x: np.mean(x))\n dffin['norm_pred_growth_rate'] = (dffin['pred_growth_rate']-dffin.groupby('cell')['pred_growth_rate'].transform(lambda\\\n x: 
np.mean(x)))/dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin = rl.genalogy(dffin,'parent_cell') #Create genealogy\n dffin = rl.genalogy(dffin,'g_parent_cell')\n dffin = rl.genalogy(dffin,'g_g_parent_cell')\n dffin = dffin.set_index('unique_id')\n qq= dffin.groupby('cell').apply(lambda x: (x['pred_length_box_um']-x['pred_length_box_um'].iloc[0])/(x['pred_length_box_um'].iloc[-1]-x['pred_length_box_um'].iloc[0])).rename('add_len')\n jj= dffin.groupby('cell').apply(lambda x: (x['time_sec']-x['time_sec'].iloc[0])/(x['time_sec'].iloc[-1]-x['time_sec'].iloc[0])).rename('cell_cycle')\n return pd.concat([dffin, qq.reset_index().set_index('unique_id')['add_len'], jj.reset_index().set_index('unique_id')['cell_cycle']], axis=1, join='inner')", "def add_derived_GEOSChem_specs2df(df):\n # Add temperature in deg C\n df['T'] = df['GMAO_TEMP'].copy()\n df['T'] = df['GMAO_TEMP'].values - 273.15\n # Inc. V nd U with same variable names as GEOS-CF\n df['V'] = df['GMAO_VWND'].copy()\n df['U'] = df['GMAO_UWND'].copy()\n # Add NOx as combined NO and NO2\n df['NOx'] = df['NO'].values + df['NO2'].values\n # Add NOy as defined in GEOS-CF\n # NOy = no_no2_hno3_hno4_hono_2xn2o5_pan_organicnitrates_aerosolnitrates\n vars2use = AC.GC_var('NOy-all')\n df['NOy'] = df['N2O5'].copy() #  2 N2O5 in NOy, so 2x via template\n for var in vars2use:\n try:\n df.loc[:, 'NOy'] = df['NOy'].values + df[var].values\n except KeyError:\n pass\n # Add a variable for gas-phase NOy (by subtracting aerosol nitrate)\n vars2use = AC.GC_var('NOy-gas')\n df['NOy-gas'] = df['N2O5'].copy() #  2 N2O5 in NOy, so 2x via template\n for var in vars2use:\n try:\n df.loc[:, 'NOy-gas'] = df['NOy-gas'].values + df[var].values\n except KeyError:\n pass\n # Include a variable of NOy where HNO3 is removed\n # NOy = no_no2_hno3_hno4_hono_2xn2o5_pan_organicnitrates_aerosolnitrates\n df['NOy-HNO3'] = df['NOy'].values - df['HNO3'].values\n # Include a variable of NOy where HNO3 is removed\n df['NOy-HNO3-PAN'] = df['NOy'].values - \\\n df['HNO3'].values - df['PAN'].values\n # gas-phase (exc. 
PAN, HNO3, HNO4, Org-NIT, N2O5)\n df['NOy-Limited'] = df['NO'].values + df['NO2'].values + \\\n df['HNO2'].values + df['NIT'].values + df['NITs'].values\n # Add an all sulfate tracer\n NewVar = 'SO4-all'\n vars2use = AC.GC_var(NewVar)\n df[NewVar] = df['NIT'].values\n for var2use in vars2use:\n try:\n df[NewVar] = df[NewVar].values + df[var2use].values\n except KeyError:\n pass\n # And a all nitrate tracer\n NewVar = 'NIT-all'\n vars2use = AC.GC_var(NewVar)\n df[NewVar] = df['NIT'].values\n for var2use in vars2use:\n try:\n df[NewVar] = df[NewVar].values + df[var2use].values\n except KeyError:\n pass\n # Uset the P-I variable as a model level variable\n df['model-lev'] = df['P-I'].copy()\n return df", "def temperature() -> float:", "def whc_tot(mukey, layers=''):\n #read appropriate soils.in content to a python list\n mukey = str(mukey)\n soil_path = \"/data/paustian/ernie/SSURGO_master_script/soil_test2/\"\n soil_fpath = soil_path+mukey[:-3]+\"/\"+mukey+\".in\"\n cont = [[]]\n data_input = open(soil_fpath, 'r')\n for line in data_input:\n cont.append(line.split())\n del cont[0]\n\n #convert all entries in the 2D list to float format where possible, or zero in the case\n #of very small numbers recorded in scientific notation\n for k in range(len(cont)):\n for l in range(len(cont[k])):\n cont[k][l] = float(cont[k][l])\n\n #loop through list and compute the water holding capacity increment represented in \n #each line\n min_h2o_evap = 0\n min_h2o = 0\n max_h2o = 0\n whc = 0\n for i in range(len(cont)):\n if not layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n else:\n if 1+i <= layers:\n depth = cont[i][1] - cont[i][0]\n FC = cont[i][3]\n WP = cont[i][4]\n WHC = FC - WP\n if i != 0:\n min_h2o_evap += depth*WP\n min_h2o += depth*WP\n max_h2o += depth*FC\n whc += depth*WHC\n if layers:\n if layers > len(cont):\n print \"NOTE: specified layer limit exceeds number of layers found in soils.in file\"\n\n return whc, min_h2o, max_h2o", "def test_input_flux_file():\n # Generate an input file\n flux_input_file = tstutils.data_path('test.flux')\n if os.path.isfile(flux_input_file):\n os.remove(flux_input_file)\n\n cfg_lines = ['[fluxcalib]']\n cfg_lines += [' extinct_correct = False # Set to True if your SENSFUNC derived with the UVIS algorithm\\n']\n cfg_lines += ['# Please add your SENSFUNC file name below before running pypeit_flux_calib']\n\n # These files need to be in tests/files/\n data = Table()\n data['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data['sensfile'] = 'sens_cN20170331S0206-HIP62745_GNIRS_20170331T083351.681.fits'\n # \n paths = [tstutils.data_path('')]\n\n fluxFile = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data)\n # Write\n fluxFile.write(flux_input_file)\n\n # Read\n fluxFile2 = inputfiles.FluxFile.from_file(flux_input_file)\n assert np.all(fluxFile2.data['filename'] == data['filename'])\n\n # Test path\n assert fluxFile2.file_paths[0] == paths[0]\n assert fluxFile2.filenames[0] == os.path.join(paths[0], data['filename'][0])\n\n # #################\n # Tickle the other ways to do sensfiles\n data3 = Table()\n data3['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data3['sensfile'] = 
['sens_cN20170331S0206-HIP62745_GNIRS_20170331T083351.681.fits',\n '']\n\n fluxFile3 = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data3)\n assert fluxFile3.sensfiles[1] == os.path.join(paths[0], data['sensfile'][0])\n \n data4 = Table()\n data4['filename'] = ['spec1d_cN20170331S0216-pisco_GNIRS_20170331T085412.181.fits',\n 'spec1d_cN20170331S0217-pisco_GNIRS_20170331T085933.097.fits']\n data4['sensfile'] = ''\n\n fluxFile4 = inputfiles.FluxFile(config=cfg_lines, \n file_paths=paths,\n data_table=data4)\n assert len(fluxFile4.sensfiles) == 0\n\n # Clean up\n os.remove(flux_input_file)", "def addtonc(ncfout,key,vd,ofield,ftype=\"timeseries\"):\n nc_out=nc.Dataset(ncfout,'r+')\n if ftype==\"timeseries\":\n diml=['time','height','south_north','west_east'] # Tuple of Dimensions\n if vd['dims']==4:\n dimtup=tuple(diml)\n elif vd['dims']==3:\n dimtup = tuple([c for c in diml if c != \"height\"])\n elif vd['dims']==2:\n dimtup = tuple([c for c in diml if c not in [\"height\",\"time\"]])\n elif ftype==\"roughness\":\n diml=['south_north','west_east']\n dimtup=tuple(diml)\n elif ftype==\"tabfile\":\n diml=['south_north','west_east','sector','wind','stab']\n if vd['dims']==3:\n dimtup=tuple(diml.remove('wind').remove('stab'))\n if vd['dims']==2:\n dimtup=tuple(diml.remove('wind').remove('stab').remove('sector'))\n if key in (\"TKE\", \"ABLAT_CYL\", \"ACCRE_CYL\"):\n outv=nc_out.createVariable(key, 'f4', dimtup, zlib=True,\n complevel=9, fill_value=-999.)\n else:\n outv=nc_out.createVariable(key,'f4',dimtup,zlib=True,complevel=9)\n outv.units=vd['units']\n outv.long_name=vd['name']\n if vd['std_name'] is not None:\n outv.standard_name=vd['std_name']\n if key==\"PRECIP\":\n outv.cell_methods=\"time: sum\"\n outv.grid_mapping=\"crs\"\n outv.coordinates=\"XLAT XLON\"\n outv[:]=ofield[:]\n nc_out.close()\n return(None)", "def load_zT(all_data=False):\n path = os.path.join(DATA_DIR, \"zT-citrination-165.csv\")\n df = pd.read_csv(path, index_col=None)\n if not all_data:\n df = df[[\"composition\", \"zT\"]]\n return df", "def radiance_map(file, config, vmax=4200, levels=20, typ=''):\n \n # Select data from configuration \n azimuths = config['skymap'][:, 0] # +180 # azimuths\n zeniths = config['skymap'][:, 1] # zeniths\n\n if typ == 'sim':\n # look for wavelength index in array\n waves_sim = dataset.attrs['simulated_Columns'].split('nm')[0].split('[')[1].split(\n ']')[0].split(',')\n waves = np.asarray(list(map(int, waves_sim)))\n wave_indx = np.where(waves == wave)\n try:\n wave_indx = np.int(wave_indx[0][0])\n except:\n print(\"Wavelength is not in dataset\")\n z = dataset.simulated[:, wave_indx, time_indx]\n\n elif typ == 'meas':\n wave_indx = int((config['wavelength'] - 250 - config['wave_correction']) / 0.446)\n with h5py.File(file, 'r') as data:\n z = data['data'][:, wave_indx]\n else:\n print('Select a input data type(sim or meas)')\n\n # Add values in the origin to close the surface interpolation\n azimuths = np.append(azimuths, [270, 0, 0, 0, 0, 0, 0, 0])\n zeniths = np.append(zeniths, [0, 12, 24, 36, 48, 60, 72, 84])\n z = np.append(z, [z[0], z[3], z[9], z[19], z[33], z[51], z[73], z[99]])\n\n # Convert x to radians\n azimuths = np.radians(azimuths)\n zeniths = np.radians(zeniths)\n\n # Remove dead channels of the dataset\n azimuths = np.delete(azimuths, config['dead_fibre'])\n zeniths = np.delete(zeniths, config['dead_fibre'])\n z = np.delete(z, config['dead_fibre'])\n\n # Set up a regular grid of interpolation point\n thetai, ri = np.linspace(azimuths.min(), 
azimuths.max(),\n num=len(azimuths)), \\\n np.linspace(zeniths.min(), zeniths.max(), num=len(zeniths))\n\n ri, thetai = np.meshgrid(ri, thetai, indexing='ij')\n\n # zi = scipy.interpolate.griddata((azimuths, zeniths), z, (thetai, ri),\n # method='linear')\n\n rbf = scipy.interpolate.Rbf(azimuths, zeniths, z, fucntion='gaussian',\n epsilon=0.05)\n\n ZI = rbf(thetai, ri)\n\n if typ == 'sim':\n name = str(dataset.time[time_indx].values) # ''\n else:\n name = 'testing' #str(dataset.time[time_indx].values)\n\n # Create the directory to save the results\n # os.makedirs(os.path.dirname(config['path_note'] + '/figures/'),\n # exist_ok=True)\n if vmax == 'default':\n vmax = 4200\n else:\n vmax = vmax\n\n # Plot the dataset\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n cmap = 'Spectral_r' # 'rainbow'\n a = plt.contourf(thetai, ri, ZI, levels, cmap=cmap, vmin=0,\n vmax=vmax) # , vmax=4932)\n plt.title('{} UTC {}nm'.format(name, config['wavelength']))\n plt.axis([0, 2*np.pi, 0, 1.48])\n\n plt.scatter(azimuths, zeniths, cmap=cmap, s=1)\n ax.grid(False)\n ax.set_theta_zero_location(\"N\") # Set the direction of polar plot\n ax.set_theta_direction(1) # Set the increase direction on azimuth angles\n # (-1 to clockwise, 1 counterclockwise)\n cbar = plt.colorbar(a)\n cbar.set_label(\"counts\", rotation=90)\n\n # if typ == 'sim':\n # plt.savefig(\n # 'figures/skymap/simulated/skymap{}nm_{}UTC_sim.jpeg'.format(wave,\n # name),\n # dpi=300)\n # plt.show();\n # else:\n # plt.savefig(\n # 'figures/skymap/measured/skymap{}nm_{}UTC_meas.jpeg'.format(wave,\n # name),\n # dpi=300)", "def getTemperatureMeasurements(self):\n # self.board.readline()\n self.stop = False\n times = []\n temps = [[], [], []]\n \n # A synchronisation string containing the characters tx is sent before each set of measurements,\n # we ensure correct reading of the measurements by waiting for this string\n while str(self.board.readline()).strip('b\\'\\\\rn') != 'tx':\n pass\n \n while not self.stop:\n # A synchronisation string containing the characters tx is sent before each set of measurements\n tx = self.board.readline()\n if str(tx).strip('b\\'\\\\rn') == 'tx':\n rawData1 = self.board.readline()\n rawData2 = self.board.readline()\n rawData3 = self.board.readline()\n rawData4 = self.board.readline()\n \n \n timeStamp = str(rawData1).strip('b\\'\\\\rn')\n temp1 = str(rawData2).strip('b\\'\\\\rn')\n temp2 = str(rawData3).strip('b\\'\\\\rn')\n temp3 = str(rawData4).strip('b\\'\\\\rn')\n try:\n times.append(float(timeStamp) / 1000)\n temps[0].append(float(temp1) / 128)\n temps[1].append(float(temp2) / 128)\n temps[2].append(float(temp3) / 128)\n # print(f'\\rtime: {float(timeStamp) / 1000:.2f} s, Temperature measured on sensor 1: {float(temp1) / 128:.2f} °C,'\n # f'sensor 2: {float(temp2) / 128:.2f} °C, sensor 3: {float(temp3) / 128:.2f} °C', sep='', end='', flush=True)\n except:\n print(rawData1, rawData2, rawData3, rawData4)\n \n \n if self.stop:\n print('\\nMeasurement finished...')\n \n self.data_stack[self.fetch_kinds[0]] = times\n self.data_stack[self.fetch_kinds[1]] = temps[0]\n self.data_stack[self.fetch_kinds[2]] = temps[1]\n self.data_stack[self.fetch_kinds[3]] = temps[2]\n \n if (len(self.data_stack['Sensor 1 Temp']) != len(times) or len(self.data_stack['Sensor 2 Temp']) != len(times) or len(self.data_stack['Sensor 3 Temp']) != len(times)):\n print(\"Warning: There may be some missing values!\")", "def read_spectral_k(filename=\"tc_dos_l.dat\"):\n # column headers for the data \n #tcdosl_labels = [\n # 
\"wavelength\",\n # \"k_xx_raw\",\"k_xx_smooth\",\n # \"k_yy_raw\",\"k_yy_smooth\",\n # \"k_zz_raw\",\"k_zz_smooth\"]\n\n tcdosl_labels = [\n \"wavelength\",\n \"k_xx_raw\",\"k_yy_raw\",\"k_zz_raw\",\n \"k_xx_smooth\",\"k_yy_smooth\",\"k_zz_smooth\"]\n\n def subselect_table_block(i_start,lines):\n i = i_start + 1\n\n table = []\n while(lines[i].strip() != \"\"):\n args = lines[i].split()\n args = [arg.strip() for arg in args]\n args = [float(arg) for arg in args]\n table.append(args)\n i += 1 \n return np.array(table)\n\n line = None # initialize\n with open(filename,'r') as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n temperatures = []\n tcdosl_dict = OrderedDict()\n\n for il,line in enumerate(lines):\n if line.startswith('# Temp:'):\n args = line.split(':')\n T = int(float(args[1].strip()))\n temperatures.append(T)\n tcdosl_dict[T] = subselect_table_block(il,lines)\n\n tcdosl_df_dict = OrderedDict()\n for temp in temperatures:\n tcdosl_df_dict[temp] = pd.DataFrame(\n copy.deepcopy(tcdosl_dict[temp]),\n columns=list(tcdosl_labels))\n\n return {k:v.copy() for k,v in tcdosl_df_dict.items()}", "def get_ecmwf_forecast_statistics(params):\n blob_mapped_dir = \"/mnt/output\"\n\n path_to_rapid_output = blob_mapped_dir\n\n watershed_name = params[\"region\"].split(\"-\")[0]\n subbasin_name = params[\"region\"].split(\"-\")[1]\n river_id = params[\"reach_id\"]\n\n # units = params[\"units\"]\n # if (not units):\n # units = \"metric\"\n\n units = \"metric\"\n\n # forecast_folder = params[\"date\"]\n # if (not forecast_folder):\n # forecast_folder = 'most_recent'\n\n forecast_folder = 'most_recent'\n\n stat_type = params[\"stat\"]\n if (stat_type is None):\n stat_type = \"\"\n\n # find/check current output datasets\n path_to_output_files = \\\n os.path.join(path_to_rapid_output,\n \"{0}-{1}\".format(watershed_name, subbasin_name))\n forecast_nc_list, start_date = \\\n ecmwf_find_most_current_files(path_to_output_files, forecast_folder)\n if (not forecast_nc_list or not start_date):\n log.log_error('ECMWF forecast for %s (%s).' 
% (watershed_name, subbasin_name))\n\n # combine 52 ensembles\n qout_datasets = []\n ensemble_index_list = []\n for forecast_nc in forecast_nc_list:\n ensemble_index_list.append(\n int(os.path.basename(forecast_nc)[:-3].split(\"_\")[-1])\n )\n qout_datasets.append(\n xarray.open_dataset(forecast_nc, autoclose=True)\n .sel(rivid=river_id).Qout\n )\n\n merged_ds = xarray.concat(qout_datasets,\n pd.Index(ensemble_index_list, name='ensemble'))\n\n return_dict = {}\n if (stat_type == 'high_res' or not stat_type):\n # extract the high res ensemble & time\n try:\n return_dict['high_res'] = merged_ds.sel(ensemble=52).dropna('time')\n except:\n pass\n\n if (stat_type != 'high_res' or not stat_type):\n # analyze data to get statistic bands\n merged_ds = merged_ds.dropna('time')\n\n if (stat_type == 'mean' or 'std' in stat_type or not stat_type):\n return_dict['mean'] = merged_ds.mean(dim='ensemble')\n std_ar = merged_ds.std(dim='ensemble')\n if (stat_type == 'std_dev_range_upper' or not stat_type):\n return_dict['std_dev_range_upper'] = \\\n return_dict['mean'] + std_ar\n if (stat_type == 'std_dev_range_lower' or not stat_type):\n return_dict['std_dev_range_lower'] = \\\n return_dict['mean'] - std_ar\n if (stat_type == \"min\" or not stat_type):\n return_dict['min'] = merged_ds.min(dim='ensemble')\n if (stat_type == \"max\" or not stat_type):\n return_dict['max'] = merged_ds.max(dim='ensemble')\n\n for key in list(return_dict):\n if (units == 'english'):\n # convert m3/s to ft3/s\n return_dict[key] *= M3_TO_FT3\n # convert to pandas series\n return_dict[key] = return_dict[key].to_dataframe().Qout\n\n return return_dict, watershed_name, subbasin_name, river_id, units", "def get_jhu(data_path: str, filter_: Union[dict, bool] = True) -> None:\n # Where JHU stores their data\n url_template = (\"https://raw.githubusercontent.com/CSSEGISandData/\"\n \"COVID-19/master/csse_covid_19_data/\"\n \"csse_covid_19_time_series/time_series_covid19_%s_%s.csv\")\n\n # Scrape the data\n dfs = {}\n for region in ['global', 'US']:\n dfs[region] = {}\n for kind in ['confirmed', 'deaths', 'recovered']:\n url = url_template % (kind, region) # Create the full data URL\n try:\n df = pd.read_csv(url) # Download the data into a dataframe\n except HTTPError:\n print(\"Could not download data for %s, %s\" % (kind, region))\n else:\n if region == 'global':\n has_no_province = df['Province/State'].isnull()\n # Whole countries only; use country name as index\n df1 = df[has_no_province].set_index('Country/Region')\n more_dfs = []\n for country in ['China', 'Canada', 'Australia']:\n if country == 'Canada' and kind in 'recovered':\n continue\n is_c = df['Country/Region'] == country\n df2 = df[is_c].sum(axis=0, skipna=False).to_frame().T\n df2['Country/Region'] = country\n df2 = df2.set_index('Country/Region')\n more_dfs.append(df2)\n df = pd.concat([df1] + more_dfs)\n elif region == 'US':\n # Use state name as index\n for k, v in us_state_abbrev.items(): # get US state abbrev\n if not us_state_abbrev[k].startswith('US_'):\n us_state_abbrev[k] = 'US_' + v # Add 'US_' to abbrev\n df.replace(us_state_abbrev, inplace=True)\n df = df.set_index('Province_State')\n df = df.groupby('Province_State').sum() # combine counties to create state level data\n\n df = df[[x for x in df if any(year in x for year in ['20', '21'])]] # Use only data columns\n # 20 or 21 signifies 2020 or 2021\n dfs[region][kind] = df # Add to dictionary of dataframes\n\n # Generate a list of countries that have \"good\" data,\n # according to these criteria:\n 
good_countries = get_countries(dfs['global'], filter_=filter_)\n\n # For each \"good\" country,\n # reformat and save that data in its own .csv file.\n source = dfs['global']\n for country in tqdm(good_countries, desc='Countries'): # For each country\n if country in ['Diamond Princess', 'MS Zaandam', 'Samoa',\n 'Vanuatu', 'Marshall Islands', 'US', 'Micronesia']:\n print(\"Skipping {}\".format(country))\n continue\n # If we have data in the downloaded JHU files for that country\n if country in source['confirmed'].index:\n df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',\n 'cum_recover', 'new_cases',\n 'new_deaths', 'new_recover',\n 'new_uninfected'])\n df['dates2'] = source['confirmed'].columns\n df['dates2'] = df['dates2'].apply(fix_jhu_dates)\n df['cum_cases'] = source['confirmed'].loc[country].values\n df['cum_deaths'] = source['deaths'].loc[country].values\n df['cum_recover'] = source['recovered'].loc[country].values\n df[['new_cases', 'new_deaths', 'new_recover']] = \\\n df[['cum_cases', 'cum_deaths', 'cum_recover']].diff()\n df['new_uninfected'] = df['new_recover'] + df['new_deaths']\n\n\n try:\n population = get_population_count(data_path, country)\n df['population'] = population\n except:\n pass\n\n # Fill NaN with 0 and convert to int\n dfs[country] = df.set_index('dates2').fillna(0).astype(int)\n dfs[country].to_csv(data_path / ('covidtimeseries_%s.csv' % country))\n\n else:\n print(\"No data for %s\" % country)\n\n source = dfs['US']\n states = source['confirmed'].index.tolist()\n us_recovery_data = covid_tracking_recovery(data_path)\n for state in tqdm(states, desc='US States'): # For each country\n if state in ['Diamond Princess', 'Grand Princess', 'MS Zaandam', 'US_AS']:\n print(\"Skipping {}\".format(state))\n continue\n # If we have data in the downloaded JHU files for that country\n if state in source['confirmed'].index:\n df = pd.DataFrame(columns=['dates2', 'cum_cases', 'cum_deaths',\n 'new_cases','new_deaths','new_uninfected'])\n df['dates2'] = source['confirmed'].columns\n df['dates2'] = df['dates2'].apply(fix_jhu_dates)\n df['cum_cases'] = source['confirmed'].loc[state].values\n df['cum_deaths'] = source['deaths'].loc[state].values\n\n df[['new_cases', 'new_deaths']] = df[['cum_cases', 'cum_deaths']].diff()\n\n # add recovery data\n df.set_index('dates2', inplace=True)\n df = df.merge(us_recovery_data[state], on='dates2', how='left')\n df['new_uninfected'] = df['new_recover'] + df['new_deaths']\n\n try:\n population = get_population_count(data_path, state)\n df['population'] = population\n except:\n pass\n # Fill NaN with 0 and convert to int\n dfs[state] = df.fillna(0).astype(int)\n dfs[state].to_csv(data_path /\n ('covidtimeseries_%s.csv' % state))\n else:\n print(\"No data for %s\" % state)", "def main():\n args = get_args()\n\n entries = []\n\n noe_dim = \"h1\" if args.hch else \"c1\" # save the name of the noe dimension\n\n with open(args.sparkylist) as lines:\n\n lines = lines.readlines()\n # lines = set(lines) # remove duplicate lines\n peak = 1\n for idx, line in enumerate(lines):\n idx = idx + 1\n\n try:\n label, c1, c2, h2, intensity, *rest = line.split()\n\n c1 = float(c1) # convert these to floats\n c2 = float(c2)\n h2 = float(h2)\n intensity = float(intensity)\n\n label = f\"peak{peak}\"\n peak += 1\n\n except ValueError:\n print(f\"invalid NOE definition on line {idx}\")\n continue\n\n dic = {\"label\": label, noe_dim: c1,\n \"c2\": c2, \"h2\": h2, \"intensity\": intensity}\n\n entries.append(dic)\n\n # create dataframe and write out\n csv 
= pd.DataFrame(entries)\n order = [\"label\", noe_dim, \"c2\", \"h2\", \"intensity\"]\n csv.to_csv(args.output, columns=order, index=False)", "def read_core_temp(self) -> float:", "def runqn3(h,steps):\n func = ['x7','x8','x9','x10','x11','x12','-(x1-x3)/(((x1-x3)**2+(x2-x4)**2)**(3.0/2))-(x1-x5)/(((x1-x5)**2+(x2-x6)**2)**(3.0/2))','-(x2-x4)/(((x1-x3)**2+(x2-x4)**2)**(3.0/2))-(x2-x6)/(((x1-x5)**2+(x2-x6)**2)**(3.0/2))','-(x3-x1)/(((x3-x1)**2+(x4-x2)**2)**(3.0/2))-(x3-x5)/(((x3-x5)**2+(x4-x6)**2)**(3.0/2))','-(x4-x2)/(((x3-x1)**2+(x4-x2)**2)**(3.0/2))-(x4-x6)/(((x3-x5)**2+(x4-x6)**2)**(3.0/2))','-(x5-x1)/(((x5-x1)**2+(x6-x2)**2)**(3.0/2))-(x5-x3)/(((x5-x3)**2+(x6-x4)**2)**(3.0/2))','-(x6-x2)/(((x5-x1)**2+(x6-x2)**2)**(3.0/2))-(x6-x4)/(((x5-x3)**2+(x6-x4)**2)**(3.0/2))']\n init = [0,0,0.97000436,-0.24308753,-0.97000436,0.24308753,-0.93240737,-0.86473146,0.466203685,0.43236573,0.466203685,0.43236573]\n rungeODE(func,init,0,h,steps)\n # extract data from CSV output file\n filename = open('RungeKuttaOutput.csv','rb')\n reader = csv.reader(filename,delimiter=',')\n x1=[]\n x2=[]\n x3=[]\n x4=[]\n x5=[]\n x6=[]\n for row in reader:\n x1 = x1 + [row[1]]\n x2 = x2 + [row[2]]\n x3 = x3 + [row[3]]\n x4 = x4 + [row[4]]\n x5 = x5 + [row[5]]\n x6 = x6 + [row[6]]\n x1out=[float(x1[i]) for i in range(1,len(x1))]\n x2out=[float(x2[i]) for i in range(1,len(x1))]\n x3out=[float(x3[i]) for i in range(1,len(x1))]\n x4out=[float(x4[i]) for i in range(1,len(x1))]\n x5out=[float(x5[i]) for i in range(1,len(x1))]\n x6out=[float(x6[i]) for i in range(1,len(x1))]\n plot(x1out,x2out,'r',x3out,x4out,'g',x5out,x6out,'b')\n show()", "def useThibautsData(log, output, bcgr=72.2, sigma=0.75, iterations=4, loc=1900, galaxies=1000,\n datadir='/Users/smn2/EUCLID/CTItesting/uniform/',\n thibautCDM03=False, beta=False, serial=1, parallel=1):\n files = g.glob(datadir + '*.fits')\n #pick randomly\n files = np.random.choice(files, galaxies, replace=False)\n\n #trap parameters: parallel\n if thibautCDM03:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_thibaut_serial.dat'\n params = ThibautsCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n else:\n f1 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_parallel.dat'\n f2 = '/Users/smn2/EUCLID/vissim-python/data/cdm_euclid_serial.dat'\n params = MSSLCDM03params()\n params.update(dict(parallelTrapfile=f1, serialTrapfile=f2, rdose=8.0e9, serial=serial, parallel=parallel))\n if beta:\n params.update(dict(beta_p=0.6, beta_s=0.6))\n\n print f1, f2\n\n #store shapes\n eclean = []\n e1clean = []\n e2clean = []\n R2clean = []\n xclean = []\n yclean = []\n eCTI = []\n e1CTI = []\n e2CTI = []\n R2CTI = []\n xCTI = []\n yCTI = []\n eCTIfixed = []\n e1CTIfixed = []\n e2CTIfixed = []\n R2CTIfixed = []\n xCTIfixed = []\n yCTIfixed = []\n\n fh = open(output.replace('.pk', '.csv'), 'w')\n fh.write('#files: %s and %s\\n' % (f1, f2))\n for key in params:\n print key, params[key]\n fh.write('# %s = %s\\n' % (key, str(params[key])))\n fh.write('#file, delta_e, delta_e1, delta_e2, delta_R2, delta_x, delta_y\\n')\n for f in files:\n print 'Processing: ', f\n\n #load data\n nocti = pf.getdata(f)\n\n #scale to SNR about 10 (average galaxy, a single exposure)\n nocti /= np.sum(nocti)\n nocti *= 1500.\n\n #place it on canvas\n tmp = np.zeros((2066, 2048))\n ysize, xsize = nocti.shape\n ysize /= 2\n xsize /= 2\n tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize] = 
nocti.copy()\n\n #add background\n tmp += bcgr\n\n #run CDM03\n c = CTI.CDM03bidir(params, [])\n tmp = c.applyRadiationDamage(tmp.copy().transpose()).transpose()\n\n #remove background and make a cutout\n CTIdata = tmp[loc-ysize:loc+ysize, loc-xsize:loc+xsize]\n CTIdata -= bcgr\n CTIdata[CTIdata < 0.] = 0.\n\n #write files\n #fileIO.writeFITS(nocti, f.replace('.fits', 'noCTI.fits'), int=False)\n #fileIO.writeFITS(CTI, f.replace('.fits', 'CTI.fits'), int=False)\n\n #reset settings\n settings = dict(sigma=sigma, iterations=iterations)\n\n #calculate shapes\n sh = shape.shapeMeasurement(nocti.copy(), log, **settings)\n results = sh.measureRefinedEllipticity()\n\n eclean.append(results['ellipticity'])\n e1clean.append(results['e1'])\n e2clean.append(results['e2'])\n R2clean.append(results['R2'])\n xclean.append(results['centreX'])\n yclean.append(results['centreY'])\n\n #CTI, fitted centroid\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results2 = sh.measureRefinedEllipticity()\n\n eCTI.append(results2['ellipticity'])\n e1CTI.append(results2['e1'])\n e2CTI.append(results2['e2'])\n R2CTI.append(results2['R2'])\n xCTI.append(results2['centreX'])\n yCTI.append(results2['centreY'])\n\n #fixed centroid\n settings['fixedPosition'] = True\n settings['fixedX'] = results['centreX']\n settings['fixedY'] = results['centreY']\n settings['iterations'] = 1\n sh = shape.shapeMeasurement(CTIdata.copy(), log, **settings)\n results3 = sh.measureRefinedEllipticity()\n\n eCTIfixed.append(results3['ellipticity'])\n e1CTIfixed.append(results3['e1'])\n e2CTIfixed.append(results3['e2'])\n R2CTIfixed.append(results3['R2'])\n xCTIfixed.append(results3['centreX'])\n yCTIfixed.append(results3['centreY'])\n\n text = '%s,%e,%e,%e,%e,%e,%e\\n' % (f, results['ellipticity'] - results2['ellipticity'],\n results['e1'] - results2['e1'], results['e2'] - results2['e2'],\n results['R2'] - results2['R2'],\n results['centreX'] - results2['centreX'],\n results['centreY'] - results2['centreY'])\n fh.write(text)\n print text\n\n fh.close()\n\n results = {'eclean': np.asarray(eclean),\n 'e1clean': np.asarray(e1clean),\n 'e2clean': np.asarray(e2clean),\n 'R2clean': np.asarray(R2clean),\n 'xclean': np.asarray(xclean),\n 'yclean': np.asarray(yclean),\n 'eCTI': np.asarray(eCTI),\n 'e1CTI': np.asarray(e1CTI),\n 'e2CTI': np.asarray(e2CTI),\n 'R2CTI': np.asarray(R2CTI),\n 'xCTI': np.asarray(xCTI),\n 'yCTI': np.asarray(yCTI),\n 'eCTIfixed': np.asarray(eCTIfixed),\n 'e1CTIfixed': np.asarray(e1CTIfixed),\n 'e2CTIfixed': np.asarray(e2CTIfixed),\n 'R2CTIfixed': np.asarray(R2CTIfixed),\n 'xCTIfixed': np.asarray(xCTIfixed),\n 'yCTIfixed': np.asarray(yCTIfixed)}\n\n #save to a file\n fileIO.cPickleDumpDictionary(results, output)\n\n return results", "def generate_summary(weather_data):\n# 5 Day Overview\n# The lowest temperature will be 9.4°C, and will occur on Friday 02 July 2021.\n# The highest temperature will be 20.0°C, and will occur on Saturday 03 July 2021.\n# The average low this week is 12.2°C.\n# The average high this week is 17.8°C.\n\n\n Number_of_days=0\n Min_Value=[]\n Date_value=[]\n Max_Value=[]\n\n\n for rows in weather_data: \n if len(rows) != 0:\n Number_of_days = Number_of_days + 1\n Min_Value.append(rows[1])\n Date_value.append(str(rows[0]))\n Max_Value.append(rows[2])\n \n min_temperature,min_position = find_min(Min_Value)\n min_tempe_celcius = convert_f_to_c(min_temperature)\n occur_date_min = convert_date(Date_value[min_position])\n max_temperature,max_position = find_max(Max_Value)\n max_tempe_celcius = 
convert_f_to_c(max_temperature)\n occur_date_max = convert_date(Date_value[max_position])\n mean_low = calculate_mean(Min_Value)\n mean_low__tempe_celcius = convert_f_to_c(mean_low)\n mean_high = calculate_mean(Max_Value)\n mean_high__tempe_celcius = convert_f_to_c(mean_high)\n\n summary=\"\"\n summary+=f\"{Number_of_days} Day Overview\\n\"\n summary+=f\" The lowest temperature will be {format_temperature(min_tempe_celcius)}, and will occur on {occur_date_min}.\\n\"\n summary+=f\" The highest temperature will be {format_temperature(max_tempe_celcius)}, and will occur on {occur_date_max}.\\n\"\n summary+=f\" The average low this week is {format_temperature(mean_low__tempe_celcius)}.\\n\"\n summary+=f\" The average high this week is {format_temperature(mean_high__tempe_celcius)}.\\n\"\n\n return summary", "def prepare_and_save_data(df, index=None, output_path=None):\n assert index is not None\n assert output_path is not None\n\n df.columns.name = 'fundamental_variable'\n df = df.set_index(['Quarter end', 'symbol'])\n\n df = df.stack()\n df = df.unstack(level=1)\n\n # In the initial csv files, None is used to represent missing data\n df[df == 'None'] = np.nan\n\n df.columns.name = None\n\n #df.reset_index().to_csv(path.join(data_dir, 'fundamental_data_of_stocks_stacked.csv'), index=False)\n\n # split the large dataframe based on the fundamental_variables\n # now each DataFrame represent one fundamental variable about a bunch of stocks.\n df = df.reset_index()\n\n\n # convert Quarter end to datetime object\n df['Quarter end'] = pd.to_datetime(df['Quarter end'])\n df = df.sort(['Quarter end'], axis=0)\n # convert the strings to float\n df = df.set_index(['Quarter end', 'fundamental_variable']).astype(float).reset_index(level=1)\n\n\n def _reindex(df):\n # This function will be applied to each fundamental variable group.\n # It will reindex the DataFrame. It will add the target index to the existing index and sort the new index\n new_index = index.append(df.index).drop_duplicates().values\n new_index.sort()\n\n # fill NaN with forward fill method. We should use forward filling because we can only use historical data\n # not the data from the future.\n df = df.reindex(new_index).fillna(method='ffill')\n\n return df\n\n\n def _to_csv(df):\n var_name = df.iloc[0]['fundamental_variable']\n print(\"{:<30} {}\".format(\"preparing fundamental variable\", var_name))\n var_name = re.sub('[-\\s\\&\\/]', '_', var_name) # remove special characters\n var_name = re.sub('_+', '_', var_name) # remove repetitive underscores\n var_name = re.sub('_$', '', var_name) # remove the tailing underscore\n df.set_index(['Quarter end']).reindex(index).to_csv(path.join(output_path, var_name + '.csv'), index=True)\n\n\n\n # The following step is to reindex the DataFrame. 
Note that the reindex is done by each fundamental variable.\n # The reason for this is if we do the reindex without groupby, there will be NaN in the fundamental_variable after\n # reindex operation, which is problematic for the forward fill operation of the next step\n df_reindex = df.groupby(by=['fundamental_variable'], as_index=False).apply(_reindex).sort_index()\n df_reindex.index = df_reindex.index.droplevel(0)\n\n # save to csv file by each fundamental variable\n df_out = df_reindex.reset_index()\n df_out.groupby(by=['fundamental_variable']).apply(_to_csv)", "def makeRooDataSet(type,infile_name,outfile_name,tree_name,nevents):\n\n\n\n\n inputfile = TFile.Open(infile_name,\"READ\")\n print \"Importing tree\"\n tree = TTree()\n inputfile.GetObject(tree_name, tree) #get the tree from the data file\n\n \n\n #define variables for the RooDataSet\n m_mumu = RooRealVar(\"m_mumu\", \"m_mumu\", 0.0, 4.0)\n y_mumu = RooRealVar(\"y_mumu\", \"y_mumu\", 0.0, 2.0 )\n pt_mumu = RooRealVar(\"pt_mumu\", \"pt_mumu\", 0.0, 260.0)\n eta_gamma = RooRealVar(\"eta_gamma\",\"eta_gamma\",-3.5, 3.5)\n pt_gamma = RooRealVar(\"pt_gamma\", \"pt_gamma\", 0.0, 100.0)\n m_gamma = RooRealVar(\"m_gamma\", \"m_gamma\", -0.1,0.1)\n \n m_chi_rf1S = RooRealVar(\"m_chi_rf1S\", \"m_chi_rf1S\", 0.0, 7.0)\n m_chi_rf2S = RooRealVar(\"m_chi_rf2S\", \"m_chi_rf2S\", -1.0, 1.0)\n #Qvalue = RooRealVar(\"Qvalue\",\"Q\", -15., 15.)\n s = RooRealVar(\"s\",\"s\", -10., 10.)\n \n ctpv = RooRealVar(\"ctpv\",\"ctpv\", -1.0, 3.5)\n ctpv_error = RooRealVar(\"ctpv_err\",\"ctpv_err\", -1.0, 1.0)\n pi0_abs_mass = RooRealVar(\"pi0_abs_mass\",\"pi0_abs_mass\", 0.0, 2.2)\n psi1S_nsigma = RooRealVar(\"psi1S_nsigma\",\"psi1S_nsigma\",0.0,1.0)\n psi2S_nsigma = RooRealVar(\"psi2S_nsigma\",\"psi2S_nsigma\",0.0,1.0)\n psi3S_nsigma = RooRealVar(\"psi3S_nsigma\",\"psi3S_nsigma\",0.0,1.0)\n rho_conv = RooRealVar(\"rho_conv\", \"rho_conv\", 0.0, 70.0)\n dz = RooRealVar(\"dz\",\"dz\", -1.0, 1.0)\n probFit1S = RooRealVar(\"probFit1S\",\"probFit1S\",0,1)\n probFit2S = RooRealVar(\"probFit2S\",\"probFit2S\",0,1)\n\n dataArgSet = RooArgSet(m_mumu,\n y_mumu,\n pt_mumu,\n eta_gamma,\n pt_gamma,\n m_gamma, \n m_chi_rf1S)\n \n dataArgSet.add( m_chi_rf2S )\n dataArgSet.add( s )\n dataArgSet.add( ctpv )\n dataArgSet.add( ctpv_error )\n dataArgSet.add( pi0_abs_mass )\n dataArgSet.add( psi1S_nsigma )\n dataArgSet.add( psi2S_nsigma )\n dataArgSet.add( rho_conv )\n dataArgSet.add( dz )\n dataArgSet.add( probFit1S )\n dataArgSet.add( probFit2S )\n\n\n print \"Creating DataSet\"\n dataSet = RooDataSet(\"chicds\",\"Chic RooDataSet\", dataArgSet)\n\n entries = tree.GetEntries()\n print entries\n\n if nevents is not 0:\n entries = nevents\n\n for ientry in range(0,entries):\n tree.GetEntry(ientry)\n\n # unfort ntuples are slightly different for chic and chib\n \n if type == 'chic':\n \n m_mumu.setVal(tree.dimuon_mass)\n y_mumu.setVal(tree.dimuon_rapidity) \n pt_mumu.setVal(tree.dimuon_pt)\n eta_gamma.setVal(tree.photon_eta)\n pt_gamma.setVal(tree.photon_pt)\n #m_gamma.setVal(tree.photon_p4.M())\n m_chi_rf1S.setVal(tree.rf1S_chic_mass)\n #m_chi_rf1S.setVal(tree.rf2S_chi_p4.M())\n #Qvalue.setVal((tree.chi_p4).M() - tree.dimuon_p4.M())\n #Qvalue.setVal((tree.chi_p4).M()**2 - tree.dimuon_p4.M()**2)\n #Qvalue.setVal((tree.rf1S_chic_mass**2 -tree.dimuon_mass**2)\n # / (3.5107**2 - 3.0969**2 ) -1)\n\n \n# this should be the correct one if the refitted variable was available\n# s.setVal((tree.rf1S_chic_mass**2 - tree.rf1S_dimuon_p4.M()**2)/ (3.5107**2 - 3.0969**2 ) -1)\n \n 
s.setVal((tree.rf1S_chic_mass**2 - 3.0969**2)/ (3.5107**2 - 3.0969**2 ) -1)\n\n\n \n psi1S_nsigma.setVal(tree.psi1S_nsigma)\n psi2S_nsigma.setVal(0) \n psi3S_nsigma.setVal(0)\n \n elif type == 'chib':\n\n m_mumu.setVal(tree.dimuon_mass)\n y_mumu.setVal(tree.dimuon_rapidity) \n pt_mumu.setVal(tree.dimuon_pt)\n eta_gamma.setVal(tree.photon_eta)\n pt_gamma.setVal(tree.photon_pt)\n m_chi_rf1S.setVal(tree.rf1S_chib_mass)\n m_chi_rf2S.setVal(tree.rf2S_chib_mass) \n Qvalue.setVal(tree.chib_mass - tree.dimuon_mass)\n psi1S_nsigma.setVal(tree.Y1S_nsigma)\n psi2S_nsigma.setVal(tree.Y2S_nsigma) \n psi3S_nsigma.setVal(tree.Y3S_nsigma)\n \n ctpv.setVal(tree.ct_pv)\n ctpv_error.setVal(tree.ct_pv_error)\n #pi0_abs_mass.setVal(tree.pi0_abs_mass)\n\n rho_conv.setVal(tree.Conv)\n dz.setVal(tree.Dz)\n probFit1S.setVal(tree.probfit1S)\n #probFit2S.setVal(tree.probFit2S)\n \n #if (tree.chic_pdgId == 20443):dataSet.add(dataArgSet)\n dataSet.add(dataArgSet)\n \n\n outfile = TFile(outfile_name,'recreate') \n dataSet.Write()", "def result2json(ifilename, poiname, ofilename):\n nameMap = {\n \"SysWeight1\" : \"mc\",\n \"SysWeight2\" : \"FSR\",\n \"SysWeight3\" : \"bkg\",\n \"SysWeight4\" : \"tagpt\",\n \"SysWeight6\" : \"Prefire\",\n \"SysRecoil2\" : \"recoil_eta\",\n \"SysRecoil3\" : \"recoil_keys\",\n \"SysRecoil6\" : \"recoil_stat0\",\n \"SysRecoil7\" : \"recoil_stat1\",\n \"SysRecoil8\" : \"recoil_stat2\",\n \"SysRecoil9\" : \"recoil_stat3\",\n \"SysRecoil10\": \"recoil_stat4\",\n \"SysRecoil11\": \"recoil_stat5\",\n \"SysRecoil12\": \"recoil_stat6\",\n \"SysRecoil13\": \"recoil_stat7\",\n \"SysRecoil14\": \"recoil_stat8\",\n \"SysRecoil15\": \"recoil_stat9\",\n }\n\n def getNuisName(nuis):\n if nuis in nameMap.keys():\n return nameMap[nuis]\n elif bool(re.match(r\"\\w*bin\\d+shape\", nuis)):\n return \"QCD_\" + nuis\n else:\n return nuis\n\n ifile = ROOT.TFile(ifilename)\n himpact = ifile.Get(\"nuisance_impact_mu\")\n himpact_grouped = ifile.Get(\"nuisance_group_impact_mu\")\n tree = ifile.Get(\"fitresults\")\n tree.GetEntry(0)\n\n # find the POI bin for poiname\n ibinX = -1\n for binX in range(1, himpact.GetNbinsX()+1):\n poi = himpact.GetXaxis().GetBinLabel(binX)\n if poi == poiname:\n ibinX = binX\n continue\n assert ibinX >=0, \"Can not find the POI {} in the postfit file {}. 
Please check.\".format(poiname, ifilename)\n\n results = OrderedDict()\n results['POIs'] = []\n val = getattr(tree, poiname)\n err = abs(getattr(tree, poiname+\"_err\"))\n poi = OrderedDict()\n poi['fit'] = [val-err, val, val+err]\n poi['name'] = poiname\n results['POIs'].append(poi)\n\n results['method'] = 'default'\n results['params'] = []\n\n # dump impacts\n impacts = OrderedDict()\n for ibinY in range(1, himpact.GetNbinsY()+1):\n nuis = himpact.GetYaxis().GetBinLabel(ibinY)\n impacts[nuis] = himpact.GetBinContent(ibinX, ibinY)\n\n # add the grouped QCD and Recoil systematic\n groupnames = []\n for ibinY in range(1, himpact_grouped.GetNbinsY()+1):\n tmpY = himpact_grouped.GetYaxis().GetBinLabel(ibinY)\n if tmpY == 'stat':\n continue\n impacts[tmpY] = himpact_grouped.GetBinContent(ibinX, ibinY)\n groupnames.append(tmpY)\n\n # sort impacts, descending\n impacts = OrderedDict(sorted(impacts.items(), key=lambda x: abs(x[1]), reverse=True))\n\n pulls = OrderedDict()\n for nuis in impacts.keys():\n if nuis not in groupnames:\n val = getattr(tree, nuis)\n err = getattr(tree, nuis+\"_err\")\n err = abs(err)\n else:\n # manually set the postfit of the grouped sys to [-1,1], and pulled at 0,\n # since only the impacts are useful to us\n val = 0.\n err = 1.\n pulls[nuis] = [val - err, val, val + err]\n\n # save to results\n for nuis in impacts.keys():\n systematic = OrderedDict()\n systematic['fit'] = pulls[nuis]\n systematic['groups'] = []\n systematic['impact_' + poiname] = impacts[nuis]\n systematic['name'] = getNuisName(nuis)\n systematic['prefit'] = [-1.0, 0., 1.0]\n systematic[poiname] = [poi['fit'][1] - impacts[nuis], poi['fit'][1], poi['fit'][1] + impacts[nuis]]\n systematic['type'] = \"Gaussian\"\n print(getNuisName(nuis), pulls[nuis][1], pulls[nuis][1]-pulls[nuis][0], impacts[nuis])\n\n results['params'].append(systematic)\n\n with open(ofilename, 'w') as fp:\n json.dump(results, fp, indent=2)", "def EORA(set_year=2016,save_output=True,print_output=True,in_dollars=False):\n\n # load mapping function for industries\n mapper_EORA = pd.read_excel(os.path.join(data_path,'other_sources','mappers.xlsx'),\n sheet_name=\"EORA_INDEC\")\n mapper_EORA = dict(zip(mapper_EORA['EORA'],mapper_EORA['INDEC']))\n\n ### Load supply and use table\n EORA_SUP = pd.read_excel(os.path.join(data_path,'EORA',\n 'EORA_IO_ARG_2015_BasicPrice.xlsx'),sheet_name='SUP',\n index_col=[0,1,2],header=[0,1,2])\n\n EORA_USE = pd.read_excel(os.path.join(data_path,'EORA',\n 'EORA_IO_ARG_2015_BasicPrice.xlsx'),sheet_name='USE',\n index_col=[0,1,2],header=[0,1,2]).fillna(0)\n\n # GET VARIABLES\n x = np.array(EORA_SUP.sum(axis=1)) # total production on industry level\n g = np.array(EORA_SUP.sum(axis=0)) # total production on product level\n F = EORA_USE.iloc[:196,125:].sum(axis=1)\n\n #Numpify\n Sup_array = np.asarray(EORA_SUP.iloc[:125,:196]) # numpy array of supply matrix\n Use_array = np.asarray(EORA_USE.iloc[:196,:125]) # numpy array of use matrix\n\n g_diag_inv = np.linalg.inv(np.diag(g)) # inverse of g (and diagolinized)\n x_diag_inv = np.linalg.inv(np.diag(x)) # inverse of x (and diagolinized)\n\n # Calculate the matrices\n B = np.dot(Use_array,x_diag_inv) # B matrix (U*x^-1)\n D = np.dot(Sup_array,g_diag_inv) # D matrix (V*g^-1)\n I_i = np.identity((len(x))) # Identity matrix for industry-to-industry\n\n # Inverse for industry-to-industry\n A_ii = np.dot(D,B)\n IDB_inv = np.linalg.inv((I_i-np.dot(D,B))) # (I-DB)^-1 \n\n # And canclulate sum of industries\n ind = np.dot(IDB_inv,np.dot(D,F)) # (I-DB)^-1 * DF\n\n # split FD 
in local, import and export\n LFD = np.dot(D,EORA_USE.iloc[:196,125:131].sum(axis=1) )\n Exp = np.dot(D,EORA_USE.iloc[:196,131])\n\n # combine all elements into one table\n EORA = pd.concat([pd.DataFrame(np.dot(A_ii,np.diag(ind))),pd.DataFrame(LFD),\n pd.DataFrame(Exp)],axis=1)\n EORA.columns = [x[2] for x in EORA_USE.columns[:125]]+['FinalD','Exports']\n EORA.index = [x[2] for x in EORA_USE.columns[:125]]\n\n VA = list(EORA_USE.iloc[196:202,:125].sum(axis=0))+[0,0]\n IMP = list(EORA_USE.iloc[202,:125])+[0,0]\n EORA.loc['ValueA'] = VA\n EORA.loc['Imports'] = IMP\n EORA[EORA < 1e-5] = 0\n\n # and map into the INDEC classes:\n EORA.columns = EORA.columns.map(mapper_EORA)\n EORA = EORA.groupby(level=0,axis=1).sum()\n EORA.index = EORA.index.map(mapper_EORA)\n EORA = EORA.groupby(level=0,axis=0).sum()\n\n EORA = EORA[[chr(i).upper() for i in range(ord('a'),ord('p')+1)]+['FinalD','Exports']]\n EORA = EORA.T[[chr(i).upper() for i in range(ord('a'),ord('p')+1)]+['ValueA','Imports']]\n EORA = EORA.T\n\n # convert to the desired year using INDEC time series\n Total_Prod = pd.read_excel(os.path.join(data_path,\n 'INDEC','sh_VBP_VAB_06_19.xls'),\n sheet_name='Cuadro 2',skiprows=3,index_col=[0])/1e3 \n\n rel_change = Total_Prod[set_year]/Total_Prod[2015] \n\n # and balance the table\n X0 = EORA.values[:,:]\n X0_sum = np.array(list(EORA.sum(axis=1)[:16]*rel_change)+[(EORA['FinalD'][:16]*rel_change).sum()]+[(EORA['Exports'][:16]*rel_change).sum()])\n\n # apply RAS method to rebalance the table\n new_IO = ras_method(X0,X0_sum,X0_sum,1e-5,print_out=False)\n\n if in_dollars:\n EORA.iloc[:,:] = new_IO/1e6\n else:\n EORA.iloc[:,:] = new_IO/1e6*dollar_pesos()[set_year]\n\n if save_output:\n EORA.to_csv(os.path.join(data_path,'national_tables','{}_EORA.csv'.format(set_year)))\n\n if print_output:\n print('NOTE : Standardized national table for Argentina for the year {} finished using EORA data'.format(set_year)) \n\n return EORA", "def vibrational_analysis_summary(self):\n print(\"Summary of the vibrational analysis:\")\n #cols = [ \"eigvals [a.u.]\" , \"w [a.u.]\", \"w [THz]\", \"w [cm^-1]\", \"T [a.u.]\", \"T [ps]\",\"E [a.u.]\", \"n [a.u.]\"]\n df = pd.DataFrame()\n eigvals = self.eigvals.copy()\n eigvals [ eigvals == MicroStatePrivate.smallest_float ] = np.nan\n df[\"eigvals [a.u.]\"] = eigvals\n df[\"w [a.u.]\"] = [ np.sqrt(i) if i > 0. 
else None for i in eigvals ]\n df[\"w [THz]\"] = convert(df[\"w [a.u.]\"],\"frequency\",_from=\"atomic_unit\",_to=\"thz\")\n df[\"w [cm^-1]\"] = convert(df[\"w [a.u.]\"],\"frequency\",_from=\"atomic_unit\",_to=\"inversecm\")\n df[\"T [a.u.]\"] = 2*np.pi / df[\"w [a.u.]\"]\n df[\"T [ps]\"] = convert(df[\"T [a.u.]\"],\"time\",_from=\"atomic_unit\",_to=\"picosecond\")\n\n if hasattr(self,\"energy\"):\n df[\"E [a.u.]\"] = self.energy.mean(axis=0)\n\n if hasattr(self,\"occupations\"):\n df[\"n [a.u.]\"] = self.occupations.mean(axis=0)\n\n return df", "def plot_uwrz_field(f=False,ax=False, oro='h500a7', vm=[-2,2],tinst=18,var='QVrz',ik=0,jk=0):\n\n tinst = int(tinst)\n \n # in case f is not passed, f and ax are created\n if not f:\n f,ax=plt.subplots()\n\n # Name the axes \n decorate_ax(ax,'',daytime(tinst),'')\n \n\n # specify full path of the data\n\n iensmean = 1\n \n if iensmean:\n srcpath = BASE3D + oro + '/' + sm +'/' + CIRCMEAN\n print srcpath\n else: # 1 single ensemble member first day\n srcpath = BASE3D + oro + '/' + sm +'/'\n seedname= filter(lambda x: x.startswith('circmean_seed'), os.listdir(srcpath))[0]\n srcpath +=seedname\n\n # Open the data in netcdf format and read it only \n ncfl = Dataset(srcpath,'r')\n \n # Load the fields of interest\n # COSMO fields dimensions are (time, z,rlat,rlon)\n #\n # Field dimensions in the following are (time,z,r)\n #\n # By specifing '::-1' the second timension is inverted\n urz = ncfl.variables['Urz'][tinst,::-1,:] # U component of wind\n wrz = ncfl.variables['Wrz'][tinst,::-1,:] # W component of wind\n\n speedrz = ncfl.variables['Speedrz'][tinst,::-1,:] # wind speed\n rhorz = ncfl.variables['RHOrz'][:,::-1,:] # density in rz coordinates\n TOTP = ncfl.variables['TOT_PRECr'][:,:]/2. # half-hourly output\n \n LCLr= ncfl.variables['LCL_MLr'][tinst,:]/1000. # LCL\n LFCr = ncfl.variables['LFC_MLr'][tinst,:]/1000. # LFC\n hPBLr = ncfl.variables['HPBLr'][tinst,:]/1000. \n \n CAPEr = ncfl.variables['CAPE_MLr'][tinst,:] # CAPE\n CINr = ncfl.variables['CIN_MLr'][tinst,:] # CIN\n \n TOTPsum = np.sum(TOTP[:tinst,:],axis=0)\n TOTPsum = TOTP[tinst,:]\n \n TPthres = 0.5\n rainyr=np.where(TOTPsum>TPthres)\n\n\n if var: #select a specific humidity var\n qvrz = ncfl.variables[var][:,::-1,:] # specficif humidity\n qcrz = ncfl.variables['QCrz'][:,::-1,:]\n else: # otherwise sum it all up:\n qxrzvars = filter(lambda x: x.startswith('Q'),ncfl.variables.keys())\n \n qvrz = np.zeros(ncfl.variables['QVrz'].shape)\n for var in qxrzvars:\n print var\n qvrz+=ncfl.variables[var][:,::-1,:]\n \n tref = 12 # 06:00\n ipltdiff= 0\n if ipltdiff:\n plotvar = rhorz[tinst,:]*qvrz[tinst,:]-rhorz[tref,:]*qvrz[tref,:]\n qcrz = rhorz[tinst,:]*qcrz[tinst,:]-rhorz[tref,:]*qcrz[tref,:]\n else:\n plotvar = rhorz[tinst,:]*qvrz[tinst,:]\n \n\n rhorz = rhorz[tinst,:]\n \n \n # Load the grid in r, z coordinate\n X = ncfl.variables['X'][:]\n \n \n # nasty and temporary workaround as Z is empty in single seed data - just\n # take it from the circmean field\n srcpath2 = BASE3D + oro + '/' + sm +'/' + CIRCMEAN\n ncfl2=Dataset(srcpath2)\n Z = ncfl2.variables['Z'][::-1,:]/1000. 
# highest index <-> ground\n ncfl2.close() \n \n # Add mountain contour\n r=X[0,:]\n ax.plot(r,Z[0,:],color='grey',linewidth=2)\n \n# ax.plot(r,hPBLr,'k-.',linewidth=1)\n# ax.plot(r,LCLr,'k-',linewidth=1) \n \n ax.fill_between(X[0,:],Z[0,:],0,color='lightgrey')\n \n #indicate rainy region\n \n ifillrainy = False\n if ifillrainy:\n ax.fill_between(X[0,rainyr][0],Z[0,rainyr][0],color='lightblue',alpha=0.8) \n ax.plot(X[0,rainyr][0],Z[0,rainyr][0],color='lightblue',linewidth=2)\n else:\n pass#ax.plot(X[0,rainyr][0],Z[0,rainyr][0],color='blue',linewidth=2,linestyle='dashed')\n \n # Velocity vectors with speed (length) smaller than speedthres are not \n # shown\n oroorig = oro\n if oro.endswith('bell') or oro.endswith('cos2'):\n \n oro = oro[:-5]\n print oro\n \n i,j=np.where(expnames==oro)\n\n if i==[]:\n print 'HH'\n \n oro = oro.split('_')[0]\n h,a = getha(oro)\n #h,a = hrange[i],arange[j]\n print h,a\n xrang = [0,40]\n yrang = [0,10.]\n delx = xrang[1]-xrang[0]\n dely = yrang[1]-yrang[0]\n \n speedthres = 0.1# m/s\n M = speedrz[:] < speedthres\n U = ma.masked_array(urz[:,:],mask=M)\n scalefac_w = 1. #delx/dely\n W = ma.masked_array(scalefac_w*wrz[:,:],mask=M) \n \n \n \n if 0: # REMOVE\n fact_agl = -dely/delx\n sin = np.arcsin(fact_agl)\n cos = np.arccos(fact_agl)\n U=cos*U-sin*W\n W=sin*U+cos*W\n # Plot radial velocity\n urzlevs = np.linspace(vm[0],vm[1],10)\n\n\n levs = np.array([2,4,6,8,10,12,14,16])\n #levs = np.array([0.25,0.5,1,2,4,8,16,32])\n #levs = np.array([0.5,1,2,4,8,16,32])\n if ipltdiff:\n levs=levs/8. #np.linspace(0.001,0.004,11)d\n #else:\n # levs=np.array([0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004])*50\n #if var.startswith('QC'): levs=np.linspace(0.1,1.5,9)/1000.\n \n #cf = ax.contourf(X,Z,plotvar*1000., levels=levs*1000.,cmap=viridis_inv,extend='both')\n \n ipltmoistening = False \n if ipltmoistening:\n cf = ax.contourf(X,Z,plotvar*1000.,cmap=viridis_inv,extend='both',\n levels=levs)\n else:\n cf = 0\n \n #cfc = ax.contourf(X,Z,qvrz*1000.,cmap='Blues',extend='both', alpha=1,\n # levels=levs/4.) 
\n\n iplt_thermodynvar = False\n if iplt_thermodynvar:\n for var in [LFCr]:\n print var.shape\n print X.shape\n ax.plot(var)\n \n \n qstx = 2\n qstz = 2\n Q=ax.quiver(X[::qstz,::qstx],(Z+0.02)[::qstz,::qstx],U[::qstz,::qstx],W[::qstz,::qstx],\n units='inches',scale=10,angles='xy', color='red',pivot='mid',\n headwidth=4,width=0.009)\n #Q=ax.quiverkey(Q,a/2,h/2000.,1,r'$1\\frac{m}{s}$',labelpos='N',coordinates='data')\n\n if jk==2 or oroorig.endswith('cos2'):#(ik,jk)==(1,0):\n ax.quiverkey(Q,-2,0.05,1,r'$1 \\frac{m}{s}$',labelsep=0.05,\n labelpos='N',coordinates='data')\n\n\n ax.set_xticks(np.arange(0,80,20)) \n\n ax.set_xlim(xrang)\n ax.set_ylim(yrang)\n\n if ik==2:\n ax.set_xlabel('r in km')\n if jk==0:\n ax.set_ylabel('z in km') \n \n ax.grid(True)\n\n ncfl.close()\n \n return f,ax ,cf # plot_uwrz_field", "def encircled_energy(uvotfilter, areapix):\n import os\n caldb = os.getenv(\"CALDB\")\n if uvotfilter == 'wh': uvotfilter = 'white'\n command=\"quzcif swift uvota - \"+uvotfilter.upper()+\\\n \" REEF 2009-10-30 12:00:00 - > quzcif.out\"\n print(command) \n if not os.system(command):\n print(\"not \" +command) \n f = open(\"quzcif.out\")\n reeffile, ext = f.read().split()\n ext = int(ext)\n f.close()\n os.system(\"rm -f quzcif.out\")\n print(reeffile, ext)\n f = fits.getdata(reeffile,ext)\n r = f['radius'] # in arc sec\n E = f['reef'] \n x = sqrt(old_div(areapix,pi))*0.502 # lookup radius\n f = interp1d(r,E)\n return f(x)", "def potentials(path_to_units, path_to_eez, path_to_shared_coast,\n path_to_electricity_yield_pv_prio, path_to_electricity_yield_wind_prio,\n path_to_eligibility_categories, path_to_land_cover, path_to_protected_areas,\n path_to_result, scenario, config):\n with rasterio.open(path_to_eligibility_categories, \"r\") as src:\n eligibility_categories = src.read(1)\n with rasterio.open(path_to_electricity_yield_pv_prio, \"r\") as src:\n transform = src.transform\n electricity_yield_pv_prio = src.read(1)\n with rasterio.open(path_to_electricity_yield_wind_prio, \"r\") as src:\n electricity_yield_wind_prio = src.read(1)\n with rasterio.open(path_to_land_cover, \"r\") as src:\n land_cover = src.read(1)\n with rasterio.open(path_to_protected_areas, \"r\") as src:\n protected_areas = src.read(1)\n with fiona.open(path_to_units, \"r\") as src:\n unit_ids = [feature[\"properties\"][\"id\"] for feature in src]\n unit_geometries = [feature[\"geometry\"] for feature in src]\n with fiona.open(path_to_eez, \"r\") as src:\n eez_ids = [feature[\"properties\"][\"id\"] for feature in src]\n eez_geometries = [feature[\"geometry\"] for feature in src]\n shared_coasts = pd.read_csv(path_to_shared_coast, index_col=0)\n\n electricity_yield_pv_prio, electricity_yield_wind_prio = apply_scenario_config(\n potential_pv_prio=electricity_yield_pv_prio,\n potential_wind_prio=electricity_yield_wind_prio,\n categories=eligibility_categories,\n land_cover=land_cover,\n protected_areas=protected_areas,\n scenario_config=config[\"scenarios\"][scenario]\n )\n electricity_yield_pv_prio, electricity_yield_wind_prio = decide_between_pv_and_wind(\n potential_pv_prio=electricity_yield_pv_prio,\n potential_wind_prio=electricity_yield_wind_prio,\n electricity_yield_pv_prio=electricity_yield_pv_prio,\n electricity_yield_wind_prio=electricity_yield_wind_prio,\n eligibility_categories=eligibility_categories\n )\n\n onshore_potentials = pd.DataFrame(\n index=unit_ids,\n data={\n potential: potentials_per_shape(\n eligibilities=potential.eligible_on,\n potential_map=(electricity_yield_pv_prio if \"pv\" in 
str(potential).lower()\n else electricity_yield_wind_prio),\n eligibility_categories=eligibility_categories,\n shapes=unit_geometries,\n transform=transform\n )\n for potential in Potential.onshore()\n }\n )\n offshore_eez_potentials = pd.DataFrame(\n index=eez_ids,\n data={\n potential: potentials_per_shape(\n eligibilities=potential.eligible_on,\n potential_map=(electricity_yield_pv_prio if \"pv\" in str(potential).lower()\n else electricity_yield_wind_prio),\n eligibility_categories=eligibility_categories,\n shapes=eez_geometries,\n transform=transform\n )\n for potential in Potential.offshore()\n }\n )\n offshore_potentials = pd.DataFrame(\n data=shared_coasts.dot(offshore_eez_potentials),\n columns=Potential.offshore()\n )\n potentials = pd.concat([onshore_potentials, offshore_potentials], axis=1)\n potentials.index.name = \"id\"\n potentials.to_csv(\n path_to_result,\n header=True,\n index=True\n )", "def initial_importer(initials, initialZMT=True):\n from .functions import cosd, lna\n ###filling the running variables with values depending on the systemconfiguration in rk4input###\n\n if Base.spatial_resolution == 0:\n dim = 0\n print('0D')\n Vars.T = initials['zmt']\n else:\n dim = 1\n # NS==True corresponds to southpole to northpole representation (180 Degrees)\n if Base.both_hemispheres == True:\n Latrange = 180\n\n # Checking if Temperature and Latitude is set on a latitudal circle (0°,10°,..if step=10)\n # or on a latitudinal belt and therefore between the boundaries (5°,15°,..if step=10)\n\n # circle==True and belt==False says on the latitudinal circle\n if Base.latitudinal_circle == True and Base.latitudinal_belt == False:\n Vars.Lat = np.linspace(-90 + Base.spatial_resolution, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution - 1))\n Vars.Lat2 = np.linspace(-90, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution - 1))\n # Checking if the Temperature for each latitude starts with the same value or a\n # cosine shifted value range\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n\n # circle==False and belt==True say on the latitudinal belt\n if Base.latitudinal_circle == False and Base.latitudinal_belt == True:\n Vars.Lat2 = np.linspace(-90 + Base.spatial_resolution, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution - 1))\n Vars.Lat = np.linspace(-90, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n if initials['initial_temperature_noise'] == True:\n z = [0] * len(Vars.Lat)\n for k in range(len(Vars.Lat)):\n z[k] = np.random.normal(0, initials['initial_temperature_noise_amplitude'])\n else:\n z = 0\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1) + lna(z)\n\n # Not from southpole to northpole rather equator to pole\n else:\n Latrange = 90\n if Base.latitudinal_circle == True and Base.latitudinal_belt == False:\n Vars.Lat = np.linspace(0, 90 - Base.spatial_resolution, int(Latrange / Base.spatial_resolution))\n Vars.Lat2 = np.linspace(0, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if 
initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n if Base.latitudinal_circle == False and Base.latitudinal_belt == True:\n Vars.Lat2 = np.linspace(0, 90 - Base.spatial_resolution, int(Latrange / Base.spatial_resolution))\n Vars.Lat = np.linspace(0, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n\n Vars.t = initials['time']\n if Base.parallelization == True:\n if initialZMT == True:\n Vars.T = np.array([Vars.T] * Base.number_of_parallels)\n Vars.T_global = np.array([initials['gmt']] * Base.number_of_parallels)\n else:\n Vars.T_global = initials['gmt']", "def ImportTemps(cls, mfg_enduse):\n\n def create_dict(file_dir, file):\n dict_out = dict(pd.read_excel(\n file_dir + file, _sheetname=0).iloc[:, (0, 2)].values\n )\n\n return dict_out\n\n ndict = {}\n\n for k, v in cls.nfiles.items():\n ndict[k[0:7]] = create_dict(cls.file_dir, v)\n\n temps = pd.read_excel(cls.file_dir + cls.temp_file, sheetname=0)\n\n temps.SIC.fillna(method='ffill', inplace=True)\n\n temps.loc[:, 'Temp_C'] = temps.Temp_C.apply(\n lambda x: int(np.around(x))\n )\n\n # Calculate energy fraction of each process by temperature\n temps = pd.DataFrame(temps.groupby(\n ['SIC', 'Unit_Process', 'Heat_type', 'Temp_C']\n )['E_Btu'].sum())\n\n e_totals = temps.reset_index()[\n temps.reset_index()['Unit_Process'] != 'Boiler'\n ].groupby(['SIC', 'Heat_type']).E_Btu.sum()\n\n for i in temps.index:\n if 'Boiler' in i:\n continue\n\n temps.loc[i, 'Fraction'] = \\\n temps.loc[i, 'E_Btu'] / e_totals.loc[(i[0], i[2])]\n\n temps.reset_index(inplace=True)\n\n temps.loc[:, 'SIC'] = temps.SIC.apply(lambda x: int(str(x)[0:4]))\n\n temps.loc[:, 'NAICS02'] = temps.SIC.map(ndict['sic_N02'])\n\n temps.loc[:, 'NAICS07'] = temps.NAICS02.map(ndict['N02_N07'])\n\n temps.loc[:, 'NAICS12'] = temps.NAICS07.map(ndict['N07_N12'])\n\n # Multiple entries for each SIC/NAICS; take simple mean. \n temps = temps.groupby(\n ['NAICS12', 'Unit_Process', 'Heat_type']\n )[['E_Btu', 'Temp_C', 'Fraction']].mean()\n\n # Create 4-, and 3-digit NAICS table for matching \n temps_NAICS = pd.DataFrame(index=temps.index.levels[0],\n columns=['N5', 'N4', 'N3']\n )\n\n for n in [5, 4, 3]:\n temps_NAICS.loc[:, 'N' + str(n)] = \\\n [float(str(x)[0:n]) for x in temps_NAICS.index.values]\n\n temps_NAICS.reset_index(inplace=True)\n\n eu_naics = pd.DataFrame(\n mfg_enduse.naics.drop_duplicates().sort_values(ascending=True),\n copy=True\n )\n\n eu_naics.reset_index(inplace=True, drop=True)\n\n eu_naics.rename(columns={'naics':'NAICS12'}, inplace=True)\n\n for n in [5, 4, 3]:\n eu_naics.loc[:, 'N' + str(n)] = \\\n [float(str(x)[0:n]) for x in eu_naics.NAICS12.values]\n\n # Match naics between end use data set and temperature info. 
\n nmatch = pd.DataFrame()\n for column in temps_NAICS.columns:\n nmatch = pd.concat([nmatch, pd.Series(\n [x in temps_NAICS[column].values for x in eu_naics[\n column\n ].values]\n )], axis=1)\n\n nmatch.columns = eu_naics.columns\n\n nmask = pd.DataFrame()\n\n for c in nmatch.columns:\n\n nmask = pd.concat(\n [nmask, eu_naics[c].multiply(nmatch[c])],\n axis=1\n )\n\n nmask.replace({0:np.nan}, inplace=True)\n\n # Values of 0 indicate no matching temperature data, even at 3-digit \n # level.\n nmask.N3.fillna(0, inplace=True)\n\n nmask.loc[:, 'TN_Match'] = nmask.apply(\n lambda x: int(list(x.dropna())[0]), axis=1\n )\n\n nmask.rename(columns={'NAICS12':'N6'}, inplace=True)\n\n nmask.loc[:, 'NAICS12'] = eu_naics.NAICS12\n\n # Merge matched NAICS values with end use energy data\n mfg_enduse = pd.merge(mfg_enduse,\n nmask[['NAICS12', 'TN_Match']], how='left',\n left_on='naics', right_on='NAICS12')\n\n mfg_enduse.drop('NAICS12', inplace=True, axis=1)\n\n # Merge temps and temps_NAICS for future operations by other NAICS\n temps.reset_index(inplace=True)\n\n temps = pd.merge(temps, temps_NAICS, left_on='NAICS12',\n right_on='NAICS12', how='left')\n\n for tb, tr in {'<100': (0, 99), '100-249': (100, 249),\n '250-399': (250, 399), '400-999': (400, 999),\n '>1000': (1000, 3000)}.items():\n\n ti = temps[temps.Temp_C.between(tr[0], tr[1])].index\n\n temps.loc[ti, 'Temp_Bucket'] = tb\n\n return mfg_enduse, temps", "def test_all_false(directorio = str(), database = 'red_cod-db.pkl', \r\n local_function = 'fij_2.0_25_diccio'):\r\n df = pd.read_pickle(database)\r\n collection = pd.read_csv(directorio + '/compounds_collection.csv')\r\n \r\n cifs = [i for i in collection['cif']]\r\n maxsites = np.max(collection['sitios'])\r\n \r\n df = df[df['sitios'] > 0][df['sitios'] <= maxsites].reset_index(drop=True)\r\n df = df.loc[~df['cif'].isin(cifs)].reset_index(drop=True)\r\n \r\n x, _, df = inout_creator(df=df)\r\n \r\n x = compute_quotients(X=x)\r\n x, df = append_local_functions(X = x,df=df)\r\n \r\n busqueda = \"ls \" + directorio + \"/*.h5 > model_names.txt\"\r\n os.system(busqueda)\r\n \r\n diccio = np.load(directorio + '/feature_standarisation.npy').item()\r\n \r\n X = (x - diccio['mean'])/diccio['std']\r\n x = np.reshape(X,(X.shape[0],X.shape[2]))\r\n \r\n with open('model_names.txt','r') as f:\r\n for line in f:\r\n modelo = models.load_model(line[:len(line)-1])\r\n nombre = line.split('/')[1]\r\n \r\n outpred = modelo.predict(x)\r\n prediction = outpred >= 0.5\r\n df['y_pred'] = np.ravel(prediction)\r\n \r\n with open(directorio+'/test_with_all_false.txt','a') as tr:\r\n tr.write(nombre + '\\n')\r\n \r\n for sitios in range(1, max(df['sitios']) + 1):\r\n \r\n acc = df[df['sitios'] == sitios][df['y_pred'] == False].shape[0]\r\n miniset = df[df['sitios'] == sitios].shape[0]\r\n percent = round(100*acc/miniset,2)\r\n \r\n \r\n tr.write('With '+ str(sitios) + ' sites:' + str(percent) +\\\r\n '(' + str(miniset) + ' samples)' + '\\n')\r\n tr.close()\r\n return", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", 
\"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + \".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def summaryD5(self):\r\n\r\n if self.window.diff_tabs.tempruns_D45_set==3:\r\n\r\n dfD5_temp1_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp1_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n D5summary_temp1=pd.concat([dfD5_temp1_pos1,dfD5_temp1_pos2],axis=1)\r\n D5summary_temp1.to_csv('D5summary_temp1.txt',index=False)\r\n\r\n dfD5_temp2_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp2_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n\r\n D5summary_temp2=pd.concat([dfD5_temp2_pos1,dfD5_temp2_pos2],axis=1)\r\n D5summary_temp2.to_csv('D5summary_temp2.txt',index=False)\r\n\r\n dfD5_temp3_pos1= pd.read_csv('raw_text_D5_3_1.txt')\r\n dfD5_temp3_pos2= pd.read_csv('raw_text_D5_3_2.txt')\r\n \r\n\r\n D4summary_temp3=pd.concat([dfD5_temp3_pos1,dfD5_temp3_pos2],axis=1)\r\n D4summary_temp3.to_csv('D5summary_temp3.txt',index=False)", "def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed'] #NOTE: I changed the column names because .query() would not work when referencing column names with spaces\n global DataDF #added this line to make the dataframe visible in the variable explorer\n global ReplacedValuesDF #added this line to make the dataframe visible in the variable explorer\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\",\"2. Gross Error\",\"3. Swapped\",\"4. 
Range Fail\"], columns=colNames[1:]) #added additional indexed rows to make adding the values later easier\n \n return( DataDF, ReplacedValuesDF )", "def readin (filename, hdu=0, non_std_fits=False,\n text_comments='#', text_skiprows=0, get_data=False, verbose=False,\n apply_WCS_rv=False):\n multi_order_txt = False\n use_naxis2='all'\n use_naxis3='all'\n \n \n preferred_wlsoln=None # !! need to fix this\n # !! should also be able to input wavelength solution?\n \n if preferred_wlsoln is not None: preferred_wlsoln = wlsolvefxn.get_func_name(preferred_wlsoln)\n \n #### check if file exists ####### #############\n if not os.path.exists(filename): raise IOError(\"File does not exist:'\"+filename+\"'\")\n\n\n #### check if file is text############# \n np_kwargs = {'comments':text_comments,\n 'skiprows':text_skiprows}\n is_text_file, txt_data = check_for_txt_format(filename,**np_kwargs)\n\n #### if it is a text file ######################\n if is_text_file:\n spec_obj = readin_txt(filename,txt_data,get_data) \n return spec_obj \n\n #### now check how it behaves as a fits file\n if non_std_fits: hdulist = pyfits.open(filename)\n else:\n # give standard pyfits readin a try\n try: hdulist = pyfits.open(filename)\n except: raise IOError(\"PYFITS DOES NOT LIKE THE FILE YOU GAVE ('\"+filename+\"'), TO SEE WHAT ERROR IT GIVES TRY: hdulist = pyfits.open('\"+filename+\"')\")\n\n\n #### open up fits file ##############################\n hdulist = pyfits.open(filename)\n\n # select which header unit ot use\n if len(hdulist) > 1: \n hdu = int(hdu)\n hdu = np.clip(hdu,0,len(hdulist)-1)\n else: hdu = 0\n\n # specify the current header unit\n header_unit = hdulist[hdu]\n prihdr = header_unit.header\n\n # can display some useful information \n if verbose: \n print \"=\"*60\n print (hdulist.info(),'\\n')\n if len(hdulist) > 1:\n print \"=\"*20+\" USING HEADER: \"+\"=\"*20\n print repr(hdulist[hdu])\n\n ##### fill in the data class\n # not get header info of relevance\n simple = query_fits_header(prihdr,'SIMPLE',noval=False)\n xtension = query_fits_header(prihdr,'XTENSION')\n if simple.found:\n if not simple.val: print \"HeadsUp: Header Keyword SIMPLE is False, you may encounter unexpected behavior\"\n else:\n if not xtension.found: print \"HeadsUp: No extension keyword found in headers, you may encounter unexpected behavior\"\n \n \n #### read in important information from header, if present\n ibits = query_fits_header(prihdr,'BITPIX') # how many bits per pixel in the data? Not currently necessary, numpy will adapt\n \n naxis = query_fits_header(prihdr,'NAXIS' ,noval=0) # how many dimenstions?\n naxis1 = query_fits_header(prihdr,'NAXIS1',noval=0) # number of points per order\n naxis2 = query_fits_header(prihdr,'NAXIS2',noval=0) # number of orders\n naxis3 = query_fits_header(prihdr,'NAXIS3',noval=0) # number of different spectra\n\n apformat = query_fits_header(prihdr,'APFORMAT')\n if apformat.found: print \"WARNING: I'M NOT SURE HOW TO DEAL WITH APFORMAT VALUES\" # !! 
though I think it's just the spec files\n\n if not naxis.found: raise IOError(\"ERROR: Keyword NAXIS not found\")\n\n bzero = query_fits_header(prihdr,\"BZERO\",noval=0)\n bscale = query_fits_header(prihdr,\"BSCALE\",noval=1)\n\n ###### read in data ##############################################\n data = header_unit.data\n\n if data is None:\n wl, data, inv_var = np.zeros(3).reshape((3,1))\n if get_data: return (wl,data,inv_var)\n else: return eyeSpec_spec(wl,data,inv_var,header_unit.header)\n else:\n # check that data matches up with at least one of the dimensions\n if data.ndim != naxis.val: raise ValueError(\"Dimension of data \"+str(data.ndim)+\" does not match keyword naxis \"+str(naxis.val))\n \n statement = 'Dimension does not match data.shape = '+str(data.shape)+\" fits file (naxis1, naxis2, naxis3) \"+str(tuple([naxis1.val,naxis2.val,naxis3.val]))\n if data.ndim == 1: \n assert data.shape == (naxis1.val,) , statement\n data = data.reshape((1,1,)+data.shape)\n \n elif data.ndim == 2: \n assert data.shape == (naxis2.val, naxis1.val), statement\n data = data.reshape((1,)+data.shape) \n \n elif data.ndim == 3: \n assert data.shape == (naxis3.val, naxis2.val, naxis1.val), statement\n \n ##### Determine the which data is useful \n # which orders to read in \n nband = np.arange(data.shape[0])+1\n nord = np.arange(data.shape[1])+1\n\n \n ##### Calculate the wavelengths for the data\n # set up wavelength and inverse_variance\n wl = np.ones(data.shape)\n \n # get the wavelength coefficients\n wlcoeff = wlsoln_coeff_from_header(header_unit.header, apply_WCS_rv, preferred_wlsoln)\n \n # the same wavelength solution is applied to all bands so just pick the first and broadcast\n band = 0\n priv_info = {}\n \n # go through all the orders\n do_progress = True\n progressive_pt = 1 # this will advance and be used when there is no wavelength solution\n for i in xrange(len(nord)):\n order_i = nord[i]\n\n # get the coefficients and function type \n equ_type = wlcoeff.get_equation_type()\n if equ_type in ['none',None,'no solution'] and do_progress: \n coeff = [progressive_pt,1]\n equ_type = 'pts'\n else: coeff = wlcoeff.get_coeffs(order_i)\n \n # pts[0] = 1 :: this was definitely the right thing to do for SPECTRE's 1-D output but may not be for other equations, may need pts[0]=0, this may be for bzero,bscale\n pts = np.arange(len(wl[0][i]))+1 \n # apply function\n wl[0][i] = wlsolvefxn(pts, coeff, equ_type) \n \n progressive_pt += len(pts)\n \n for j in xrange(len(nband)): \n band_j = nband[j]\n if (band_j,order_i) not in priv_info: priv_info[(band_j,order_i)] = {} \n # record the private information\n priv_info[(band_j,order_i)]['disp']= [coeff, equ_type]\n priv_info[(band_j,order_i)]['rv'] = [0] \n priv_info[(band_j,order_i)]['disp extr'] = deepcopy(wlcoeff.extra)\n \n # now propogate the solution to the other bands\n stdwl = wl[0]\n for i in xrange(1,len(nband)): wl[i] = stdwl \n \n inv_var = np.ones(data.shape)\n #=================================================================#\n # return the data .OR. 
go on and create the spec_obj\n if get_data: return (wl, data, inv_var)\n\n #=================================================================# \n spec_obj = eyeSpec_spec(wl,data,inv_var,header_unit.header)\n # set up private information\n priv_info['filename'] = filename\n spec_obj.filename = filename\n \n bands = np.array(np.arange(1,len(data)+1),dtype=str)\n band_info = {}\n i = -1\n for key in prihdr.keys():\n if key[:6] != 'BANDID': continue\n if i < len(bands):\n i+=1\n bands[i] = prihdr[key]\n band_info[key] = prihdr[key]\n else: raise IOError(\"MORE BANDID KEYWORDS IN HEADER THAN FIRST DIMENSION OF DATA\") \n\n # add band info if available:\n if len(band_info) != 0: priv_info['bandids'] = band_info\n else: priv_info['bandids'] = None\n \n # match up the private info created during read in to the spec_obj\n for key in priv_info: spec_obj._private_info[key] = priv_info[key]\n \n # map fits value => acutal index\n # spec_obj._bands = {}\n # spec_obj._orders = {}\n # for i in range(len(nspec)): spec_obj._bands[nspec[i]] = i\n # for i in range(len(nord)): spec_obj._orders[nord[i]] = i\n # \n \n if 7 in nband: spec_obj.set_band(6) # this is where Magellian data stores it's object data, i.e. BANDID7 which is index 6\n\n if len(hdulist) > 1: spec_obj.hdrlist = [h.header for h in hdulist]\n \n return spec_obj", "def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" 
+ str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}", "def get_TESS_data(filename, fluxtype = 'PDCSAP_FLUX'):\n # Manipulate the fits file:\n data = fits.getdata(filename)\n\n # Identify zero-flux values to take them out of the data arrays:\n idx = np.where((data[fluxtype]!=0.)&(~np.isnan(data[fluxtype])))[0]\n\n # Return median-normalized flux:\n return data['TIME'][idx],data[fluxtype][idx]/np.median(data[fluxtype][idx]), \\\n data[fluxtype+'_ERR'][idx]/np.median(data[fluxtype][idx])", "def T_c(I, T_amb, V, D, R_list, N_cond=1, T_range=[298,323,348], a_s=0.9, e_s=0.9, I_sun=900.0, temp_factor=1, wind_factor=1, n_iter=10):\n\n # def Q_gen(I, R):\n # w = I * I * R\n # return w\n\n # def Q_rad_in(I_sun, A_s, a_s):\n # w = I_sun * D * a_s\n # return w\n\n # def Q_conv(htcoeff, A_s, T_lin, T_amb):\n # w = htcoeff * A_s * (T_line - T_amb)\n # return w\n\n # def Q_rad_out(e_s, A_s, sigma, T_line, T_amb):\n # w = e_s * D * sigma * (T_line**4 - T_amb**4)\n # return w\n\n def reynolds(V, D, v, Mair=1.103):\n r = V * D / v\n return r\n\n def nusselt(Re, Pr):\n a = 0.62 * ( (Re) ** (1.0/2.0) ) * ( Pr ** (1.0/3.0) )\n b = (1 + (0.4/(Pr**(2.0/3.0) ) ) ) ** (1.0/4.0)\n c = (Re / 282000) ** (5.0/8.0)\n n = 0.3 + (a/b) * ( (1 + c) ** (4.0/5.0) )\n return n\n\n def air_prop(T_amb):\n # temp v k Pr\n air_prop = np.array([[200, 7.59e-6, 18.1e-3, 0.737],\n [250, 11.44e-6, 22.3e-3, 0.720],\n [300, 15.89e-6, 26.3e-3, 0.707],\n [350, 20.92e-6, 30.0e-3, 0.700],\n [400, 26.41e-6, 33.8e-3, 0.690],\n [450, 32.39e-6, 37.3e-3, 0.686],\n [500, 38.79e-6, 40.7e-3, 0.684],\n [550, 45.57e-6, 43.9e-3, 0.683],\n [600, 52.69e-6, 46.9e-3, 0.685]])\n\n v, k, Pr = np.apply_along_axis(lambda x: np.interp(T_amb, air_prop[:,0], x),\n 0, air_prop[:,1:])\n return v, k, Pr\n\n def R_T(R_lo, R_mid, R_hi, T_line, N_cond, T_range=T_range):\n if 273 <= T_line <= 323:\n R = ((R_lo + \n ((R_lo - R_mid)/(T_range[0] - T_range[1]))\n *(T_line - T_range[0]))/N_cond)\n elif T_line > 323:\n R = ((R_mid + \n ((R_mid - R_hi)/(T_range[1] - T_range[2]))\n *(T_line - T_range[1]))/N_cond)\n else:\n R = R_lo\n print('Out of bounds')\n return R\n\n R_lo, R_mid, R_hi = R_list[0], R_list[1], R_list[2]\n temp_factor = 1\n wind_factor = 1\n sigma = 5.6703e-8 # Stefan-Boltzmann constant\n\n T_amb = T_amb*temp_factor\n V = V*wind_factor\n\n v, k, Pr = air_prop(T_amb)\n Re = reynolds(V, D, v)\n htcoeff = nusselt(Re, Pr) * k / D\n\n def T_line(T_init):\n \n R = R_T(R_lo, R_mid, R_hi, T_init, N_cond)\n print R\n\n C4 = e_s * sigma * D * math.pi\n C3 = 0.0\n C2 = 0.0\n C1 = htcoeff * D * math.pi\n C0 = - ( I ** 2 * R\n + I_sun * a_s * D\n + htcoeff * D * math.pi * T_amb\n + e_s * D * math.pi * sigma * (T_amb ** 4))\n\n return np.roots([C4, C3, C2, C1, C0])\n\n T_c = T_amb\n \n for i in range(n_iter):\n T_arr = T_line(T_c)\n T_c = np.real(T_arr[np.where((np.real(T_arr) > 0) & ~(np.iscomplex(T_arr)))]).mean()\n print T_c\n\n return T_c", "def temp_series(smhi_data):\n 
consumable_data = {\n \"station\": smhi_data[\"station\"][\"name\"],\n \"temp\": [],\n \"from\": smhi_data[\"value\"][0][\"date\"],\n \"to\": smhi_data[\"value\"][-1][\"date\"]\n }\n for temp_post in smhi_data[\"value\"]:\n consumable_data[\"temp\"].append(float(temp_post[\"value\"]))\n return consumable_data", "def read_ep_data(path=None):\n if path is None:\n path = \"\"\"C:/Users/Brendan/Downloads/ExodusPoint - Assignment Data (Rates Quantitative Strategist) - New York.xlsx\"\"\"\n \n xls = pd.ExcelFile(path)\n legacy_sheets = ['TU - Com; NonCom','FV - Com;NonCom', 'TY - Com;NonCom', 'US - Com;NonCom ', 'WN - Com;NonCom']\n tff_sheets = ['TU - Sectorial','FV - Sectorial', 'TY - Sectorial', 'US - Sectorial', 'WN - Sectorial']\n \n a, b = {}, {}\n for s in legacy_sheets:\n d = pd.read_excel(xls, sheet_name=s, index_col=0, header=3, skipfooter=1, parse_dates=True)\n d = d.rename(rename_legacy_cols, axis=1).drop('Total', axis=1)\n ct = s[0:2]\n a[ct] = d\n legacy_raw = pd.concat(a, axis=1)\n \n for s in tff_sheets:\n d = pd.read_excel(xls, sheet_name=s, index_col=0, header=3, skipfooter=1, parse_dates=True)\n d = d.rename(rename_tff_cols, axis=1)\n ct = s[0:2]\n b[ct] = d\n tff_raw = pd.concat(b, axis=1)\n \n futures_raw = pd.read_excel(xls, sheet_name=\"Futures Price and Duration Data\",\n index_col=0, header=[3,4,5], skipfooter=1, parse_dates=True)\n futures_raw.columns = futures_raw.columns.droplevel(1)\n futures_raw.columns.set_levels([s[0:2] for s in futures_raw.columns.levels[0]], \n level=0, inplace=True)\n futures_raw.columns.set_names(['ct', 'field'], inplace=True)\n \n swaps_raw = pd.read_excel(xls, sheet_name=\"Swap Prices\",\n index_col=0, header=3, skipfooter=1, parse_dates=True)\n \n return legacy_raw, tff_raw, futures_raw, swaps_raw", "def windTimeSeries(ftp, rootFolder, selectedDate, outputFolder, ymax):\n \n dataType = 'wind_reconstruction_data'\n file_list = []\n \n\n # Retrieve data from folders in FTP\n \n possible_hours = [format(x,'02d') for x in range(24)]\n from_zone = tz.gettz('America/Mexico_City')\n to_zone = tz.gettz('UTC')\n \n for item in possible_hours:\n \n local_time = datetime.strptime(selectedDate+item, '%Y-%m-%d%H')\n local_time = local_time.replace(tzinfo=from_zone)\n \n utc_date = local_time.astimezone(to_zone)\n \n utc_day = utc_date.strftime(\"%Y-%m-%d\")\n \n utc_hour = utc_date.strftime(\"%H\")\n \n time_folder = rootFolder+utc_day+'/'+dataType+'/'+utc_hour+'-00/'\n \n creat_min = []\n try:\n\n ftp.dir(time_folder, creat_min.append)\n dbs = []\n for i in range(len(creat_min)):\n if creat_min[i].find('DBS') != -1:\n dbs.append(creat_min[i])\n\n if len(dbs) == 0:\n file_list.append('missing_data_'+utc_day+'_'+utc_hour+'-25')\n file_list.append('missing_data_'+utc_day+'_'+utc_hour+'-55')\n\n elif len(dbs) == 1:\n if dbs[0].find('-25-'):\n file_list.append(time_folder+dbs[0][55:])\n file_list.append('missing_data_'+utc_day+'_'+utc_hour+'-55')\n elif dbs[0].find('-55-'):\n file_list.append('missing_data_'+utc_day+'_'+utc_hour+'-25')\n file_list.append(time_folder+dbs[0][55:])\n else:\n for i in range(len(dbs)):\n file_list.append(time_folder+dbs[i][55:])\n\n except:\n file_list.append('missing_data_'+utc_day+'_'+utc_hour+'-25')\n file_list.append('missing_data_'+utc_day+'_'+utc_hour+'-55')\n\n\n \n # Create output folder\n outputFolder = outputFolder+'/'+selectedDate\n try:\n mkdirp(outputFolder)\n except:\n print('warning: folder '+outputFolder+' already exists')\n \n # Make 'generic' outputfile\n outFile = outputFolder+'/'\n\n try:\n \n meas_time = 
[]\n Usets = []\n Vsets = []\n Wsets = []\n \n for working_file in file_list:\n\n if working_file.find('missing_data') != -1:\n Xset = pd.DataFrame(np.nan,columns=['Xwind'],index=range(100,2550,50))\n Yset = pd.DataFrame(np.nan,columns=['Ywind'],index=range(100,2550,50))\n Zset = pd.DataFrame(np.nan,columns=['Zwind'],index=range(100,2550,50))\n match = (re.search('\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}',working_file)).group(0)\n meas_time.append(match)\n\n else: \n match = (re.search('\\d{4}-\\d{2}-\\d{2}_\\d{2}-\\d{2}',working_file)).group(0)\n #hour_min = re.search('_\\d{2}-\\d{2}-\\d{2}',working_file).group(0)\n meas_time.append(match)\n \n obj = DataContainer()\n ftp.retrbinary('RETR %s' % working_file, obj.readFromFTP)\n columns = ['Timestamp', 'Azimuth', 'Elevation','Range','Xwind','Ywind','Zwind',\\\n 'CNR','ConfIdx']\n\n # Get the date from the file as a DataFrame\n data = obj.dataToArray(columns)\n data.drop(data[data.Range > ymax].index, inplace=True)\n data = data[['Range','Xwind','Ywind','Zwind']]\n # Replace abs values Xwind and Ywind greater than 50 with NaN\n \n data.Xwind.where((data.Xwind < 50) | (data.Xwind > -50),np.nan,inplace=True)\n data.Ywind.where((data.Ywind > 50) | (data.Ywind > -50),np.nan,inplace=True)\n\n dataByRange = data.groupby('Range').mean() \n \n Xset = dataByRange[['Xwind']]\n Yset = dataByRange[['Ywind']]\n Zset = dataByRange[['Zwind']]\n \n Usets.append(Xset)\n Vsets.append(Yset)\n Wsets.append(Zset)\n \n df_U = pd.concat(Usets,axis=1)\n df_V = pd.concat(Vsets,axis=1)\n df_W = pd.concat(Wsets,axis=1)\n \n U = df_U.values;\n V = df_V.values;\n W = df_W.values;\n \n # Vertical wind grid\n x = np.linspace(0,48,49)\n y = np.linspace(100,2500,49)\n X, Y = np.meshgrid(x, y)\n \n # If the plot is going to have a variable pcolormesh uncomment next section\n# cmapLim = 0\n# if abs(np.nanmax(W)) > abs(np.nanmin(W)):\n# cmapLim = abs(np.nanmax(W))\n# else:\n# cmapLim = abs(np.nanmin(W))\n \n # Change back the UTC times to local time\n \n timelabels = []\n to_zone = tz.gettz('America/Mexico_City')\n from_zone = tz.gettz('UTC')\n \n for entry in meas_time:\n utc_time = datetime.strptime(entry, '%Y-%m-%d_%H-%M')\n utc_time = utc_time.replace(tzinfo=from_zone)\n \n local = utc_time.astimezone(to_zone)\n \n timelabels.append(local.strftime(\"%H:%M\"))\n \n timelabels = [i.replace('25','00') for i in timelabels[2:48:4]]\n \n # Plot \n title = ('Perfil vertical del viento, %s (serie de tiempo)' % selectedDate)\n fig, ax = plt.subplots(figsize=(20,8))\n \n # For variable pcolormesh uncomment next line\n #cax = ax.pcolormesh(X, Y, W, cmap='seismic', vmin= (-cmapLim), vmax=cmapLim)\n \n cax = ax.pcolormesh(X, Y, W, cmap='seismic', vmin= -3, vmax=3)\n ax.set_title(title,fontsize = 24)\n ax.set_xlabel('Hora Local [hh:mm]', fontsize = 18, labelpad = 14)\n ax.set_xticks(np.arange(2.5,48, step=4))\n ax.set_xticklabels(timelabels, rotation=45, size=12)\n ax.set_ylabel('Altura [m]', fontsize = 18, labelpad = 18)\n ax.yaxis.set_tick_params(labelsize=12)\n ax.text(0.18,-0.08,'Datos de perfilador LIDAR Leosphere Windcube 100s. 
Lat 19.3262, Lon -99.1761, Alt 2280 msnm',transform=plt.gcf().transFigure, fontdict={'size': 18})\n \n # Colorbar\n cbaxes = fig.add_axes([0.91, 0.125, 0.02, 0.4]) \n cbar = fig.colorbar(cax, cax = cbaxes)\n cbar.set_label(r'Viento vertical [$m\\,s^{-1}$]', fontsize = 18)\n cbaxes.text(-0.3,1.07,'(Hacia arriba)')\n cbaxes.text(-0.3,-0.07,'(Hacia abajo)') \n \n \n #Other a artists\n im = mpimg.imread('/home/perfilador/perfila/drozanes/resources/time_adv.png')\n newax = fig.add_axes([0.6, -.03, 0.06, 0.06], anchor='NW', zorder=0) #[xo,yo,width,height]\n newax.imshow(im)\n newax.axis('off')\n \n im = mpimg.imread('/home/perfilador/perfila/drozanes/resources/directions.png')\n newax1 = fig.add_axes([0.90, 0.75, 0.15, 0.15], anchor='NW', zorder=0) #[xo,yo,width,height]\n newax1.imshow(im)\n newax1.axis('off')\n \n \n # Horizontal wind grid\n \n x_h = np.arange(0, len(meas_time), 1)\n Xh, Yh = np.meshgrid(x_h, y)\n \n # Plot arrows\n Q = ax.quiver(Xh[::2], Yh[::2], U[::2], V[::2], pivot = 'mid',alpha=0.75)\n ax.quiverkey(Q, 0.93, 0.68, 10, r'$10\\,m\\,s^{-1}$',coordinates = 'figure', labelpos='N' )\n \n \n finalOutputFile = outFile+'timeseries_daily_'+selectedDate\n fig.savefig(finalOutputFile,bbox_inches='tight')\n plt.close()\n \n except Exception as e:\n print(\"Problema calculando Wind Time Series\", e)", "def Uiso(data, logfile):\n printer.register_file(logfile, 'log', mode='a')\n # isofilters, isopartnerfilters = set_filter()\n #===========================================================================\n # keys=['cart_int','cart_ext','cart_sum','cart_meas','iso_meas']\n #===========================================================================\n use1, use2 = None, None\n try:\n use = config.arg('iso').split(':')\n use1 = use[0]\n use2 = use[1]\n\n except:\n use1 = 'cart_sum'\n use2 = 'cart_sum'\n # printer('\\nSelected filters:\\n\\n Attribute | Value | True | Function'\n # '\\n===================================================\\nAtom filter: | | |')\n # for isofilter in isofilters:\n # printer('{:15s} | {:12s} | {:5s} | {}'.format(isofilter[0], isofilter[1], isofilter[2], isofilter[3]))\n # printer('-----------------|--------------|-------|----------\\nPartner filter: | | |')\n # for isofilter in isopartnerfilters:\n # printer('{:15s} | {:12s} | {:5s} | {}'.format(isofilter[0], isofilter[1], isofilter[2], isofilter[3]))\n printer('\\nComparing {} of hydrogen atoms\\nwith {} of parent atoms.\\n'.format(use1, use2))\n printer(' Riding | Parent | U_rel | U_rel\\n atom | atom | geom | arith')\n printer(' ================================')\n geom_sum = []\n arit_sum = []\n for atom in data['exp'].atoms:\n # if apply_filter(atom, isofilters):\n for heavy_atom in cg.get_framework_neighbours(atom, useH=True):\n if not atom == heavy_atom and filter_atom_pair(config, atom, heavy_atom):\n U_rel_geom = cg.Uiso(atom.adp[use1]) / cg.Uiso(heavy_atom.adp[use2])\n geom_sum.append(U_rel_geom)\n U_rel_arith = cg.Uiso(atom.adp[use1], mean='arithmetic') / \\\n cg.Uiso(heavy_atom.adp[use2], mean='arithmetic')\n arit_sum.append(U_rel_arith)\n printer(' {light:5s} | {heavy:5s} | {U:5.3f} | {U2:5.3f}'.format(light=atom.name,\n heavy=heavy_atom,\n U=U_rel_geom,\n U2=U_rel_arith))\n printer(' -------|--------|-------|-------')\n printer(' {light:5s} | {heavy:5s} | {U:5.3f} | {U2:5.3f}'.format(light='mean',\n heavy='---',\n U=np.mean(geom_sum),\n U2=np.mean(arit_sum)))\n printer(' {light:5s} | {heavy:5s} | {U:5.3f} | {U2:5.3f}'.format(light='SD',\n heavy='---',\n U=np.std(geom_sum),\n U2=np.std(arit_sum)))\n\n 
printer('{temp:.1f} {U:5.3f} {Uer:5.3f} {U2:5.3f} {U2er:5.3f}'.format(temp=data.temperature,\n U=np.mean(geom_sum),\n Uer=np.std(geom_sum),\n U2=np.mean(arit_sum),\n U2er=np.std(arit_sum)),\n use=['log'])\n printer.spacer()", "def test_get_engTemperature(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ENG_TEMPERATURE_IDX, ENG_TEMPERATURE_SUB)\n param_obj = self.__dict__[servo_type]._get_engTemperature()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in engTemperature...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in 
range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)" ]
[ "0.6243293", "0.5701022", "0.5552244", "0.5491569", "0.5487227", "0.5479191", "0.54358137", "0.54272795", "0.5394585", "0.5392419", "0.53615534", "0.533306", "0.53179353", "0.5315923", "0.5295476", "0.5265205", "0.52420974", "0.5239961", "0.52259064", "0.52226466", "0.52215695", "0.52029717", "0.51637083", "0.5157517", "0.5152349", "0.51520777", "0.5150459", "0.51497936", "0.5144993", "0.51448935", "0.5137341", "0.5125648", "0.5116353", "0.51061916", "0.5106179", "0.5091296", "0.50789547", "0.5066447", "0.50615686", "0.50563544", "0.5051718", "0.5051292", "0.5037253", "0.50225985", "0.5020266", "0.50184566", "0.50047016", "0.50023496", "0.50021005", "0.499682", "0.49967542", "0.4985171", "0.49824747", "0.4971986", "0.49593222", "0.49538475", "0.49490976", "0.49468535", "0.49437168", "0.4935829", "0.49346888", "0.49257395", "0.49223068", "0.49219185", "0.49171853", "0.49164367", "0.4911596", "0.49089813", "0.49074277", "0.4906587", "0.49046436", "0.49044317", "0.49020398", "0.48814997", "0.48804653", "0.48803282", "0.48737067", "0.48729736", "0.48681065", "0.48654687", "0.4861055", "0.48598596", "0.48589188", "0.4856761", "0.48551357", "0.4854716", "0.48511732", "0.48396713", "0.48359758", "0.48335093", "0.48299232", "0.48297882", "0.48276323", "0.4827087", "0.48234984", "0.48183045", "0.48145908", "0.48119092", "0.48098204", "0.4809589" ]
0.64122236
0